/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/ktime.h>
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"

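/**
 * amdgpu_gem_object_free - free the backing BO of a GEM object
 *
 * @gobj: GEM object to free
 *
 * Tears down a PRIME import if one exists, unregisters the MMU
 * notifier and drops the amdgpu_bo reference.
 */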
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		amdgpu_mn_unregister(robj);
		amdgpu_bo_unref(&robj);
	}
}

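/**
 * amdgpu_gem_object_create - allocate a GEM object backed by an amdgpu BO
 *
 * @adev: amdgpu device
 * @size: requested size in bytes
 * @alignment: requested alignment, raised to at least PAGE_SIZE
 * @initial_domain: initial placement domain(s)
 * @flags: AMDGPU_GEM_CREATE_* flags for the new BO
 * @kernel: true for kernel internal allocations
 * @obj: returned GEM object
 *
 * A failed VRAM allocation is retried with VRAM|GTT as placement.
 * Returns 0 on success, negative error code otherwise.
 */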
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct amdgpu_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	if (!(initial_domain & (AMDGPU_GEM_DOMAIN_GDS | AMDGPU_GEM_DOMAIN_GWS |
				AMDGPU_GEM_DOMAIN_OA))) {
		/* Maximum bo size is the unpinned gtt size since we use the gtt to
		 * handle vram to system pool migrations.
		 */
		max_size = adev->mc.gtt_size - adev->gart_pin_size;
		if (size > max_size) {
			DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
				  size >> 20, max_size >> 20);
			return -ENOMEM;
		}
	}
retry:
	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;

	return 0;
}

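/**
 * amdgpu_gem_force_release - release all GEM handles of all clients
 *
 * @adev: amdgpu device
 *
 * Intended for driver teardown; warns about and cleans up any user
 * space clients that still hold open handles or allocations.
 */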
void amdgpu_gem_force_release(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_file *file;

	mutex_lock(&ddev->struct_mutex);

	list_for_each_entry(file, &ddev->filelist, lhead) {
		struct drm_gem_object *gobj;
		int handle;

		WARN_ONCE(1, "Still active user space clients!\n");
		spin_lock(&file->table_lock);
		idr_for_each_entry(&file->object_idr, gobj, handle) {
			WARN_ONCE(1, "And also active allocations!\n");
			drm_gem_object_unreference(gobj);
		}
		idr_destroy(&file->object_idr);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&ddev->struct_mutex);
}

/*
 * Called from drm_gem_handle_create(), which appears in both the new and
 * open ioctl paths.
 */
int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct amdgpu_bo *rbo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = rbo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = amdgpu_vm_bo_add(adev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	amdgpu_bo_unreserve(rbo);
	return 0;
}

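/**
 * amdgpu_gem_object_close - clean up a BO's VM state on handle close
 *
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the client dropping the handle
 *
 * Drops the bo_va reference for this client's VM and removes the
 * mapping entirely once the reference count reaches zero.
 */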
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = bo->adev;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;

	struct amdgpu_bo_list_entry vm_pd;
	struct list_head list, duplicates;
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_va *bo_va;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);

	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
	if (r) {
		dev_err(adev->dev, "leaking bo va because we fail to reserve bo (%d)\n", r);
		return;
	}
	bo_va = amdgpu_vm_bo_find(vm, bo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			amdgpu_vm_bo_rmv(adev, bo_va);
		}
	}
	ttm_eu_backoff_reservation(&ticket, &list);
}

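/**
 * amdgpu_gem_handle_lockup - handle a lockup error code
 *
 * @adev: amdgpu device
 * @r: error code to check
 *
 * On -EDEADLK a GPU reset is attempted and -EAGAIN returned on
 * success, so that user space retries; other codes pass through.
 */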
static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

/*
 * GEM ioctls.
 */
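/**
 * amdgpu_gem_create_ioctl - create a GEM object
 *
 * Allocates a BO in the requested domains; sizes for the GDS, GWS
 * and OA domains are scaled by their AMDGPU_*_SHIFT before being
 * rounded up to the page size.
 */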
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_create *args = data;
	uint64_t size = args->in.bo_size;
	struct drm_gem_object *gobj;
	uint32_t handle;
	bool kernel = false;
	int r;

	/* create a gem object to contain this object in */
	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
		kernel = true;
		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
			size = size << AMDGPU_GDS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
			size = size << AMDGPU_GWS_SHIFT;
		else if (args->in.domains == AMDGPU_GEM_DOMAIN_OA)
			size = size << AMDGPU_OA_SHIFT;
		else {
			r = -EINVAL;
			goto error_unlock;
		}
	}
	size = roundup(size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
				     (u32)(0xffffffff & args->in.domains),
				     args->in.domain_flags,
				     kernel, &gobj);
	if (r)
		goto error_unlock;

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto error_unlock;

	memset(args, 0, sizeof(*args));
	args->out.handle = handle;
	return 0;

error_unlock:
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

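/**
 * amdgpu_gem_userptr_ioctl - wrap user memory in a GEM object
 *
 * The address and size must be page aligned. Writable mappings must
 * set AMDGPU_GEM_USERPTR_REGISTER so an MMU notifier can invalidate
 * the BO when the backing range changes.
 */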
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint32_t handle;
	int r;

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
	    AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
	    AMDGPU_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
	    !(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must install an MMU notifier */
		return -EACCES;
	}

	/* create a gem object to contain this object in */
	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_CPU, 0,
				     0, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_amdgpu_bo(gobj);
	bo->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & AMDGPU_GEM_USERPTR_REGISTER) {
		r = amdgpu_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
		down_read(&current->mm->mmap_sem);

		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
						 bo->tbo.ttm->pages);
		if (r)
			goto unlock_mmap_sem;

		r = amdgpu_bo_reserve(bo, true);
		if (r)
			goto free_pages;

		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		amdgpu_bo_unreserve(bo);
		if (r)
			goto free_pages;

		up_read(&current->mm->mmap_sem);
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	return 0;

free_pages:
	release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages, false);

unlock_mmap_sem:
	up_read(&current->mm->mmap_sem);

release_object:
	drm_gem_object_unreference_unlocked(gobj);

handle_lockup:
	r = amdgpu_gem_handle_lockup(adev, r);

	return r;
}

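/**
 * amdgpu_mode_dumb_mmap - return the fake mmap offset of a BO
 *
 * @filp: DRM file the handle belongs to
 * @dev: DRM device
 * @handle: GEM handle to look up
 * @offset_p: returned mmap offset
 *
 * Userptr BOs and BOs created with AMDGPU_GEM_CREATE_NO_CPU_ACCESS
 * may not be mapped and yield -EPERM.
 */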
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
	*offset_p = amdgpu_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

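/**
 * amdgpu_gem_mmap_ioctl - get the mmap offset for a GEM handle
 *
 * Thin wrapper around amdgpu_mode_dumb_mmap() that returns the
 * offset through the ioctl output union.
 */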
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	union drm_amdgpu_gem_mmap *args = data;
	uint32_t handle = args->in.handle;

	memset(args, 0, sizeof(*args));
	return amdgpu_mode_dumb_mmap(filp, dev, handle, &args->out.addr_ptr);
}

/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long timeout_jiffies;
	ktime_t timeout;

	/* clamp timeout if it's too large */
	if (((int64_t)timeout_ns) < 0)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(timeout) < 0)
		return 0;

	timeout_jiffies = nsecs_to_jiffies(ktime_to_ns(timeout));
	/* clamp timeout to avoid unsigned -> signed overflow */
	if (timeout_jiffies > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return timeout_jiffies;
}

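/**
 * amdgpu_gem_wait_idle_ioctl - wait for a BO to become idle
 *
 * Waits on all fences in the BO's reservation object up to the
 * absolute timeout from user space; out.status is 0 when the BO went
 * idle and 1 when the timeout expired first.
 */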
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	uint32_t handle = args->in.handle;
	unsigned long timeout = amdgpu_gem_timeout(args->in.timeout);
	int r = 0;
	long ret;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	if (timeout == 0)
		ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
	else
		ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
	 * ret < 0 means interrupted before timeout
	 */
	if (ret >= 0) {
		memset(args, 0, sizeof(*args));
		args->out.status = (ret == 0);
	} else
		r = ret;

	drm_gem_object_unreference_unlocked(gobj);
	r = amdgpu_gem_handle_lockup(adev, r);
	return r;
}

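/**
 * amdgpu_gem_metadata_ioctl - get or set BO metadata
 *
 * Depending on args->op this reads or writes the tiling flags and
 * the opaque metadata blob attached to a BO.
 */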
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp)
{
	struct drm_amdgpu_gem_metadata *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r = -1;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r != 0))
		goto out;

	if (args->op == AMDGPU_GEM_METADATA_OP_GET_METADATA) {
		amdgpu_bo_get_tiling_flags(robj, &args->data.tiling_info);
		r = amdgpu_bo_get_metadata(robj, args->data.data,
					   sizeof(args->data.data),
					   &args->data.data_size_bytes,
					   &args->data.flags);
	} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
		if (args->data.data_size_bytes > sizeof(args->data.data)) {
			r = -EINVAL;
			goto unreserve;
		}
		r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
		if (!r)
			r = amdgpu_bo_set_metadata(robj, args->data.data,
						   args->data.data_size_bytes,
						   args->data.flags);
	}

unreserve:
	amdgpu_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

/**
 * amdgpu_gem_va_update_vm - update the bo_va in its VM
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to update
 * @operation: map or unmap operation that was requested
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
				    struct amdgpu_bo_va *bo_va, uint32_t operation)
{
	struct ttm_validate_buffer tv, *entry;
	struct amdgpu_bo_list_entry vm_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);

	tv.bo = &bo_va->bo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);

	/* Provide duplicates to avoid -EALREADY */
	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r)
		goto error_print;

	amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
	list_for_each_entry(entry, &list, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}
	list_for_each_entry(entry, &duplicates, head) {
		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
		/* if anything is swapped out don't swap it in here,
		   just abort and wait for the next CS */
		if (domain == AMDGPU_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
	if (r)
		goto error_unreserve;

	if (operation == AMDGPU_VA_OP_MAP)
		r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_print:
	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

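/**
 * amdgpu_gem_va_ioctl - map or unmap a BO in the process VM
 *
 * Validates the requested address, flags and operation, updates the
 * bo_va and, unless AMDGPU_VM_DELAY_UPDATE is set or VM debugging is
 * enabled, commits the change via amdgpu_gem_va_update_vm().
 */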
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	struct amdgpu_bo *rbo;
	struct amdgpu_bo_va *bo_va;
	struct ttm_validate_buffer tv, tv_pd;
	struct ww_acquire_ctx ticket;
	struct list_head list, duplicates;
	uint32_t invalid_flags, va_flags = 0;
	int r = 0;

	if (!adev->vm_manager.enabled)
		return -ENOTTY;

	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"va_address 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->va_address,
			AMDGPU_VA_RESERVED_SIZE);
		return -EINVAL;
	}

	invalid_flags = ~(AMDGPU_VM_DELAY_UPDATE | AMDGPU_VM_PAGE_READABLE |
			AMDGPU_VM_PAGE_WRITEABLE | AMDGPU_VM_PAGE_EXECUTABLE);
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		return -EINVAL;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
	case AMDGPU_VA_OP_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_amdgpu_bo(gobj);
	INIT_LIST_HEAD(&list);
	INIT_LIST_HEAD(&duplicates);
	tv.bo = &rbo->tbo;
	tv.shared = true;
	list_add(&tv.head, &list);

	tv_pd.bo = &fpriv->vm.page_directory->tbo;
	tv_pd.shared = true;
	list_add(&tv_pd.head, &list);

	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (r) {
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}

	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		ttm_eu_backoff_reservation(&ticket, &list);
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case AMDGPU_VA_OP_MAP:
		if (args->flags & AMDGPU_VM_PAGE_READABLE)
			va_flags |= AMDGPU_PTE_READABLE;
		if (args->flags & AMDGPU_VM_PAGE_WRITEABLE)
			va_flags |= AMDGPU_PTE_WRITEABLE;
		if (args->flags & AMDGPU_VM_PAGE_EXECUTABLE)
			va_flags |= AMDGPU_PTE_EXECUTABLE;
		r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
				     args->offset_in_bo, args->map_size,
				     va_flags);
		break;
	case AMDGPU_VA_OP_UNMAP:
		r = amdgpu_vm_bo_unmap(adev, bo_va, args->va_address);
		break;
	default:
		break;
	}
	ttm_eu_backoff_reservation(&ticket, &list);
	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
	    !amdgpu_vm_debug)
		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

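/**
 * amdgpu_gem_op_ioctl - query or change BO placement info
 *
 * GET_GEM_CREATE_INFO copies the creation parameters to user space;
 * SET_PLACEMENT rewrites the preferred domains (not allowed for
 * userptr BOs).
 */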
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_amdgpu_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);

	r = amdgpu_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
		struct drm_amdgpu_gem_create_in info;
		void __user *out = (void __user *)(long)args->value;

		info.bo_size = robj->gem_base.size;
		info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
		info.domains = robj->prefered_domains;
		info.domain_flags = robj->flags;
		amdgpu_bo_unreserve(robj);
		if (copy_to_user(out, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
		}
		robj->prefered_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
							AMDGPU_GEM_DOMAIN_GTT |
							AMDGPU_GEM_DOMAIN_CPU);
		robj->allowed_domains = robj->prefered_domains;
		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;

		amdgpu_bo_unreserve(robj);
		break;
	default:
		amdgpu_bo_unreserve(robj);
		r = -EINVAL;
	}

out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

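/**
 * amdgpu_mode_dumb_create - create a dumb buffer for scanout
 *
 * Allocates a CPU accessible VRAM BO sized for the requested
 * width/height/bpp and returns a GEM handle to it.
 */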
int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = (u64)args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = amdgpu_gem_object_create(adev, args->size, 0,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
				     ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
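/* Print id, size, placement and pin count of one GEM object; used as
 * idr_for_each() callback by amdgpu_debugfs_gem_info() below.
 */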
static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
{
	struct drm_gem_object *gobj = ptr;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct seq_file *m = data;

	unsigned domain;
	const char *placement;
	unsigned pin_count;

	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
	switch (domain) {
	case AMDGPU_GEM_DOMAIN_VRAM:
		placement = "VRAM";
		break;
	case AMDGPU_GEM_DOMAIN_GTT:
		placement = " GTT";
		break;
	case AMDGPU_GEM_DOMAIN_CPU:
	default:
		placement = " CPU";
		break;
	}
	seq_printf(m, "\t0x%08x: %12ld byte %s @ 0x%010Lx",
		   id, amdgpu_bo_size(bo), placement,
		   amdgpu_bo_gpu_offset(bo));

	pin_count = ACCESS_ONCE(bo->pin_count);
	if (pin_count)
		seq_printf(m, " pin count %d", pin_count);
	seq_printf(m, "\n");

	return 0;
}

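/* Dump the GEM objects of every client into the debugfs file. */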
static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_file *file;
	int r;

	r = mutex_lock_interruptible(&dev->struct_mutex);
	if (r)
		return r;

	list_for_each_entry(file, &dev->filelist, lhead) {
		struct task_struct *task;

		/*
		 * Although we have a valid reference on file->pid, that does
		 * not guarantee that the task_struct who called get_pid() is
		 * still alive (e.g. get_pid(current) => fork() => exit()).
		 * Therefore, we need to protect this ->comm access using RCU.
		 */
		rcu_read_lock();
		task = pid_task(file->pid, PIDTYPE_PID);
		seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
			   task ? task->comm : "<unknown>");
		rcu_read_unlock();

		spin_lock(&file->table_lock);
		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
		spin_unlock(&file->table_lock);
	}

	mutex_unlock(&dev->struct_mutex);
	return 0;
}

static struct drm_info_list amdgpu_debugfs_gem_list[] = {
	{"amdgpu_gem_info", &amdgpu_debugfs_gem_info, 0, NULL},
};
#endif

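/* Register the amdgpu_gem_info debugfs file if debugfs is enabled. */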
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
#endif
	return 0;
}