/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"

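/*
 * mmap file offsets below this mark are serviced by the legacy DRM map
 * code; TTM buffer objects are mapped at 4GB and above (see radeon_mmap()).
 */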
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

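/*
 * Walk back from the ttm_bo_device embedded in struct radeon_mman to the
 * radeon_device that owns it, via two container_of() steps.
 */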
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

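/*
 * Reference the TTM global memory-accounting and buffer-object state.
 * These objects are shared by all devices; TTM creates them on first
 * reference and destroys them on last unreference.
 */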
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

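/*
 * Pick the TTM backend for this device: the generic AGP backend when the
 * GPU sits behind an AGP bridge, otherwise the driver's own GART backend.
 */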
static struct ttm_backend *
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

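/*
 * Describe the memory pools to TTM: cacheable system pages, GART/AGP
 * mapped pages (TTM_PL_TT), and on-board VRAM reached through the PCI
 * aperture.
 */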
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = 0;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

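/*
 * Evictions always go to cached system memory, whatever the current
 * placement is.
 */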
static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
{
	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;

	switch (bo->mem.mem_type) {
	default:
		return (cur_placement & ~TTM_PL_MASK_CACHING) |
			TTM_PL_FLAG_SYSTEM |
			TTM_PL_FLAG_CACHED;
	}
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

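/*
 * A "null" move: no pages are copied, only the placement bookkeeping on
 * the buffer object is updated.
 */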
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

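/*
 * Copy a buffer with the CP blit engine, fencing the copy so TTM can
 * release the old backing store once the GPU is done with it.
 */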
static int radeon_move_blit(struct ttm_buffer_object *bo,
			    bool evict, int no_wait,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_location;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_location;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	if (unlikely(r)) {
		radeon_fence_unref(&fence);
		return r;
	}
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

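/*
 * VRAM-to-system moves go through an intermediate GTT placement, since
 * the blit engine can only reach VRAM and GART memory.
 */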
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}

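/* The reverse path: bind the pages into GTT first, then blit into VRAM. */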
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}

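/*
 * Dispatch a move to the cheapest applicable strategy, falling back to a
 * CPU memcpy when the blit path is unavailable or fails.
 */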
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible, bool no_wait,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready) {
		/* use memcpy */
		DRM_ERROR("CP is not ready, use memcpy.\n");
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return r;
}

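/*
 * Placement priorities for TTM: radeon_mem_prios is the order tried when
 * looking for free space, radeon_busy_prios the order tried when room has
 * to be made by evicting other buffers.
 */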
const uint32_t radeon_mem_prios[] = {
	TTM_PL_VRAM,
	TTM_PL_TT,
	TTM_PL_SYSTEM,
};

const uint32_t radeon_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_VRAM,
	TTM_PL_SYSTEM,
};

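/* TTM sync-object hooks, implemented on top of radeon fences. */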
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.mem_type_prio = radeon_mem_prios,
	.mem_busy_prio = radeon_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};

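/*
 * Bring up TTM for this device: global state, the buffer-object driver,
 * the VRAM and GTT pools, and a pinned 256KB VRAM buffer reserved for the
 * VGA stolen memory area.
 */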
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of this address space, so set the offset to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
		return r;
	}
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
				 RADEON_GEM_DOMAIN_VRAM, false,
				 &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	if (r) {
		radeon_object_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 rdev->mc.gtt_size / (1024 * 1024));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	if (rdev->stollen_vga_memory) {
		radeon_object_unpin(rdev->stollen_vga_memory);
		radeon_object_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	DRM_INFO("radeon: ttm finalized\n");
}

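/*
 * On the first mmap, TTM's vm_ops are copied and the fault handler is
 * wrapped so the driver sees every fault; radeon_ttm_fault() currently
 * only checks vm_private_data before deferring to TTM.
 */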
static struct vm_operations_struct radeon_ttm_vm_ops;
static struct vm_operations_struct *ttm_vm_ops = NULL;

static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}


/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};

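/*
 * populate() and clear() only record or drop the page array that TTM
 * hands in; no GART programming happens until bind() is called.
 */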
static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}

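/*
 * Bind or unbind the recorded pages in the GART at the offset that TTM
 * picked for this buffer.
 */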
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

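/*
 * Allocate and initialize the driver's own GART-backed TTM backend, used
 * when the device is not on AGP (see radeon_create_ttm_backend_entry()).
 */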
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}