/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"
#include "amd_powerplay.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8

/* max number of rings */
#define AMDGPU_MAX_RINGS 16
#define AMDGPU_MAX_GFX_RINGS 1
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2

/* number of hw syncs before falling back on blocking */
#define AMDGPU_NUM_SYNCS 4

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX (1 << 0)
#define AMDGPU_CG_BLOCK_MC (1 << 1)
#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
#define AMDGPU_CG_BLOCK_UVD (1 << 3)
#define AMDGPU_CG_BLOCK_VCE (1 << 4)
#define AMDGPU_CG_BLOCK_HDP (1 << 5)
#define AMDGPU_CG_BLOCK_BIF (1 << 6)

/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)

/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
#define AMDGPU_PG_SUPPORT_CP (1 << 5)
#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
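
/*
 * Usage sketch (illustrative, inferred from the fields above; not part of
 * the original header): a copy larger than copy_max_bytes is expected to be
 * split into several emit_copy_buffer() packets, with copy_num_dw ring
 * dwords reserved for each packet. The fill path uses fill_max_bytes and
 * fill_num_dw in the same way.
 */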

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  const dma_addr_t *pages_addr, uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ib *ib);
};
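
/*
 * Calling convention sketch (inferred from the parameter names; not part of
 * the original header): write_pte() emits 'count' PTEs starting at GPU
 * address 'pe', where the n-th entry maps the system page looked up for
 * 'addr + n * incr' in 'pages_addr' and carries the access bits in 'flags';
 * set_pte_pde() covers a linear range without the DMA address lookup.
 */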

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t sync_seq;
	atomic64_t last_seq;
	bool initialized;
	struct amdgpu_irq_src *irq_src;
	unsigned irq_type;
	struct timer_list fallback_timer;
	wait_queue_head_t fence_queue;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void*)1ul)

#define AMDGPU_FENCE_FLAG_64BIT (1 << 0)
#define AMDGPU_FENCE_FLAG_INT (1 << 1)

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring *ring;
	uint64_t seq;

	/* filp or special value for fence creator */
	void *owner;

	wait_queue_t fence_wake;
};

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo *bo;
	/* write-back address offset to bo start */
	uint32_t offset;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;
	struct ttm_bo_device bdev;
	bool mem_global_referenced;
	bool initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry *vram;
	struct dentry *gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs *buffer_funcs;
	struct amdgpu_ring *buffer_funcs_ring;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	uint32_t priority;
};

struct amdgpu_bo_va_mapping {
	struct list_head list;
	struct interval_tree_node it;
	uint64_t offset;
	uint32_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	struct mutex mutex;
	/* protected by bo being reserved */
	struct list_head bo_list;
	struct fence *last_pt_update;
	unsigned ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head vm_status;

	/* mappings for this bo_va */
	struct list_head invalids;
	struct list_head valids;

	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	u32 prefered_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	/* list of all virtual addresses to which
	 * this bo is associated
	 */
	struct list_head va;
	/* Constant after initialization */
	struct amdgpu_device *adev;
	struct drm_gem_object gem_base;
	struct amdgpu_bo *parent;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	pid_t pid;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* sub-allocation manager, it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects are on
 * the same alignment).
 */
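
/*
 * Worked example (illustrative, not part of the original comment): in a
 * 64KB manager whose last sub-allocation ends at offset 56KB, an 8KB
 * request still fits at the end (64KB - 56KB >= 8KB) and is placed there.
 * A following 16KB request does not fit, so the allocator waits for the
 * sub-allocations at the lowest offsets to be freed until
 * object_offset + object_size >= 16KB, and returns that freed range.
 */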
struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_MAX_RINGS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

struct amdgpu_sa_bo;

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct fence *fence;
};

/*
 * GEM objects.
 */
struct amdgpu_gem {
	struct mutex mutex;
	struct list_head objects;
};

int amdgpu_gem_init(struct amdgpu_device *adev);
void amdgpu_gem_fini(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
/*
 * Synchronization
 */
struct amdgpu_sync {
	DECLARE_HASHTABLE(fences, 4);
	struct fence *last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *fence);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
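/*
 * Worked example (illustrative): with the 4KB GPU page size above,
 * AMDGPU_GPU_PAGE_ALIGN(5000) == (5000 + 4095) & ~4095 == 8192,
 * i.e. a 5000 byte request is padded up to the next GPU page boundary.
 */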

struct amdgpu_gart {
	dma_addr_t table_addr;
	struct amdgpu_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 gtt_base_align;
	u64 mc_mask;
	const struct firmware *fw; /* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct work_struct flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_rbo;
	struct fence *excl;
	unsigned shared_count;
	struct fence **shared;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	struct amdgpu_ring *ring;
	struct amdgpu_fence *fence;
	struct amdgpu_user_fence *user;
	bool grabbed_vmid;
	struct amdgpu_vm *vm;
	struct amdgpu_ctx *ctx;
	struct amdgpu_sync sync;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;
	uint32_t flags;
	/* resulting sequence number */
	uint64_t sequence;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **fence);

struct amdgpu_ring {
	struct amdgpu_device *adev;
	const struct amdgpu_ring_funcs *funcs;
	struct amdgpu_fence_driver fence_drv;
	struct amd_gpu_scheduler sched;

	spinlock_t fence_lock;
	struct amdgpu_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr_offs;
	u64 next_rptr_gpu_addr;
	volatile u32 *next_rptr_cpu_addr;
	unsigned wptr;
	unsigned wptr_old;
	unsigned ring_size;
	unsigned max_dw;
	int count_dw;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
	u32 nop;
	u32 idx;
	u32 me;
	u32 pipe;
	u32 queue;
	struct amdgpu_bo *mqd_obj;
	u32 doorbell_index;
	bool use_doorbell;
	unsigned wptr_offs;
	unsigned next_rptr_offs;
	unsigned fence_offs;
	struct amdgpu_ctx *current_ctx;
	enum amdgpu_ring_type type;
	char name[16];
	bool is_pte_ring;
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
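/*
 * Worked example (illustrative, not part of the original header): the
 * number of PTEs per page table follows the amdgpu_vm_block_size module
 * parameter; e.g. a block size of 9 gives 1 << 9 = 512 entries, covering
 * 512 * 4KB = 2MB of GPU virtual address space per page table.
 */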

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID (1 << 0)
#define AMDGPU_PTE_SYSTEM (1 << 1)
#define AMDGPU_PTE_SNOOPED (1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1 << 4)

#define AMDGPU_PTE_READABLE (1 << 5)
#define AMDGPU_PTE_WRITEABLE (1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB (0 << 7)
#define AMDGPU_PTE_FRAG_64KB (4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
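/*
 * Illustrative arithmetic: the fragment value lives in PTE bits 7+, and
 * AMDGPU_LOG2_PAGES_PER_FRAG = 4 means a 64KB fragment spans
 * 1 << 4 = 16 contiguous 4KB GPU pages, which is what the value 4 in
 * AMDGPU_PTE_FRAG_64KB above encodes.
 */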

/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2

struct amdgpu_vm_pt {
	struct amdgpu_bo_list_entry entry;
	uint64_t addr;
};

struct amdgpu_vm_id {
	unsigned id;
	uint64_t pd_gpu_addr;
	/* last flushed PD/PT update */
	struct fence *flushed_updates;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	spinlock_t it_lock;
	struct rb_root va;

	/* protecting invalidated */
	spinlock_t status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_bo *page_directory;
	unsigned max_pde_used;
	struct fence *page_directory_fence;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt *page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t freed_lock;
};

struct amdgpu_vm_manager_id {
	struct list_head list;
	struct fence *active;
	atomic_long_t owner;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex lock;
	unsigned num_ids;
	struct list_head ids_lru;
	struct amdgpu_vm_manager_id ids[AMDGPU_NUM_VM];

	uint32_t max_pfn;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct amdgpu_ring *vm_pte_funcs_ring;
};

void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct fence *fence);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
int amdgpu_vm_free_job(struct amdgpu_job *job);

Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 994 | /* |
| 995 | * context related structures |
| 996 | */ |
| 997 | |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 998 | struct amdgpu_ctx_ring { |
Christian König | 91404fb | 2015-08-05 18:33:21 +0200 | [diff] [blame] | 999 | uint64_t sequence; |
Chunming Zhou | 37cd0ca | 2015-12-10 15:45:11 +0800 | [diff] [blame] | 1000 | struct fence **fences; |
Christian König | 91404fb | 2015-08-05 18:33:21 +0200 | [diff] [blame] | 1001 | struct amd_sched_entity entity; |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1002 | }; |
| 1003 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1004 | struct amdgpu_ctx { |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1005 | struct kref refcount; |
Chunming Zhou | 9cb7e5a | 2015-07-21 13:17:19 +0800 | [diff] [blame] | 1006 | struct amdgpu_device *adev; |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1007 | unsigned reset_counter; |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1008 | spinlock_t ring_lock; |
Chunming Zhou | 37cd0ca | 2015-12-10 15:45:11 +0800 | [diff] [blame] | 1009 | struct fence **fences; |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1010 | struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS]; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1011 | }; |
| 1012 | |
| 1013 | struct amdgpu_ctx_mgr { |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1014 | struct amdgpu_device *adev; |
| 1015 | struct mutex lock; |
| 1016 | /* protected by lock */ |
| 1017 | struct idr ctx_handles; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1018 | }; |
| 1019 | |
Chunming Zhou | d033a6d | 2015-11-05 15:23:09 +0800 | [diff] [blame] | 1020 | int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri, |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 1021 | struct amdgpu_ctx *ctx); |
| 1022 | void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1023 | |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1024 | struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); |
| 1025 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx); |
| 1026 | |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1027 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, |
Christian König | ce882e6 | 2015-08-19 15:00:55 +0200 | [diff] [blame] | 1028 | struct fence *fence); |
Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1029 | struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, |
| 1030 | struct amdgpu_ring *ring, uint64_t seq); |
| 1031 | |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1032 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, |
| 1033 | struct drm_file *filp); |
| 1034 | |
Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 1035 | void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); |
| 1036 | void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1037 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1038 | /* |
| 1039 | * file private structure |
| 1040 | */ |
| 1041 | |
| 1042 | struct amdgpu_fpriv { |
| 1043 | struct amdgpu_vm vm; |
| 1044 | struct mutex bo_list_lock; |
| 1045 | struct idr bo_list_handles; |
Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1046 | struct amdgpu_ctx_mgr ctx_mgr; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1047 | }; |
| 1048 | |
| 1049 | /* |
| 1050 | * residency list |
| 1051 | */ |
| 1052 | |
| 1053 | struct amdgpu_bo_list { |
| 1054 | struct mutex lock; |
| 1055 | struct amdgpu_bo *gds_obj; |
| 1056 | struct amdgpu_bo *gws_obj; |
| 1057 | struct amdgpu_bo *oa_obj; |
| 1058 | bool has_userptr; |
| 1059 | unsigned num_entries; |
| 1060 | struct amdgpu_bo_list_entry *array; |
| 1061 | }; |
| 1062 | |
| 1063 | struct amdgpu_bo_list * |
| 1064 | amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); |
Christian König | 636ce25 | 2015-12-18 21:26:47 +0100 | [diff] [blame] | 1065 | void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, |
| 1066 | struct list_head *validated); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1067 | void amdgpu_bo_list_put(struct amdgpu_bo_list *list); |
| 1068 | void amdgpu_bo_list_free(struct amdgpu_bo_list *list); |
| 1069 | |
| 1070 | /* |
| 1071 | * GFX stuff |
| 1072 | */ |
| 1073 | #include "clearstate_defs.h" |
| 1074 | |
| 1075 | struct amdgpu_rlc { |
| 1076 | /* for power gating */ |
| 1077 | struct amdgpu_bo *save_restore_obj; |
| 1078 | uint64_t save_restore_gpu_addr; |
| 1079 | volatile uint32_t *sr_ptr; |
| 1080 | const u32 *reg_list; |
| 1081 | u32 reg_list_size; |
| 1082 | /* for clear state */ |
| 1083 | struct amdgpu_bo *clear_state_obj; |
| 1084 | uint64_t clear_state_gpu_addr; |
| 1085 | volatile uint32_t *cs_ptr; |
| 1086 | const struct cs_section_def *cs_data; |
| 1087 | u32 clear_state_size; |
| 1088 | /* for cp tables */ |
| 1089 | struct amdgpu_bo *cp_table_obj; |
| 1090 | uint64_t cp_table_gpu_addr; |
| 1091 | volatile uint32_t *cp_table_ptr; |
| 1092 | u32 cp_table_size; |
| 1093 | }; |
| 1094 | |
| 1095 | struct amdgpu_mec { |
| 1096 | struct amdgpu_bo *hpd_eop_obj; |
| 1097 | u64 hpd_eop_gpu_addr; |
| 1098 | u32 num_pipe; |
| 1099 | u32 num_mec; |
| 1100 | u32 num_queue; |
| 1101 | }; |
| 1102 | |
| 1103 | /* |
| 1104 | * GPU scratch registers structures, functions & helpers |
| 1105 | */ |
| 1106 | struct amdgpu_scratch { |
| 1107 | unsigned num_reg; |
| 1108 | uint32_t reg_base; |
| 1109 | bool free[32]; |
| 1110 | uint32_t reg[32]; |
| 1111 | }; |
| 1112 | |
| 1113 | /* |
| 1114 | * GFX configurations |
| 1115 | */ |
| 1116 | struct amdgpu_gca_config { |
| 1117 | unsigned max_shader_engines; |
| 1118 | unsigned max_tile_pipes; |
| 1119 | unsigned max_cu_per_sh; |
| 1120 | unsigned max_sh_per_se; |
| 1121 | unsigned max_backends_per_se; |
| 1122 | unsigned max_texture_channel_caches; |
| 1123 | unsigned max_gprs; |
| 1124 | unsigned max_gs_threads; |
| 1125 | unsigned max_hw_contexts; |
| 1126 | unsigned sc_prim_fifo_size_frontend; |
| 1127 | unsigned sc_prim_fifo_size_backend; |
| 1128 | unsigned sc_hiz_tile_fifo_size; |
| 1129 | unsigned sc_earlyz_tile_fifo_size; |
| 1130 | |
| 1131 | unsigned num_tile_pipes; |
| 1132 | unsigned backend_enable_mask; |
| 1133 | unsigned mem_max_burst_length_bytes; |
| 1134 | unsigned mem_row_size_in_kb; |
| 1135 | unsigned shader_engine_tile_size; |
| 1136 | unsigned num_gpus; |
| 1137 | unsigned multi_gpu_tile_size; |
| 1138 | unsigned mc_arb_ramcfg; |
| 1139 | unsigned gb_addr_config; |
| 1140 | |
| 1141 | uint32_t tile_mode_array[32]; |
| 1142 | uint32_t macrotile_mode_array[16]; |
| 1143 | }; |
| 1144 | |
| 1145 | struct amdgpu_gfx { |
| 1146 | struct mutex gpu_clock_mutex; |
| 1147 | struct amdgpu_gca_config config; |
| 1148 | struct amdgpu_rlc rlc; |
| 1149 | struct amdgpu_mec mec; |
| 1150 | struct amdgpu_scratch scratch; |
| 1151 | const struct firmware *me_fw; /* ME firmware */ |
| 1152 | uint32_t me_fw_version; |
| 1153 | const struct firmware *pfp_fw; /* PFP firmware */ |
| 1154 | uint32_t pfp_fw_version; |
| 1155 | const struct firmware *ce_fw; /* CE firmware */ |
| 1156 | uint32_t ce_fw_version; |
| 1157 | const struct firmware *rlc_fw; /* RLC firmware */ |
| 1158 | uint32_t rlc_fw_version; |
| 1159 | const struct firmware *mec_fw; /* MEC firmware */ |
| 1160 | uint32_t mec_fw_version; |
| 1161 | const struct firmware *mec2_fw; /* MEC2 firmware */ |
| 1162 | uint32_t mec2_fw_version; |
Ken Wang | 02558a0 | 2015-06-03 19:52:06 +0800 | [diff] [blame] | 1163 | uint32_t me_feature_version; |
| 1164 | uint32_t ce_feature_version; |
| 1165 | uint32_t pfp_feature_version; |
Jammy Zhou | 351643d | 2015-08-04 10:43:50 +0800 | [diff] [blame] | 1166 | uint32_t rlc_feature_version; |
| 1167 | uint32_t mec_feature_version; |
| 1168 | uint32_t mec2_feature_version; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1169 | struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; |
| 1170 | unsigned num_gfx_rings; |
| 1171 | struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; |
| 1172 | unsigned num_compute_rings; |
| 1173 | struct amdgpu_irq_src eop_irq; |
| 1174 | struct amdgpu_irq_src priv_reg_irq; |
| 1175 | struct amdgpu_irq_src priv_inst_irq; |
| 1176 | /* gfx status */ |
| 1177 | uint32_t gfx_current_status; |
Ken Wang | a101a89 | 2015-06-03 17:47:54 +0800 | [diff] [blame] | 1178 | /* ce ram size */ |
| 1179 | unsigned ce_ram_size; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1180 | }; |
| 1181 | |
| 1182 | int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm, |
| 1183 | unsigned size, struct amdgpu_ib *ib); |
| 1184 | void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib); |
| 1185 | int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs, |
| 1186 | struct amdgpu_ib *ib, void *owner); |
| 1187 | int amdgpu_ib_pool_init(struct amdgpu_device *adev); |
| 1188 | void amdgpu_ib_pool_fini(struct amdgpu_device *adev); |
| 1189 | int amdgpu_ib_ring_tests(struct amdgpu_device *adev); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1190 | int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); |
Jammy Zhou | edff0e2 | 2015-09-01 13:04:08 +0800 | [diff] [blame] | 1191 | void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1192 | void amdgpu_ring_commit(struct amdgpu_ring *ring); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1193 | void amdgpu_ring_undo(struct amdgpu_ring *ring); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1194 | unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, |
| 1195 | uint32_t **data); |
| 1196 | int amdgpu_ring_restore(struct amdgpu_ring *ring, |
| 1197 | unsigned size, uint32_t *data); |
| 1198 | int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, |
| 1199 | unsigned ring_size, u32 nop, u32 align_mask, |
| 1200 | struct amdgpu_irq_src *irq_src, unsigned irq_type, |
| 1201 | enum amdgpu_ring_type ring_type); |
| 1202 | void amdgpu_ring_fini(struct amdgpu_ring *ring); |
Christian König | 8120b61 | 2015-10-22 11:29:33 +0200 | [diff] [blame] | 1203 | struct amdgpu_ring *amdgpu_ring_from_fence(struct fence *f); |
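/*
 * Illustrative sketch (not part of the driver): direct ring submission
 * typically reserves space, emits dwords and commits; amdgpu_ring_undo()
 * would revert the reservation if packet building failed.  The helper name
 * and the NOP payload below are placeholders for engine-specific packets.
 *
 *	static int example_emit_nops(struct amdgpu_ring *ring)
 *	{
 *		int r = amdgpu_ring_alloc(ring, 16);	// reserve 16 dwords
 *
 *		if (r)
 *			return r;
 *		amdgpu_ring_insert_nop(ring, 16);	// fill them with NOPs
 *		amdgpu_ring_commit(ring);		// hand them to the GPU
 *		return 0;
 *	}
 */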
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1204 | |
| 1205 | /* |
| 1206 | * CS. |
| 1207 | */ |
| 1208 | struct amdgpu_cs_chunk { |
| 1209 | uint32_t chunk_id; |
| 1210 | uint32_t length_dw; |
| 1211 | uint32_t *kdata; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1212 | }; |
| 1213 | |
| 1214 | struct amdgpu_cs_parser { |
| 1215 | struct amdgpu_device *adev; |
| 1216 | struct drm_file *filp; |
Christian König | 3cb485f | 2015-05-11 15:34:59 +0200 | [diff] [blame] | 1217 | struct amdgpu_ctx *ctx; |
Christian König | c3cca41 | 2015-12-15 14:41:33 +0100 | [diff] [blame] | 1218 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1219 | /* chunks */ |
| 1220 | unsigned nchunks; |
| 1221 | struct amdgpu_cs_chunk *chunks; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1222 | |
Christian König | c3cca41 | 2015-12-15 14:41:33 +0100 | [diff] [blame] | 1223 | /* indirect buffers */ |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1224 | uint32_t num_ibs; |
Christian König | c3cca41 | 2015-12-15 14:41:33 +0100 | [diff] [blame] | 1225 | struct amdgpu_ib *ibs; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1226 | |
Christian König | c3cca41 | 2015-12-15 14:41:33 +0100 | [diff] [blame] | 1227 | /* buffer objects */ |
| 1228 | struct ww_acquire_ctx ticket; |
| 1229 | struct amdgpu_bo_list *bo_list; |
| 1230 | struct amdgpu_bo_list_entry vm_pd; |
| 1231 | struct list_head validated; |
| 1232 | struct fence *fence; |
| 1233 | uint64_t bytes_moved_threshold; |
| 1234 | uint64_t bytes_moved; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1235 | |
| 1236 | /* user fence */ |
Christian König | 91acbeb | 2015-12-14 16:42:31 +0100 | [diff] [blame] | 1237 | struct amdgpu_user_fence uf; |
| 1238 | struct amdgpu_bo_list_entry uf_entry; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1239 | }; |
| 1240 | |
Chunming Zhou | bb977d3 | 2015-08-18 15:16:40 +0800 | [diff] [blame] | 1241 | struct amdgpu_job { |
| 1242 | struct amd_sched_job base; |
| 1243 | struct amdgpu_device *adev; |
Chunming Zhou | bb977d3 | 2015-08-18 15:16:40 +0800 | [diff] [blame] | 1244 | struct amdgpu_ib *ibs; |
| 1245 | uint32_t num_ibs; |
Christian König | e284022 | 2015-11-05 19:49:48 +0100 | [diff] [blame] | 1246 | void *owner; |
Chunming Zhou | bb977d3 | 2015-08-18 15:16:40 +0800 | [diff] [blame] | 1247 | struct amdgpu_user_fence uf; |
Junwei Zhang | 4c7eb91 | 2015-09-09 09:05:55 +0800 | [diff] [blame] | 1248 | int (*free_job)(struct amdgpu_job *job); |
Chunming Zhou | bb977d3 | 2015-08-18 15:16:40 +0800 | [diff] [blame] | 1249 | }; |
Junwei Zhang | a6db8a3 | 2015-09-09 09:21:19 +0800 | [diff] [blame] | 1250 | #define to_amdgpu_job(sched_job) \ |
| 1251 | container_of((sched_job), struct amdgpu_job, base) |
Chunming Zhou | bb977d3 | 2015-08-18 15:16:40 +0800 | [diff] [blame] | 1252 | |
Christian König | 7270f83 | 2016-01-31 11:00:41 +0100 | [diff] [blame] | 1253 | static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, |
| 1254 | uint32_t ib_idx, int idx) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1255 | { |
| 1256 | return p->ibs[ib_idx].ptr[idx]; |
| 1257 | } |
| 1258 | |
Christian König | 7270f83 | 2016-01-31 11:00:41 +0100 | [diff] [blame] | 1259 | static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, |
| 1260 | uint32_t ib_idx, int idx, |
| 1261 | uint32_t value) |
| 1262 | { |
| 1263 | p->ibs[ib_idx].ptr[idx] = value; |
| 1264 | } |
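/*
 * Illustrative sketch (not part of the driver): CS parsers use the two
 * helpers above to read and patch individual dwords of an indirect buffer,
 * e.g. rewriting an address after relocation.  ib_idx, reloc_idx and offset
 * are hypothetical values for this example.
 *
 *	uint32_t lo = amdgpu_get_ib_value(p, ib_idx, reloc_idx);
 *
 *	amdgpu_set_ib_value(p, ib_idx, reloc_idx, lo + offset);
 */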
| 1265 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1266 | /* |
| 1267 | * Writeback |
| 1268 | */ |
| 1269 | #define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */ |
| 1270 | |
| 1271 | struct amdgpu_wb { |
| 1272 | struct amdgpu_bo *wb_obj; |
| 1273 | volatile uint32_t *wb; |
| 1274 | uint64_t gpu_addr; |
| 1275 | u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */ |
| 1276 | unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)]; |
| 1277 | }; |
| 1278 | |
| 1279 | int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb); |
| 1280 | void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb); |
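/*
 * Illustrative sketch (not part of the driver): a ring reserves a writeback
 * slot, points the GPU at its address and polls the value from the CPU.
 * The 4-bytes-per-slot address math is an assumption made for this example.
 *
 *	u32 offs;
 *
 *	if (!amdgpu_wb_get(adev, &offs)) {
 *		u64 gpu_addr = adev->wb.gpu_addr + offs * 4;	// GPU writes here
 *		u32 val = adev->wb.wb[offs];			// CPU reads it back
 *
 *		...
 *		amdgpu_wb_free(adev, offs);
 *	}
 */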
| 1281 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1282 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1283 | |
| 1284 | enum amdgpu_int_thermal_type { |
| 1285 | THERMAL_TYPE_NONE, |
| 1286 | THERMAL_TYPE_EXTERNAL, |
| 1287 | THERMAL_TYPE_EXTERNAL_GPIO, |
| 1288 | THERMAL_TYPE_RV6XX, |
| 1289 | THERMAL_TYPE_RV770, |
| 1290 | THERMAL_TYPE_ADT7473_WITH_INTERNAL, |
| 1291 | THERMAL_TYPE_EVERGREEN, |
| 1292 | THERMAL_TYPE_SUMO, |
| 1293 | THERMAL_TYPE_NI, |
| 1294 | THERMAL_TYPE_SI, |
| 1295 | THERMAL_TYPE_EMC2103_WITH_INTERNAL, |
| 1296 | THERMAL_TYPE_CI, |
| 1297 | THERMAL_TYPE_KV, |
| 1298 | }; |
| 1299 | |
| 1300 | enum amdgpu_dpm_auto_throttle_src { |
| 1301 | AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, |
| 1302 | AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL |
| 1303 | }; |
| 1304 | |
| 1305 | enum amdgpu_dpm_event_src { |
| 1306 | AMDGPU_DPM_EVENT_SRC_ANALOG = 0, |
| 1307 | AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, |
| 1308 | AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, |
| 1309 | AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, |
| 1310 | AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 |
| 1311 | }; |
| 1312 | |
| 1313 | #define AMDGPU_MAX_VCE_LEVELS 6 |
| 1314 | |
| 1315 | enum amdgpu_vce_level { |
| 1316 | AMDGPU_VCE_LEVEL_AC_ALL = 0, /* AC, All cases */ |
| 1317 | AMDGPU_VCE_LEVEL_DC_EE = 1, /* DC, entropy encoding */ |
| 1318 | AMDGPU_VCE_LEVEL_DC_LL_LOW = 2, /* DC, low latency queue, res <= 720 */ |
| 1319 | AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */ |
| 1320 | AMDGPU_VCE_LEVEL_DC_GP_LOW = 4, /* DC, general purpose queue, res <= 720 */ |
| 1321 | AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */ |
| 1322 | }; |
| 1323 | |
| 1324 | struct amdgpu_ps { |
| 1325 | u32 caps; /* vbios flags */ |
| 1326 | u32 class; /* vbios flags */ |
| 1327 | u32 class2; /* vbios flags */ |
| 1328 | /* UVD clocks */ |
| 1329 | u32 vclk; |
| 1330 | u32 dclk; |
| 1331 | /* VCE clocks */ |
| 1332 | u32 evclk; |
| 1333 | u32 ecclk; |
| 1334 | bool vce_active; |
| 1335 | enum amdgpu_vce_level vce_level; |
| 1336 | /* asic priv */ |
| 1337 | void *ps_priv; |
| 1338 | }; |
| 1339 | |
| 1340 | struct amdgpu_dpm_thermal { |
| 1341 | /* thermal interrupt work */ |
| 1342 | struct work_struct work; |
| 1343 | /* low temperature threshold */ |
| 1344 | int min_temp; |
| 1345 | /* high temperature threshold */ |
| 1346 | int max_temp; |
| 1347 | /* was last interrupt low to high or high to low */ |
| 1348 | bool high_to_low; |
| 1349 | /* interrupt source */ |
| 1350 | struct amdgpu_irq_src irq; |
| 1351 | }; |
| 1352 | |
| 1353 | enum amdgpu_clk_action |
| 1354 | { |
| 1355 | AMDGPU_SCLK_UP = 1, |
| 1356 | AMDGPU_SCLK_DOWN |
| 1357 | }; |
| 1358 | |
| 1359 | struct amdgpu_blacklist_clocks |
| 1360 | { |
| 1361 | u32 sclk; |
| 1362 | u32 mclk; |
| 1363 | enum amdgpu_clk_action action; |
| 1364 | }; |
| 1365 | |
| 1366 | struct amdgpu_clock_and_voltage_limits { |
| 1367 | u32 sclk; |
| 1368 | u32 mclk; |
| 1369 | u16 vddc; |
| 1370 | u16 vddci; |
| 1371 | }; |
| 1372 | |
| 1373 | struct amdgpu_clock_array { |
| 1374 | u32 count; |
| 1375 | u32 *values; |
| 1376 | }; |
| 1377 | |
| 1378 | struct amdgpu_clock_voltage_dependency_entry { |
| 1379 | u32 clk; |
| 1380 | u16 v; |
| 1381 | }; |
| 1382 | |
| 1383 | struct amdgpu_clock_voltage_dependency_table { |
| 1384 | u32 count; |
| 1385 | struct amdgpu_clock_voltage_dependency_entry *entries; |
| 1386 | }; |
| 1387 | |
| 1388 | union amdgpu_cac_leakage_entry { |
| 1389 | struct { |
| 1390 | u16 vddc; |
| 1391 | u32 leakage; |
| 1392 | }; |
| 1393 | struct { |
| 1394 | u16 vddc1; |
| 1395 | u16 vddc2; |
| 1396 | u16 vddc3; |
| 1397 | }; |
| 1398 | }; |
| 1399 | |
| 1400 | struct amdgpu_cac_leakage_table { |
| 1401 | u32 count; |
| 1402 | union amdgpu_cac_leakage_entry *entries; |
| 1403 | }; |
| 1404 | |
| 1405 | struct amdgpu_phase_shedding_limits_entry { |
| 1406 | u16 voltage; |
| 1407 | u32 sclk; |
| 1408 | u32 mclk; |
| 1409 | }; |
| 1410 | |
| 1411 | struct amdgpu_phase_shedding_limits_table { |
| 1412 | u32 count; |
| 1413 | struct amdgpu_phase_shedding_limits_entry *entries; |
| 1414 | }; |
| 1415 | |
| 1416 | struct amdgpu_uvd_clock_voltage_dependency_entry { |
| 1417 | u32 vclk; |
| 1418 | u32 dclk; |
| 1419 | u16 v; |
| 1420 | }; |
| 1421 | |
| 1422 | struct amdgpu_uvd_clock_voltage_dependency_table { |
| 1423 | u8 count; |
| 1424 | struct amdgpu_uvd_clock_voltage_dependency_entry *entries; |
| 1425 | }; |
| 1426 | |
| 1427 | struct amdgpu_vce_clock_voltage_dependency_entry { |
| 1428 | u32 ecclk; |
| 1429 | u32 evclk; |
| 1430 | u16 v; |
| 1431 | }; |
| 1432 | |
| 1433 | struct amdgpu_vce_clock_voltage_dependency_table { |
| 1434 | u8 count; |
| 1435 | struct amdgpu_vce_clock_voltage_dependency_entry *entries; |
| 1436 | }; |
| 1437 | |
| 1438 | struct amdgpu_ppm_table { |
| 1439 | u8 ppm_design; |
| 1440 | u16 cpu_core_number; |
| 1441 | u32 platform_tdp; |
| 1442 | u32 small_ac_platform_tdp; |
| 1443 | u32 platform_tdc; |
| 1444 | u32 small_ac_platform_tdc; |
| 1445 | u32 apu_tdp; |
| 1446 | u32 dgpu_tdp; |
| 1447 | u32 dgpu_ulv_power; |
| 1448 | u32 tj_max; |
| 1449 | }; |
| 1450 | |
| 1451 | struct amdgpu_cac_tdp_table { |
| 1452 | u16 tdp; |
| 1453 | u16 configurable_tdp; |
| 1454 | u16 tdc; |
| 1455 | u16 battery_power_limit; |
| 1456 | u16 small_power_limit; |
| 1457 | u16 low_cac_leakage; |
| 1458 | u16 high_cac_leakage; |
| 1459 | u16 maximum_power_delivery_limit; |
| 1460 | }; |
| 1461 | |
| 1462 | struct amdgpu_dpm_dynamic_state { |
| 1463 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk; |
| 1464 | struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk; |
| 1465 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk; |
| 1466 | struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk; |
| 1467 | struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk; |
| 1468 | struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table; |
| 1469 | struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table; |
| 1470 | struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table; |
| 1471 | struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table; |
| 1472 | struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk; |
| 1473 | struct amdgpu_clock_array valid_sclk_values; |
| 1474 | struct amdgpu_clock_array valid_mclk_values; |
| 1475 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc; |
| 1476 | struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac; |
| 1477 | u32 mclk_sclk_ratio; |
| 1478 | u32 sclk_mclk_delta; |
| 1479 | u16 vddc_vddci_delta; |
| 1480 | u16 min_vddc_for_pcie_gen2; |
| 1481 | struct amdgpu_cac_leakage_table cac_leakage_table; |
| 1482 | struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table; |
| 1483 | struct amdgpu_ppm_table *ppm_table; |
| 1484 | struct amdgpu_cac_tdp_table *cac_tdp_table; |
| 1485 | }; |
| 1486 | |
| 1487 | struct amdgpu_dpm_fan { |
| 1488 | u16 t_min; |
| 1489 | u16 t_med; |
| 1490 | u16 t_high; |
| 1491 | u16 pwm_min; |
| 1492 | u16 pwm_med; |
| 1493 | u16 pwm_high; |
| 1494 | u8 t_hyst; |
| 1495 | u32 cycle_delay; |
| 1496 | u16 t_max; |
| 1497 | u8 control_mode; |
| 1498 | u16 default_max_fan_pwm; |
| 1499 | u16 default_fan_output_sensitivity; |
| 1500 | u16 fan_output_sensitivity; |
| 1501 | bool ucode_fan_control; |
| 1502 | }; |
| 1503 | |
| 1504 | enum amdgpu_pcie_gen { |
| 1505 | AMDGPU_PCIE_GEN1 = 0, |
| 1506 | AMDGPU_PCIE_GEN2 = 1, |
| 1507 | AMDGPU_PCIE_GEN3 = 2, |
| 1508 | AMDGPU_PCIE_GEN_INVALID = 0xffff |
| 1509 | }; |
| 1510 | |
| 1511 | enum amdgpu_dpm_forced_level { |
| 1512 | AMDGPU_DPM_FORCED_LEVEL_AUTO = 0, |
| 1513 | AMDGPU_DPM_FORCED_LEVEL_LOW = 1, |
| 1514 | AMDGPU_DPM_FORCED_LEVEL_HIGH = 2, |
Eric Huang | f3898ea | 2015-12-11 16:24:34 -0500 | [diff] [blame] | 1515 | AMDGPU_DPM_FORCED_LEVEL_MANUAL = 3, |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1516 | }; |
| 1517 | |
| 1518 | struct amdgpu_vce_state { |
| 1519 | /* vce clocks */ |
| 1520 | u32 evclk; |
| 1521 | u32 ecclk; |
| 1522 | /* gpu clocks */ |
| 1523 | u32 sclk; |
| 1524 | u32 mclk; |
| 1525 | u8 clk_idx; |
| 1526 | u8 pstate; |
| 1527 | }; |
| 1528 | |
| 1529 | struct amdgpu_dpm_funcs { |
| 1530 | int (*get_temperature)(struct amdgpu_device *adev); |
| 1531 | int (*pre_set_power_state)(struct amdgpu_device *adev); |
| 1532 | int (*set_power_state)(struct amdgpu_device *adev); |
| 1533 | void (*post_set_power_state)(struct amdgpu_device *adev); |
| 1534 | void (*display_configuration_changed)(struct amdgpu_device *adev); |
| 1535 | u32 (*get_sclk)(struct amdgpu_device *adev, bool low); |
| 1536 | u32 (*get_mclk)(struct amdgpu_device *adev, bool low); |
| 1537 | void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps); |
| 1538 | void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m); |
| 1539 | int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level); |
| 1540 | bool (*vblank_too_short)(struct amdgpu_device *adev); |
| 1541 | void (*powergate_uvd)(struct amdgpu_device *adev, bool gate); |
Sonny Jiang | b7a0776 | 2015-05-28 15:47:53 -0400 | [diff] [blame] | 1542 | void (*powergate_vce)(struct amdgpu_device *adev, bool gate); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1543 | void (*enable_bapm)(struct amdgpu_device *adev, bool enable); |
| 1544 | void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode); |
| 1545 | u32 (*get_fan_control_mode)(struct amdgpu_device *adev); |
| 1546 | int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); |
| 1547 | int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); |
| 1548 | }; |
| 1549 | |
| 1550 | struct amdgpu_dpm { |
| 1551 | struct amdgpu_ps *ps; |
| 1552 | /* number of valid power states */ |
| 1553 | int num_ps; |
| 1554 | /* currently active power state */ |
| 1555 | struct amdgpu_ps *current_ps; |
| 1556 | /* requested power state */ |
| 1557 | struct amdgpu_ps *requested_ps; |
| 1558 | /* boot up power state */ |
| 1559 | struct amdgpu_ps *boot_ps; |
| 1560 | /* default uvd power state */ |
| 1561 | struct amdgpu_ps *uvd_ps; |
| 1562 | /* vce requirements */ |
| 1563 | struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS]; |
| 1564 | enum amdgpu_vce_level vce_level; |
Rex Zhu | 3a2c788 | 2015-08-25 15:57:43 +0800 | [diff] [blame] | 1565 | enum amd_pm_state_type state; |
| 1566 | enum amd_pm_state_type user_state; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1567 | u32 platform_caps; |
| 1568 | u32 voltage_response_time; |
| 1569 | u32 backbias_response_time; |
| 1570 | void *priv; |
| 1571 | u32 new_active_crtcs; |
| 1572 | int new_active_crtc_count; |
| 1573 | u32 current_active_crtcs; |
| 1574 | int current_active_crtc_count; |
| 1575 | struct amdgpu_dpm_dynamic_state dyn_state; |
| 1576 | struct amdgpu_dpm_fan fan; |
| 1577 | u32 tdp_limit; |
| 1578 | u32 near_tdp_limit; |
| 1579 | u32 near_tdp_limit_adjusted; |
| 1580 | u32 sq_ramping_threshold; |
| 1581 | u32 cac_leakage; |
| 1582 | u16 tdp_od_limit; |
| 1583 | u32 tdp_adjustment; |
| 1584 | u16 load_line_slope; |
| 1585 | bool power_control; |
| 1586 | bool ac_power; |
| 1587 | /* special states active */ |
| 1588 | bool thermal_active; |
| 1589 | bool uvd_active; |
| 1590 | bool vce_active; |
| 1591 | /* thermal handling */ |
| 1592 | struct amdgpu_dpm_thermal thermal; |
| 1593 | /* forced levels */ |
| 1594 | enum amdgpu_dpm_forced_level forced_level; |
| 1595 | }; |
| 1596 | |
| 1597 | struct amdgpu_pm { |
| 1598 | struct mutex mutex; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1599 | u32 current_sclk; |
| 1600 | u32 current_mclk; |
| 1601 | u32 default_sclk; |
| 1602 | u32 default_mclk; |
| 1603 | struct amdgpu_i2c_chan *i2c_bus; |
| 1604 | /* internal thermal controller on rv6xx+ */ |
| 1605 | enum amdgpu_int_thermal_type int_thermal_type; |
| 1606 | struct device *int_hwmon_dev; |
| 1607 | /* fan control parameters */ |
| 1608 | bool no_fan; |
| 1609 | u8 fan_pulses_per_revolution; |
| 1610 | u8 fan_min_rpm; |
| 1611 | u8 fan_max_rpm; |
| 1612 | /* dpm */ |
| 1613 | bool dpm_enabled; |
Alex Deucher | c86f5ebf | 2015-10-23 10:45:14 -0400 | [diff] [blame] | 1614 | bool sysfs_initialized; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1615 | struct amdgpu_dpm dpm; |
| 1616 | const struct firmware *fw; /* SMC firmware */ |
| 1617 | uint32_t fw_version; |
| 1618 | const struct amdgpu_dpm_funcs *funcs; |
Alex Deucher | d0dd7f0 | 2015-11-11 19:45:06 -0500 | [diff] [blame] | 1619 | uint32_t pcie_gen_mask; |
| 1620 | uint32_t pcie_mlw_mask; |
Rex Zhu | 7fb72a1 | 2015-11-19 13:35:30 +0800 | [diff] [blame] | 1621 | struct amd_pp_display_configuration pm_display_cfg; /* set by DAL */ |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1622 | }; |
| 1623 | |
Alex Deucher | d0dd7f0 | 2015-11-11 19:45:06 -0500 | [diff] [blame] | 1624 | void amdgpu_get_pcie_info(struct amdgpu_device *adev); |
| 1625 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1626 | /* |
| 1627 | * UVD |
| 1628 | */ |
| 1629 | #define AMDGPU_MAX_UVD_HANDLES 10 |
| 1630 | #define AMDGPU_UVD_STACK_SIZE (1024*1024) |
| 1631 | #define AMDGPU_UVD_HEAP_SIZE (1024*1024) |
| 1632 | #define AMDGPU_UVD_FIRMWARE_OFFSET 256 |
| 1633 | |
| 1634 | struct amdgpu_uvd { |
| 1635 | struct amdgpu_bo *vcpu_bo; |
| 1636 | void *cpu_addr; |
| 1637 | uint64_t gpu_addr; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1638 | atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; |
| 1639 | struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; |
| 1640 | struct delayed_work idle_work; |
| 1641 | const struct firmware *fw; /* UVD firmware */ |
| 1642 | struct amdgpu_ring ring; |
| 1643 | struct amdgpu_irq_src irq; |
| 1644 | bool address_64_bit; |
| 1645 | }; |
| 1646 | |
| 1647 | /* |
| 1648 | * VCE |
| 1649 | */ |
| 1650 | #define AMDGPU_MAX_VCE_HANDLES 16 |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1651 | #define AMDGPU_VCE_FIRMWARE_OFFSET 256 |
| 1652 | |
Alex Deucher | 6a58577 | 2015-07-10 14:16:24 -0400 | [diff] [blame] | 1653 | #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) |
| 1654 | #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) |
| 1655 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1656 | struct amdgpu_vce { |
| 1657 | struct amdgpu_bo *vcpu_bo; |
| 1658 | uint64_t gpu_addr; |
| 1659 | unsigned fw_version; |
| 1660 | unsigned fb_version; |
| 1661 | atomic_t handles[AMDGPU_MAX_VCE_HANDLES]; |
| 1662 | struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; |
Christian König | f1689ec | 2015-06-11 20:56:18 +0200 | [diff] [blame] | 1663 | uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1664 | struct delayed_work idle_work; |
| 1665 | const struct firmware *fw; /* VCE firmware */ |
| 1666 | struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; |
| 1667 | struct amdgpu_irq_src irq; |
Alex Deucher | 6a58577 | 2015-07-10 14:16:24 -0400 | [diff] [blame] | 1668 | unsigned harvest_config; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1669 | }; |
| 1670 | |
| 1671 | /* |
| 1672 | * SDMA |
| 1673 | */ |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1674 | struct amdgpu_sdma_instance { |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1675 | /* SDMA firmware */ |
| 1676 | const struct firmware *fw; |
| 1677 | uint32_t fw_version; |
Jammy Zhou | cfa2104 | 2015-08-04 10:50:47 +0800 | [diff] [blame] | 1678 | uint32_t feature_version; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1679 | |
| 1680 | struct amdgpu_ring ring; |
Jammy Zhou | 18111de | 2015-08-31 14:06:39 +0800 | [diff] [blame] | 1681 | bool burst_nop; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1682 | }; |
| 1683 | |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1684 | struct amdgpu_sdma { |
| 1685 | struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; |
| 1686 | struct amdgpu_irq_src trap_irq; |
| 1687 | struct amdgpu_irq_src illegal_inst_irq; |
| 1688 | int num_instances; |
| 1689 | }; |
| 1690 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1691 | /* |
| 1692 | * Firmware |
| 1693 | */ |
| 1694 | struct amdgpu_firmware { |
| 1695 | struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM]; |
| 1696 | bool smu_load; |
| 1697 | struct amdgpu_bo *fw_buf; |
| 1698 | unsigned int fw_size; |
| 1699 | }; |
| 1700 | |
| 1701 | /* |
| 1702 | * Benchmarking |
| 1703 | */ |
| 1704 | void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); |
| 1705 | |
| 1706 | |
| 1707 | /* |
| 1708 | * Testing |
| 1709 | */ |
| 1710 | void amdgpu_test_moves(struct amdgpu_device *adev); |
| 1711 | void amdgpu_test_ring_sync(struct amdgpu_device *adev, |
| 1712 | struct amdgpu_ring *cpA, |
| 1713 | struct amdgpu_ring *cpB); |
| 1714 | void amdgpu_test_syncing(struct amdgpu_device *adev); |
| 1715 | |
| 1716 | /* |
| 1717 | * MMU Notifier |
| 1718 | */ |
| 1719 | #if defined(CONFIG_MMU_NOTIFIER) |
| 1720 | int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); |
| 1721 | void amdgpu_mn_unregister(struct amdgpu_bo *bo); |
| 1722 | #else |
Harry Wentland | 1d1106b | 2015-07-15 07:10:41 -0400 | [diff] [blame] | 1723 | static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1724 | { |
| 1725 | return -ENODEV; |
| 1726 | } |
Harry Wentland | 1d1106b | 2015-07-15 07:10:41 -0400 | [diff] [blame] | 1727 | static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1728 | #endif |
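/*
 * Illustrative sketch (not part of the driver): userptr buffers register an
 * MMU notifier so the driver learns when the backing pages change; without
 * CONFIG_MMU_NOTIFIER the stub above simply returns -ENODEV.  The user
 * virtual address shown is hypothetical.
 *
 *	r = amdgpu_mn_register(bo, user_addr);
 *	...
 *	amdgpu_mn_unregister(bo);	// before the BO is freed
 */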
| 1729 | |
| 1730 | /* |
| 1731 | * Debugfs |
| 1732 | */ |
| 1733 | struct amdgpu_debugfs { |
| 1734 | struct drm_info_list *files; |
| 1735 | unsigned num_files; |
| 1736 | }; |
| 1737 | |
| 1738 | int amdgpu_debugfs_add_files(struct amdgpu_device *adev, |
| 1739 | struct drm_info_list *files, |
| 1740 | unsigned nfiles); |
| 1741 | int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); |
| 1742 | |
| 1743 | #if defined(CONFIG_DEBUG_FS) |
| 1744 | int amdgpu_debugfs_init(struct drm_minor *minor); |
| 1745 | void amdgpu_debugfs_cleanup(struct drm_minor *minor); |
| 1746 | #endif |
| 1747 | |
| 1748 | /* |
| 1749 | * amdgpu smumgr functions |
| 1750 | */ |
| 1751 | struct amdgpu_smumgr_funcs { |
| 1752 | int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype); |
| 1753 | int (*request_smu_load_fw)(struct amdgpu_device *adev); |
| 1754 | int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype); |
| 1755 | }; |
| 1756 | |
| 1757 | /* |
| 1758 | * amdgpu smumgr |
| 1759 | */ |
| 1760 | struct amdgpu_smumgr { |
| 1761 | struct amdgpu_bo *toc_buf; |
| 1762 | struct amdgpu_bo *smu_buf; |
| 1763 | /* asic priv smu data */ |
| 1764 | void *priv; |
| 1765 | spinlock_t smu_lock; |
| 1766 | /* smumgr functions */ |
| 1767 | const struct amdgpu_smumgr_funcs *smumgr_funcs; |
| 1768 | /* ucode loading complete flag */ |
| 1769 | uint32_t fw_flags; |
| 1770 | }; |
| 1771 | |
| 1772 | /* |
| 1773 | * ASIC specific register table accessible by UMD |
| 1774 | */ |
| 1775 | struct amdgpu_allowed_register_entry { |
| 1776 | uint32_t reg_offset; |
| 1777 | bool untouched; |
| 1778 | bool grbm_indexed; |
| 1779 | }; |
| 1780 | |
| 1781 | struct amdgpu_cu_info { |
| 1782 | uint32_t number; /* total active CU number */ |
| 1783 | uint32_t ao_cu_mask; |
| 1784 | uint32_t bitmap[4][4]; |
| 1785 | }; |
| 1786 | |
| 1787 | |
| 1788 | /* |
| 1789 | * ASIC specific functions. |
| 1790 | */ |
| 1791 | struct amdgpu_asic_funcs { |
| 1792 | bool (*read_disabled_bios)(struct amdgpu_device *adev); |
Alex Deucher | 7946b87 | 2015-11-24 10:14:28 -0500 | [diff] [blame] | 1793 | bool (*read_bios_from_rom)(struct amdgpu_device *adev, |
| 1794 | u8 *bios, u32 length_bytes); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1795 | int (*read_register)(struct amdgpu_device *adev, u32 se_num, |
| 1796 | u32 sh_num, u32 reg_offset, u32 *value); |
| 1797 | void (*set_vga_state)(struct amdgpu_device *adev, bool state); |
| 1798 | int (*reset)(struct amdgpu_device *adev); |
| 1799 | /* wait for mc_idle */ |
| 1800 | int (*wait_for_mc_idle)(struct amdgpu_device *adev); |
| 1801 | /* get the reference clock */ |
| 1802 | u32 (*get_xclk)(struct amdgpu_device *adev); |
| 1803 | /* get the gpu clock counter */ |
| 1804 | uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); |
| 1805 | int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info); |
| 1806 | /* MM block clocks */ |
| 1807 | int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); |
| 1808 | int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); |
| 1809 | }; |
| 1810 | |
| 1811 | /* |
| 1812 | * IOCTL. |
| 1813 | */ |
| 1814 | int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, |
| 1815 | struct drm_file *filp); |
| 1816 | int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, |
| 1817 | struct drm_file *filp); |
| 1818 | |
| 1819 | int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data, |
| 1820 | struct drm_file *filp); |
| 1821 | int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, |
| 1822 | struct drm_file *filp); |
| 1823 | int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, |
| 1824 | struct drm_file *filp); |
| 1825 | int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
| 1826 | struct drm_file *filp); |
| 1827 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
| 1828 | struct drm_file *filp); |
| 1829 | int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, |
| 1830 | struct drm_file *filp); |
| 1831 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
| 1832 | int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
| 1833 | |
| 1834 | int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, |
| 1835 | struct drm_file *filp); |
| 1836 | |
| 1837 | /* VRAM scratch page for HDP bug, default vram page */ |
| 1838 | struct amdgpu_vram_scratch { |
| 1839 | struct amdgpu_bo *robj; |
| 1840 | volatile uint32_t *ptr; |
| 1841 | u64 gpu_addr; |
| 1842 | }; |
| 1843 | |
| 1844 | /* |
| 1845 | * ACPI |
| 1846 | */ |
| 1847 | struct amdgpu_atif_notification_cfg { |
| 1848 | bool enabled; |
| 1849 | int command_code; |
| 1850 | }; |
| 1851 | |
| 1852 | struct amdgpu_atif_notifications { |
| 1853 | bool display_switch; |
| 1854 | bool expansion_mode_change; |
| 1855 | bool thermal_state; |
| 1856 | bool forced_power_state; |
| 1857 | bool system_power_state; |
| 1858 | bool display_conf_change; |
| 1859 | bool px_gfx_switch; |
| 1860 | bool brightness_change; |
| 1861 | bool dgpu_display_event; |
| 1862 | }; |
| 1863 | |
| 1864 | struct amdgpu_atif_functions { |
| 1865 | bool system_params; |
| 1866 | bool sbios_requests; |
| 1867 | bool select_active_disp; |
| 1868 | bool lid_state; |
| 1869 | bool get_tv_standard; |
| 1870 | bool set_tv_standard; |
| 1871 | bool get_panel_expansion_mode; |
| 1872 | bool set_panel_expansion_mode; |
| 1873 | bool temperature_change; |
| 1874 | bool graphics_device_types; |
| 1875 | }; |
| 1876 | |
| 1877 | struct amdgpu_atif { |
| 1878 | struct amdgpu_atif_notifications notifications; |
| 1879 | struct amdgpu_atif_functions functions; |
| 1880 | struct amdgpu_atif_notification_cfg notification_cfg; |
| 1881 | struct amdgpu_encoder *encoder_for_bl; |
| 1882 | }; |
| 1883 | |
| 1884 | struct amdgpu_atcs_functions { |
| 1885 | bool get_ext_state; |
| 1886 | bool pcie_perf_req; |
| 1887 | bool pcie_dev_rdy; |
| 1888 | bool pcie_bus_width; |
| 1889 | }; |
| 1890 | |
| 1891 | struct amdgpu_atcs { |
| 1892 | struct amdgpu_atcs_functions functions; |
| 1893 | }; |
| 1894 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1895 | /* |
Chunming Zhou | d03846a | 2015-07-28 14:20:03 -0400 | [diff] [blame] | 1896 | * CGS |
| 1897 | */ |
| 1898 | void *amdgpu_cgs_create_device(struct amdgpu_device *adev); |
| 1899 | void amdgpu_cgs_destroy_device(void *cgs_device); |
| 1900 | |
| 1908 | |
Alex Deucher | 7e471e6 | 2016-02-01 11:13:04 -0500 | [diff] [blame] | 1909 | /* GPU virtualization */ |
| 1910 | struct amdgpu_virtualization { |
| 1911 | bool supports_sr_iov; |
| 1912 | }; |
| 1913 | |
Maruthi Bayyavarapu | a8fe58c | 2015-09-22 17:05:20 -0400 | [diff] [blame] | 1914 | /* |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1915 | * Core structure, functions and helpers. |
| 1916 | */ |
| 1917 | typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); |
| 1918 | typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
| 1919 | |
| 1920 | typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
| 1921 | typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); |
| 1922 | |
Alex Deucher | 8faf0e0 | 2015-07-28 11:50:31 -0400 | [diff] [blame] | 1923 | struct amdgpu_ip_block_status { |
| 1924 | bool valid; |
| 1925 | bool sw; |
| 1926 | bool hw; |
| 1927 | }; |
| 1928 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1929 | struct amdgpu_device { |
| 1930 | struct device *dev; |
| 1931 | struct drm_device *ddev; |
| 1932 | struct pci_dev *pdev; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1933 | |
Maruthi Bayyavarapu | a8fe58c | 2015-09-22 17:05:20 -0400 | [diff] [blame] | 1934 | #ifdef CONFIG_DRM_AMD_ACP |
| 1935 | struct amdgpu_acp acp; |
| 1936 | #endif |
| 1937 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1938 | /* ASIC */ |
Jammy Zhou | 2f7d10b | 2015-07-22 11:29:01 +0800 | [diff] [blame] | 1939 | enum amd_asic_type asic_type; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1940 | uint32_t family; |
| 1941 | uint32_t rev_id; |
| 1942 | uint32_t external_rev_id; |
| 1943 | unsigned long flags; |
| 1944 | int usec_timeout; |
| 1945 | const struct amdgpu_asic_funcs *asic_funcs; |
| 1946 | bool shutdown; |
| 1947 | bool suspend; |
| 1948 | bool need_dma32; |
| 1949 | bool accel_working; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1950 | struct work_struct reset_work; |
| 1951 | struct notifier_block acpi_nb; |
| 1952 | struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; |
| 1953 | struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; |
| 1954 | unsigned debugfs_count; |
| 1955 | #if defined(CONFIG_DEBUG_FS) |
| 1956 | struct dentry *debugfs_regs; |
| 1957 | #endif |
| 1958 | struct amdgpu_atif atif; |
| 1959 | struct amdgpu_atcs atcs; |
| 1960 | struct mutex srbm_mutex; |
| 1961 | /* GRBM index mutex. Protects concurrent access to GRBM index */ |
| 1962 | struct mutex grbm_idx_mutex; |
| 1963 | struct dev_pm_domain vga_pm_domain; |
| 1964 | bool have_disp_power_ref; |
| 1965 | |
| 1966 | /* BIOS */ |
| 1967 | uint8_t *bios; |
| 1968 | bool is_atom_bios; |
| 1969 | uint16_t bios_header_start; |
| 1970 | struct amdgpu_bo *stollen_vga_memory; |
| 1971 | uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; |
| 1972 | |
| 1973 | /* Register/doorbell mmio */ |
| 1974 | resource_size_t rmmio_base; |
| 1975 | resource_size_t rmmio_size; |
| 1976 | void __iomem *rmmio; |
| 1977 | /* protects concurrent MM_INDEX/DATA based register access */ |
| 1978 | spinlock_t mmio_idx_lock; |
| 1979 | /* protects concurrent SMC based register access */ |
| 1980 | spinlock_t smc_idx_lock; |
| 1981 | amdgpu_rreg_t smc_rreg; |
| 1982 | amdgpu_wreg_t smc_wreg; |
| 1983 | /* protects concurrent PCIE register access */ |
| 1984 | spinlock_t pcie_idx_lock; |
| 1985 | amdgpu_rreg_t pcie_rreg; |
| 1986 | amdgpu_wreg_t pcie_wreg; |
| 1987 | /* protects concurrent UVD register access */ |
| 1988 | spinlock_t uvd_ctx_idx_lock; |
| 1989 | amdgpu_rreg_t uvd_ctx_rreg; |
| 1990 | amdgpu_wreg_t uvd_ctx_wreg; |
| 1991 | /* protects concurrent DIDT register access */ |
| 1992 | spinlock_t didt_idx_lock; |
| 1993 | amdgpu_rreg_t didt_rreg; |
| 1994 | amdgpu_wreg_t didt_wreg; |
| 1995 | /* protects concurrent ENDPOINT (audio) register access */ |
| 1996 | spinlock_t audio_endpt_idx_lock; |
| 1997 | amdgpu_block_rreg_t audio_endpt_rreg; |
| 1998 | amdgpu_block_wreg_t audio_endpt_wreg; |
| 1999 | void __iomem *rio_mem; |
| 2000 | resource_size_t rio_mem_size; |
| 2001 | struct amdgpu_doorbell doorbell; |
| 2002 | |
| 2003 | /* clock/pll info */ |
| 2004 | struct amdgpu_clock clock; |
| 2005 | |
| 2006 | /* MC */ |
| 2007 | struct amdgpu_mc mc; |
| 2008 | struct amdgpu_gart gart; |
| 2009 | struct amdgpu_dummy_page dummy_page; |
| 2010 | struct amdgpu_vm_manager vm_manager; |
| 2011 | |
| 2012 | /* memory management */ |
| 2013 | struct amdgpu_mman mman; |
| 2014 | struct amdgpu_gem gem; |
| 2015 | struct amdgpu_vram_scratch vram_scratch; |
| 2016 | struct amdgpu_wb wb; |
| 2017 | atomic64_t vram_usage; |
| 2018 | atomic64_t vram_vis_usage; |
| 2019 | atomic64_t gtt_usage; |
| 2020 | atomic64_t num_bytes_moved; |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 2021 | atomic_t gpu_reset_counter; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2022 | |
| 2023 | /* display */ |
| 2024 | struct amdgpu_mode_info mode_info; |
| 2025 | struct work_struct hotplug_work; |
| 2026 | struct amdgpu_irq_src crtc_irq; |
| 2027 | struct amdgpu_irq_src pageflip_irq; |
| 2028 | struct amdgpu_irq_src hpd_irq; |
| 2029 | |
| 2030 | /* rings */ |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2031 | unsigned fence_context; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2032 | unsigned num_rings; |
| 2033 | struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; |
| 2034 | bool ib_pool_ready; |
| 2035 | struct amdgpu_sa_manager ring_tmp_bo; |
| 2036 | |
| 2037 | /* interrupts */ |
| 2038 | struct amdgpu_irq irq; |
| 2039 | |
Alex Deucher | 1f7371b | 2015-12-02 17:46:21 -0500 | [diff] [blame] | 2040 | /* powerplay */ |
| 2041 | struct amd_powerplay powerplay; |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2042 | bool pp_enabled; |
Eric Huang | f3898ea | 2015-12-11 16:24:34 -0500 | [diff] [blame] | 2043 | bool pp_force_state_enabled; |
Alex Deucher | 1f7371b | 2015-12-02 17:46:21 -0500 | [diff] [blame] | 2044 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2045 | /* dpm */ |
| 2046 | struct amdgpu_pm pm; |
| 2047 | u32 cg_flags; |
| 2048 | u32 pg_flags; |
| 2049 | |
| 2050 | /* amdgpu smumgr */ |
| 2051 | struct amdgpu_smumgr smu; |
| 2052 | |
| 2053 | /* gfx */ |
| 2054 | struct amdgpu_gfx gfx; |
| 2055 | |
| 2056 | /* sdma */ |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 2057 | struct amdgpu_sdma sdma; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2058 | |
| 2059 | /* uvd */ |
| 2060 | bool has_uvd; |
| 2061 | struct amdgpu_uvd uvd; |
| 2062 | |
| 2063 | /* vce */ |
| 2064 | struct amdgpu_vce vce; |
| 2065 | |
| 2066 | /* firmwares */ |
| 2067 | struct amdgpu_firmware firmware; |
| 2068 | |
| 2069 | /* GDS */ |
| 2070 | struct amdgpu_gds gds; |
| 2071 | |
| 2072 | const struct amdgpu_ip_block_version *ip_blocks; |
| 2073 | int num_ip_blocks; |
Alex Deucher | 8faf0e0 | 2015-07-28 11:50:31 -0400 | [diff] [blame] | 2074 | struct amdgpu_ip_block_status *ip_block_status; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2075 | struct mutex mn_lock; |
| 2076 | DECLARE_HASHTABLE(mn_hash, 7); |
| 2077 | |
| 2078 | /* tracking pinned memory */ |
| 2079 | u64 vram_pin_size; |
| 2080 | u64 gart_pin_size; |
Oded Gabbay | 130e037 | 2015-06-12 21:35:14 +0300 | [diff] [blame] | 2081 | |
| 2082 | /* amdkfd interface */ |
| 2083 | struct kfd_dev *kfd; |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 2084 | |
| 2085 | /* kernel context for IB submission */ |
Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 2086 | struct amdgpu_ctx kernel_ctx; |
Alex Deucher | 7e471e6 | 2016-02-01 11:13:04 -0500 | [diff] [blame] | 2087 | |
| 2088 | struct amdgpu_virtualization virtualization; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2089 | }; |
| 2090 | |
| 2091 | bool amdgpu_device_is_px(struct drm_device *dev); |
| 2092 | int amdgpu_device_init(struct amdgpu_device *adev, |
| 2093 | struct drm_device *ddev, |
| 2094 | struct pci_dev *pdev, |
| 2095 | uint32_t flags); |
| 2096 | void amdgpu_device_fini(struct amdgpu_device *adev); |
| 2097 | int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); |
| 2098 | |
| 2099 | uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, |
| 2100 | bool always_indirect); |
| 2101 | void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, |
| 2102 | bool always_indirect); |
| 2103 | u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); |
| 2104 | void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); |
| 2105 | |
| 2106 | u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); |
| 2107 | void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); |
| 2108 | |
| 2109 | /* |
| 2110 | * Cast helper |
| 2111 | */ |
| 2112 | extern const struct fence_ops amdgpu_fence_ops; |
| 2113 | static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f) |
| 2114 | { |
| 2115 | struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base); |
| 2116 | |
| 2117 | if (__f->base.ops == &amdgpu_fence_ops) |
| 2118 | return __f; |
| 2119 | |
| 2120 | return NULL; |
| 2121 | } |
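/*
 * Illustrative sketch (not part of the driver): the cast helper returns NULL
 * for fences that do not come from amdgpu, so callers can special-case
 * foreign fences.
 *
 *	struct amdgpu_fence *af = to_amdgpu_fence(f);
 *
 *	if (af)
 *		;	// amdgpu fence: ring/sequence details are available
 *	else
 *		;	// foreign fence: only the generic fence API applies
 */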
| 2122 | |
| 2123 | /* |
| 2124 | * Registers read & write functions. |
| 2125 | */ |
| 2126 | #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false) |
| 2127 | #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true) |
| 2128 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false)) |
| 2129 | #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false) |
| 2130 | #define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true) |
| 2131 | #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
| 2132 | #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
| 2133 | #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) |
| 2134 | #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) |
| 2135 | #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) |
| 2136 | #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) |
| 2137 | #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) |
| 2138 | #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) |
| 2139 | #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) |
| 2140 | #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) |
| 2141 | #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) |
| 2142 | #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) |
| 2143 | #define WREG32_P(reg, val, mask) \ |
| 2144 | do { \ |
| 2145 | uint32_t tmp_ = RREG32(reg); \ |
| 2146 | tmp_ &= (mask); \ |
| 2147 | tmp_ |= ((val) & ~(mask)); \ |
| 2148 | WREG32(reg, tmp_); \ |
| 2149 | } while (0) |
| 2150 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) |
| 2151 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) |
| 2152 | #define WREG32_PLL_P(reg, val, mask) \ |
| 2153 | do { \ |
| 2154 | uint32_t tmp_ = RREG32_PLL(reg); \ |
| 2155 | tmp_ &= (mask); \ |
| 2156 | tmp_ |= ((val) & ~(mask)); \ |
| 2157 | WREG32_PLL(reg, tmp_); \ |
| 2158 | } while (0) |
| 2159 | #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false)) |
| 2160 | #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) |
| 2161 | #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) |
| 2162 | |
| 2163 | #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) |
| 2164 | #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) |
| 2165 | |
| 2166 | #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT |
| 2167 | #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK |
| 2168 | |
| 2169 | #define REG_SET_FIELD(orig_val, reg, field, field_val) \ |
| 2170 | (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \ |
| 2171 | (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field)))) |
| 2172 | |
| 2173 | #define REG_GET_FIELD(value, reg, field) \ |
| 2174 | (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) |
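/*
 * Illustrative sketch (not part of the driver): register fields are usually
 * updated with a read-modify-write through the field helpers.  mmEXAMPLE_REG,
 * EXAMPLE_REG and FIELD stand in for names from the generated register
 * headers.
 *
 *	u32 tmp = RREG32(mmEXAMPLE_REG);
 *
 *	tmp = REG_SET_FIELD(tmp, EXAMPLE_REG, FIELD, 1);
 *	WREG32(mmEXAMPLE_REG, tmp);
 */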
| 2175 | |
| 2176 | /* |
| 2177 | * BIOS helpers. |
| 2178 | */ |
| 2179 | #define RBIOS8(i) (adev->bios[i]) |
| 2180 | #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) |
| 2181 | #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) |
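/*
 * Illustrative sketch (not part of the driver): the RBIOS* helpers read
 * little-endian values out of the copied VBIOS image, for instance the
 * 16-bit pointer to the ROM header (offset 0x48 in the ATOM layout the
 * driver assumes).
 *
 *	adev->bios_header_start = RBIOS16(0x48);
 */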
| 2182 | |
| 2183 | /* |
| 2184 | * RING helpers. |
| 2185 | */ |
| 2186 | static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) |
| 2187 | { |
| 2188 | if (ring->count_dw <= 0) |
Jammy Zhou | 86c2b79 | 2015-05-13 22:52:42 +0800 | [diff] [blame] | 2189 | DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2190 | ring->ring[ring->wptr++] = v; |
| 2191 | ring->wptr &= ring->ptr_mask; |
| 2192 | ring->count_dw--; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2193 | } |
| 2194 | |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 2195 | static inline struct amdgpu_sdma_instance * |
| 2196 | amdgpu_get_sdma_instance(struct amdgpu_ring *ring) |
Jammy Zhou | 4b2f7e2 | 2015-09-01 12:56:17 +0800 | [diff] [blame] | 2197 | { |
| 2198 | struct amdgpu_device *adev = ring->adev; |
| 2199 | int i; |
| 2200 | |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 2201 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 2202 | if (&adev->sdma.instance[i].ring == ring) |
Jammy Zhou | 4b2f7e2 | 2015-09-01 12:56:17 +0800 | [diff] [blame] | 2203 | break; |
| 2204 | |
| 2205 | if (i < AMDGPU_MAX_SDMA_INSTANCES) |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 2206 | return &adev->sdma.instance[i]; |
Jammy Zhou | 4b2f7e2 | 2015-09-01 12:56:17 +0800 | [diff] [blame] | 2207 | else |
| 2208 | return NULL; |
| 2209 | } |
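/*
 * Illustrative sketch (not part of the driver): SDMA ring code looks up its
 * owning instance to reach per-instance state such as the burst NOP
 * capability.
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		;	// emit one burst NOP instead of a run of single NOPs
 */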
| 2210 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2211 | /* |
| 2212 | * ASICs macro. |
| 2213 | */ |
| 2214 | #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) |
| 2215 | #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) |
| 2216 | #define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev)) |
| 2217 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) |
| 2218 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) |
| 2219 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) |
| 2220 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) |
| 2221 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) |
Alex Deucher | 7946b87 | 2015-11-24 10:14:28 -0500 | [diff] [blame] | 2222 | #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2223 | #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) |
| 2224 | #define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info)) |
| 2225 | #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) |
| 2226 | #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) |
| 2227 | #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) |
Christian König | b07c9d2 | 2015-11-30 13:26:07 +0100 | [diff] [blame] | 2228 | #define amdgpu_vm_write_pte(adev, ib, pa, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pa), (pe), (addr), (count), (incr), (flags))) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2229 | #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) |
| 2230 | #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib))) |
| 2231 | #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) |
| 2232 | #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) |
| 2233 | #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2234 | #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) |
| 2235 | #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) |
| 2236 | #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) |
| 2237 | #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) |
| 2238 | #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) |
Chunming Zhou | 890ee23 | 2015-06-01 14:35:03 +0800 | [diff] [blame] | 2239 | #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2240 | #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) |
Christian König | d2edb07 | 2015-05-11 14:10:34 +0200 | [diff] [blame] | 2241 | #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2242 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) |
| 2243 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) |
| 2244 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) |
| 2245 | #define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) |
| 2246 | #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) |
| 2247 | #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) |
| 2248 | #define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev)) |
| 2249 | #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) |
| 2250 | #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) |
| 2251 | #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) |
| 2252 | #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) |
| 2253 | #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) |
| 2254 | #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) |
| 2255 | #define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base)) |
| 2256 | #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) |
| 2257 | #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) |
| 2258 | #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) |
| 2259 | #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) |
| 2260 | #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2261 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) |
Chunming Zhou | 6e7a384 | 2015-08-27 13:46:09 +0800 | [diff] [blame] | 2262 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2263 | #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) |
| 2264 | #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) |
| 2265 | #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) |
| 2266 | #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2267 | #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2268 | #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2269 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) |
Rex Zhu | 3af76f2 | 2015-10-15 17:23:43 +0800 | [diff] [blame] | 2270 | |
| 2271 | #define amdgpu_dpm_get_temperature(adev) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2272 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2273 | (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2274 | (adev)->pm.funcs->get_temperature((adev))) |
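/*
 * Illustrative sketch (not part of the driver): the wrappers above route to
 * the powerplay backend when pp_enabled is set and fall back to the legacy
 * dpm callbacks otherwise, so callers stay backend-agnostic.
 *
 *	int temp = amdgpu_dpm_get_temperature(adev);	// consumed by hwmon
 */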
Rex Zhu | 3af76f2 | 2015-10-15 17:23:43 +0800 | [diff] [blame] | 2275 | |
| 2276 | #define amdgpu_dpm_set_fan_control_mode(adev, m) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2277 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2278 | (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2279 | (adev)->pm.funcs->set_fan_control_mode((adev), (m))) |
Rex Zhu | 3af76f2 | 2015-10-15 17:23:43 +0800 | [diff] [blame] | 2280 | |
| 2281 | #define amdgpu_dpm_get_fan_control_mode(adev) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2282 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2283 | (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2284 | (adev)->pm.funcs->get_fan_control_mode((adev))) |
Rex Zhu | 3af76f2 | 2015-10-15 17:23:43 +0800 | [diff] [blame] | 2285 | |
| 2286 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2287 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2288 | (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2289 | (adev)->pm.funcs->set_fan_speed_percent((adev), (s))) |
Rex Zhu | 3af76f2 | 2015-10-15 17:23:43 +0800 | [diff] [blame] | 2290 | |
| 2291 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2292 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2293 | (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2294 | (adev)->pm.funcs->get_fan_speed_percent((adev), (s))) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2295 | |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2296 | #define amdgpu_dpm_get_sclk(adev, l) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2297 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2298 | (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2299 | (adev)->pm.funcs->get_sclk((adev), (l))) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2300 | |
| 2301 | #define amdgpu_dpm_get_mclk(adev, l) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2302 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2303 | (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2304 | (adev)->pm.funcs->get_mclk((adev), (l))) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2305 | |
| 2307 | #define amdgpu_dpm_force_performance_level(adev, l) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2308 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2309 | (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2310 | (adev)->pm.funcs->force_performance_level((adev), (l))) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2311 | |
| 2312 | #define amdgpu_dpm_powergate_uvd(adev, g) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2313 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2314 | (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2315 | (adev)->pm.funcs->powergate_uvd((adev), (g))) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2316 | |
| 2317 | #define amdgpu_dpm_powergate_vce(adev, g) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2318 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2319 | (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2320 | (adev)->pm.funcs->powergate_vce((adev), (g))) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2321 | |
| 2322 | #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2323 | ((adev)->pp_enabled ? \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2324 | (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \ |
Eric Huang | 4b5ece2 | 2016-01-19 14:28:56 -0500 | [diff] [blame] | 2325 | (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2326 | |
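/*
 * The remaining dpm helpers below have no legacy fallback: they call
 * straight through powerplay.pp_funcs, so callers presumably need to
 * check adev->pp_enabled (and the individual function pointer) before
 * using them.
 */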
| 2327 | #define amdgpu_dpm_get_current_power_state(adev) \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2328 | (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2329 | |
| 2330 | #define amdgpu_dpm_get_performance_level(adev) \ |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2331 | (adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2332 | |
Eric Huang | f3898ea | 2015-12-11 16:24:34 -0500 | [diff] [blame] | 2333 | #define amdgpu_dpm_get_pp_num_states(adev, data) \ |
| 2334 | (adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data) |
| 2335 | |
| 2336 | #define amdgpu_dpm_get_pp_table(adev, table) \ |
| 2337 | (adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table) |
| 2338 | |
| 2339 | #define amdgpu_dpm_set_pp_table(adev, buf, size) \ |
| 2340 | (adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size) |
| 2341 | |
| 2342 | #define amdgpu_dpm_print_clock_levels(adev, type, buf) \ |
| 2343 | (adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf) |
| 2344 | |
| 2345 | #define amdgpu_dpm_force_clock_level(adev, type, level) \ |
| 2346 | (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) |
| 2347 | |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 2348 | #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ |
Rex Zhu | 1b5708f | 2015-11-10 18:25:24 -0500 | [diff] [blame] | 2349 | (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2350 | |
| 2351 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) |
| 2352 | |
| 2353 | /* Common functions */ |
| 2354 | int amdgpu_gpu_reset(struct amdgpu_device *adev); |
| 2355 | void amdgpu_pci_config_reset(struct amdgpu_device *adev); |
| 2356 | bool amdgpu_card_posted(struct amdgpu_device *adev); |
| 2357 | void amdgpu_update_display_priority(struct amdgpu_device *adev); |
Chunming Zhou | d5fc5e8 | 2015-07-21 16:52:10 +0800 | [diff] [blame] | 2358 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2359 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); |
| 2360 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, |
| 2361 | u32 ip_instance, u32 ring, |
| 2362 | struct amdgpu_ring **out_ring); |
| 2363 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain); |
| 2364 | bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); |
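/*
 * Userptr helpers (sketching the intended use): set_userptr binds a
 * BO's ttm_tt to a range of user memory, get_usermm reports which mm
 * the pages came from, and affect_userptr is meant to tell MMU
 * notifier code whether an invalidated [start, end) range overlaps the
 * tt's user pages.
 */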
| 2365 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
| 2366 | uint32_t flags); |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame^] | 2367 | struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); |
Christian König | d700696 | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 2368 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, |
| 2369 | unsigned long end); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2370 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); |
| 2371 | uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, |
| 2372 | struct ttm_mem_reg *mem); |
| 2373 | void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); |
| 2374 | void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); |
| 2375 | void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); |
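/*
 * registers is conventionally a flat array of {offset, and_mask,
 * or_mask} triplets (the "golden register" lists), applied in order;
 * array_size is the total number of u32 entries, not the number of
 * triplets.
 */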
| 2376 | void amdgpu_program_register_sequence(struct amdgpu_device *adev, |
| 2377 | const u32 *registers, |
| 2378 | const u32 array_size); |
| 2379 | |
| 2380 | bool amdgpu_device_is_px(struct drm_device *dev); |
| 2381 | /* atpx handler */ |
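/*
 * On PX (hybrid graphics) platforms the ATPX handler ties amdgpu into
 * vga_switcheroo for dGPU power control; the empty inline stubs below
 * keep the registration calls buildable when VGA_SWITCHEROO is not
 * configured.
 */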
| 2382 | #if defined(CONFIG_VGA_SWITCHEROO) |
| 2383 | void amdgpu_register_atpx_handler(void); |
| 2384 | void amdgpu_unregister_atpx_handler(void); |
| 2385 | #else |
| 2386 | static inline void amdgpu_register_atpx_handler(void) {} |
| 2387 | static inline void amdgpu_unregister_atpx_handler(void) {} |
| 2388 | #endif |
| 2389 | |
| 2390 | /* |
| 2391 | * KMS |
| 2392 | */ |
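/*
 * The declarations below are the KMS entry points wired into the
 * driver's struct drm_driver (the real table lives in amdgpu_drv.c).
 * A minimal sketch of that wiring, assuming the drm_driver field names
 * of this kernel generation:
 *
 *	static struct drm_driver kms_driver = {
 *		.load			= amdgpu_driver_load_kms,
 *		.unload			= amdgpu_driver_unload_kms,
 *		.open			= amdgpu_driver_open_kms,
 *		.preclose		= amdgpu_driver_preclose_kms,
 *		.postclose		= amdgpu_driver_postclose_kms,
 *		.lastclose		= amdgpu_driver_lastclose_kms,
 *		.get_vblank_counter	= amdgpu_get_vblank_counter_kms,
 *		.enable_vblank		= amdgpu_enable_vblank_kms,
 *		.disable_vblank		= amdgpu_disable_vblank_kms,
 *		.ioctls			= amdgpu_ioctls_kms,
 *	};
 */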
| 2393 | extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; |
| 2394 | extern int amdgpu_max_kms_ioctl; |
| 2395 | |
| 2396 | int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 2397 | int amdgpu_driver_unload_kms(struct drm_device *dev); |
| 2398 | void amdgpu_driver_lastclose_kms(struct drm_device *dev); |
| 2399 | int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); |
| 2400 | void amdgpu_driver_postclose_kms(struct drm_device *dev, |
| 2401 | struct drm_file *file_priv); |
| 2402 | void amdgpu_driver_preclose_kms(struct drm_device *dev, |
| 2403 | struct drm_file *file_priv); |
| 2404 | int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon); |
| 2405 | int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon); |
Thierry Reding | 88e7271 | 2015-09-24 18:35:31 +0200 | [diff] [blame] | 2406 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); |
| 2407 | int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
| 2408 | void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
| 2409 | int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe, |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2410 | int *max_error, |
| 2411 | struct timeval *vblank_time, |
| 2412 | unsigned flags); |
| 2413 | long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, |
| 2414 | unsigned long arg); |
| 2415 | |
| 2416 | /* |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2417 | * functions used by amdgpu_encoder.c |
| 2418 | */ |
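/*
 * HDMI Audio Clock Regeneration (ACR) parameters: for a given TMDS
 * clock, amdgpu_afmt_acr() returns the N/CTS pairs used to regenerate
 * the audio clock at the 32 kHz, 44.1 kHz and 48 kHz sample rates.
 */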
| 2419 | struct amdgpu_afmt_acr { |
| 2420 | u32 clock; |
| 2421 | |
| 2422 | int n_32khz; |
| 2423 | int cts_32khz; |
| 2424 | |
| 2425 | int n_44_1khz; |
| 2426 | int cts_44_1khz; |
| 2427 | |
| 2428 | int n_48khz; |
| 2429 | int cts_48khz; |
| 2430 | |
| 2431 | }; |
| 2432 | |
| 2433 | struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); |
| 2434 | |
| 2435 | /* amdgpu_acpi.c */ |
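/*
 * ACPI glue (ATIF/ATCS methods, e.g. the PCIe performance request).
 * Without CONFIG_ACPI the inline stubs below keep amdgpu_acpi_init()
 * and amdgpu_acpi_fini() callable as no-ops.
 */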
| 2436 | #if defined(CONFIG_ACPI) |
| 2437 | int amdgpu_acpi_init(struct amdgpu_device *adev); |
| 2438 | void amdgpu_acpi_fini(struct amdgpu_device *adev); |
| 2439 | bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); |
| 2440 | int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, |
| 2441 | u8 perf_req, bool advertise); |
| 2442 | int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); |
| 2443 | #else |
| 2444 | static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } |
| 2445 | static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } |
| 2446 | #endif |
| 2447 | |
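/*
 * Looks up the bo_va mapping that contains the GPU virtual address
 * addr among the buffers of the current CS and returns the backing BO;
 * used, for example, when a command stream has to be patched against
 * real buffer locations.
 */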
| 2448 | struct amdgpu_bo_va_mapping * |
| 2449 | amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, |
| 2450 | uint64_t addr, struct amdgpu_bo **bo); |
| 2451 | |
| 2452 | #include "amdgpu_object.h" |
| 2453 | |
| 2454 | #endif |