/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amd_powerplay.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"

#include "gpu_scheduler.h"
#include "amdgpu_virt.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_powerplay;
extern int amdgpu_powercontainment;
extern unsigned amdgpu_pcie_gen_cap;
extern unsigned amdgpu_pcie_lane_cap;
extern unsigned amdgpu_cg_mask;
extern unsigned amdgpu_pg_mask;
extern char *amdgpu_disable_cu;
extern int amdgpu_sclk_deep_sleep_en;
extern char *amdgpu_virtual_display;
extern unsigned amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 8

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);
int amdgpu_wait_for_idle(struct amdgpu_device *adev,
			 enum amd_ip_block_type block_type);
bool amdgpu_is_idle(struct amdgpu_device *adev,
		    enum amd_ip_block_type block_type);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
				struct amdgpu_device *adev,
				enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
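
/*
 * Illustrative sketch only (not part of the driver): how a caller is
 * expected to combine copy_max_bytes and copy_num_dw when sizing an IB
 * for a copy.  The helper name below is hypothetical; the real migration
 * path is built around these callbacks in amdgpu_ttm.c.
 */
static inline unsigned
amdgpu_buffer_funcs_copy_dw_estimate(const struct amdgpu_buffer_funcs *funcs,
				     unsigned long byte_count)
{
	/* one emit_copy_buffer() packet moves at most copy_max_bytes */
	unsigned long num_loops = DIV_ROUND_UP(byte_count, funcs->copy_max_bytes);

	/* each packet needs copy_num_dw dwords of IB space */
	return num_loops * funcs->copy_num_dw;
}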

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * BO.
 */
struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	uint32_t priority;
	struct page **user_pages;
	int user_invalidated;
};

struct amdgpu_bo_va_mapping {
	struct list_head list;
	struct interval_tree_node it;
	uint64_t offset;
	uint32_t flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head bo_list;
	struct fence *last_pt_update;
	unsigned ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head vm_status;

	/* mappings for this bo_va */
	struct list_head invalids;
	struct list_head valids;

	/* constant after initialization */
	struct amdgpu_vm *vm;
	struct amdgpu_bo *bo;
};

#define AMDGPU_GEM_DOMAIN_MAX 0x3

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32 prefered_domains;
	u32 allowed_domains;
	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	u64 flags;
	unsigned pin_count;
	void *kptr;
	u64 tiling_flags;
	u64 metadata_flags;
	void *metadata;
	u32 metadata_size;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head va;
	/* Constant after initialization */
	struct drm_gem_object gem_base;
	struct amdgpu_bo *parent;
	struct amdgpu_bo *shadow;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	struct amdgpu_mn *mn;
	struct list_head mn_list;
	struct list_head shadow_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver,
 * like the indirect buffer or semaphore code, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the highest
 * offset).
 *
 * When allocating a new object we first check whether there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so, we allocate the new object there (see the
 * illustrative check below).
 *
 * When there is not enough room at the end, we start waiting for each
 * sub object until we reach object_offset + object_size >= alloc_size;
 * that object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the same
 * alignment).
 */
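
/*
 * Illustrative helper (hypothetical, not used by the allocator itself):
 * the "is there room at the end?" test described above, written out.
 */
static inline bool
amdgpu_sa_fits_at_end(unsigned total_size, unsigned last_object_offset,
		      unsigned last_object_size, unsigned alloc_size)
{
	/* free space between the last sub-allocation and the end of the BO */
	return total_size - (last_object_offset + last_object_size) >= alloc_size;
}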

#define AMDGPU_SA_NUM_FENCE_LISTS 32

struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct fence *fence;
};

/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
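
/*
 * Worked example: with 4 KiB GPU pages, AMDGPU_GPU_PAGE_ALIGN() rounds a
 * size up to the next GPU page boundary, e.g.
 * AMDGPU_GPU_PAGE_ALIGN(4097) == 8192, while already aligned values are
 * unchanged: AMDGPU_GPU_PAGE_ALIGN(8192) == 8192.
 */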

struct amdgpu_gart {
	dma_addr_t table_addr;
	struct amdgpu_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
	struct page **pages;
#endif
	bool ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);
int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	u64 gtt_base_align;
	u64 mc_mask;
	const struct firmware *fw; /* MC firmware */
	uint32_t fw_version;
	struct amdgpu_irq_src vm_fault;
	uint32_t vram_type;
	uint32_t srbm_soft_reset;
	struct amdgpu_mode_mc_save save;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells; /* Number of doorbells actually reserved for amdgpu. */
};
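
/*
 * Sketch of how a doorbell write is expected to reach the hardware
 * (an illustration based on the fields above, not the driver's actual
 * accessor): the kernel mapping of the doorbell BAR is indexed by one
 * of the AMDGPU_DOORBELL_* assignments, roughly:
 *
 *	if (index < adev->doorbell.num_doorbells)
 *		writel(v, adev->doorbell.ptr + index);
 */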

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	u32 target_vblank;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_abo;
	struct fence *excl;
	unsigned shared_count;
	struct fence **shared;
	struct fence_cb cb;
	bool async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	uint32_t flags;
};

extern const struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct amd_sched_entity *entity, void *owner,
		      struct fence **f);
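
/*
 * Typical kernel-internal submission flow using the job API above
 * (illustrative sketch only; error handling omitted and the packet
 * contents are placeholders):
 *
 *	struct amdgpu_job *job;
 *	struct fence *f;
 *
 *	amdgpu_job_alloc_with_ib(adev, 64, &job);
 *	// fill job->ibs[0].ptr[] with packets and set job->ibs[0].length_dw
 *	amdgpu_job_submit(job, ring, &entity, owner, &f);
 *	// f signals once the scheduler has run the job on the ring
 */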

/*
 * context related structures
 */

struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct fence **fences;
	struct amd_sched_entity entity;
};

struct amdgpu_ctx {
	struct kref refcount;
	struct amdgpu_device *adev;
	unsigned reset_counter;
	spinlock_t ring_lock;
	struct fence **fences;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
	bool preamble_presented;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			      struct fence *fence);
struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				   struct amdgpu_ring *ring, uint64_t seq);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */

struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;

	/* safe mode for updating CG/PG state */
	bool in_safe_mode;
	const struct amdgpu_rlc_funcs *funcs;

	/* for firmware data */
	u32 save_and_restore_offset;
	u32 clear_state_descriptor_offset;
	u32 avail_scratch_ram_locations;
	u32 reg_restore_list_size;
	u32 reg_list_format_start;
	u32 reg_list_format_separate_start;
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
};

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	u32 num_pipe;
	u32 num_mec;
	u32 num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	bool free[32];
	uint32_t reg[32];
};

/*
 * GFX configurations
 */
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2

struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];

	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];
};

struct amdgpu_cu_info {
	uint32_t number; /* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};

struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gca_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw; /* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw; /* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw; /* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw; /* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw; /* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw; /* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
	struct amdgpu_cu_info cu_info;
	const struct amdgpu_gfx_funcs *funcs;

	/* reset mask */
	uint32_t grbm_soft_reset;
	uint32_t srbm_soft_reset;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ib, struct fence *last_vm_update,
		       struct amdgpu_job *job, struct fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	void *kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;

	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;

	/* scheduler job object */
	struct amdgpu_job *job;

	/* buffer objects */
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_list *bo_list;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head validated;
	struct fence *fence;
	uint64_t bytes_moved_threshold;
	uint64_t bytes_moved;
	struct amdgpu_bo_list_entry *evictable;

	/* user fence */
	struct amdgpu_bo_list_entry uf_entry;
};

#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1) /* bit set means this is the first time the preamble IB is presented in its context */
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2) /* bit set means context switch occurred */

struct amdgpu_job {
	struct amd_sched_job base;
	struct amdgpu_device *adev;
	struct amdgpu_vm *vm;
	struct amdgpu_ring *ring;
	struct amdgpu_sync sync;
	struct amdgpu_ib *ibs;
	struct fence *fence; /* the hw fence */
	uint32_t preamble_status;
	uint32_t num_ibs;
	void *owner;
	uint64_t fence_ctx; /* the fence_context this job uses */
	bool vm_needs_flush;
	unsigned vm_id;
	uint64_t vm_pd_addr;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;

	/* user fence handling */
	uint64_t uf_addr;
	uint64_t uf_sequence;

};
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
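
/*
 * Sketch of the writeback slot allocator (an assumption about the
 * implementation, kept here for illustration): amdgpu_wb_get() scans the
 * 'used' bitmap for a free slot, roughly:
 *
 *	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);
 *
 *	if (offset >= adev->wb.num_wb)
 *		return -EINVAL;
 *	__set_bit(offset, adev->wb.used);
 *	*wb = offset;
 *	return 0;
 */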

void amdgpu_get_pcie_info(struct amdgpu_device *adev);

/*
 * UVD
 */
#define AMDGPU_DEFAULT_UVD_HANDLES 10
#define AMDGPU_MAX_UVD_HANDLES 40
#define AMDGPU_UVD_STACK_SIZE (200*1024)
#define AMDGPU_UVD_HEAP_SIZE (256*1024)
#define AMDGPU_UVD_SESSION_SIZE (50*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256

struct amdgpu_uvd {
	struct amdgpu_bo *vcpu_bo;
	void *cpu_addr;
	uint64_t gpu_addr;
	unsigned fw_version;
	void *saved_bo;
	unsigned max_handles;
	atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work idle_work;
	const struct firmware *fw; /* UVD firmware */
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
	bool address_64_bit;
	bool use_ctx_buf;
	struct amd_sched_entity entity;
	uint32_t srbm_soft_reset;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES 16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo *vcpu_bo;
	uint64_t gpu_addr;
	unsigned fw_version;
	unsigned fb_version;
	atomic_t handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work idle_work;
	struct mutex idle_mutex;
	const struct firmware *fw; /* VCE firmware */
	struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src irq;
	unsigned harvest_config;
	struct amd_sched_entity entity;
	uint32_t srbm_soft_reset;
	unsigned num_rings;
};

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;

	struct amdgpu_ring ring;
	bool burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
| 1052 | /* SI DMA has a different trap IRQ number for the second engine */ |
| 1053 | struct amdgpu_irq_src trap_irq_1; |
| 1054 | #endif |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1055 | struct amdgpu_irq_src trap_irq; |
| 1056 | struct amdgpu_irq_src illegal_inst_irq; |
Christian König | edf600d | 2016-05-03 15:54:54 +0200 | [diff] [blame] | 1057 | int num_instances; |
Chunming Zhou | e702a68 | 2016-07-13 10:28:56 +0800 | [diff] [blame] | 1058 | uint32_t srbm_soft_reset; |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1059 | }; |
| 1060 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1061 | /* |
| 1062 | * Firmware |
| 1063 | */ |
| 1064 | struct amdgpu_firmware { |
| 1065 | struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM]; |
| 1066 | bool smu_load; |
| 1067 | struct amdgpu_bo *fw_buf; |
| 1068 | unsigned int fw_size; |
| 1069 | }; |
| 1070 | |
| 1071 | /* |
| 1072 | * Benchmarking |
| 1073 | */ |
| 1074 | void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); |
| 1075 | |
| 1076 | |
| 1077 | /* |
| 1078 | * Testing |
| 1079 | */ |
| 1080 | void amdgpu_test_moves(struct amdgpu_device *adev); |
| 1081 | void amdgpu_test_ring_sync(struct amdgpu_device *adev, |
| 1082 | struct amdgpu_ring *cpA, |
| 1083 | struct amdgpu_ring *cpB); |
| 1084 | void amdgpu_test_syncing(struct amdgpu_device *adev); |
| 1085 | |
| 1086 | /* |
| 1087 | * MMU Notifier |
| 1088 | */ |
| 1089 | #if defined(CONFIG_MMU_NOTIFIER) |
| 1090 | int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); |
| 1091 | void amdgpu_mn_unregister(struct amdgpu_bo *bo); |
| 1092 | #else |
Harry Wentland | 1d1106b | 2015-07-15 07:10:41 -0400 | [diff] [blame] | 1093 | static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1094 | { |
| 1095 | return -ENODEV; |
| 1096 | } |
Harry Wentland | 1d1106b | 2015-07-15 07:10:41 -0400 | [diff] [blame] | 1097 | static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1098 | #endif |
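/*
 * Illustrative sketch (not part of the driver): a userptr BO registering its
 * CPU address range so that CPU-side unmaps invalidate the GPU mapping.
 * "args->addr" is a hypothetical ioctl argument.
 *
 *	r = amdgpu_mn_register(bo, args->addr);
 *	if (r)		// -ENODEV when CONFIG_MMU_NOTIFIER is not enabled
 *		goto error_free;
 */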
| 1099 | |
| 1100 | /* |
| 1101 | * Debugfs |
| 1102 | */ |
| 1103 | struct amdgpu_debugfs { |
Nils Wallménius | 06ab683 | 2016-05-02 12:46:15 -0400 | [diff] [blame] | 1104 | const struct drm_info_list *files; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1105 | unsigned num_files; |
| 1106 | }; |
| 1107 | |
| 1108 | int amdgpu_debugfs_add_files(struct amdgpu_device *adev, |
Nils Wallménius | 06ab683 | 2016-05-02 12:46:15 -0400 | [diff] [blame] | 1109 | const struct drm_info_list *files, |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1110 | unsigned nfiles); |
| 1111 | int amdgpu_debugfs_fence_init(struct amdgpu_device *adev); |
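/*
 * Example (illustrative only; all "my_block" names are hypothetical): an IP
 * block exposing one debugfs file through amdgpu_debugfs_add_files().
 *
 *	static int my_block_debugfs_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_info_node *node = (struct drm_info_node *)m->private;
 *		struct drm_device *dev = node->minor->dev;
 *		struct amdgpu_device *adev = dev->dev_private;
 *
 *		seq_printf(m, "gpu_reset_counter: %d\n",
 *			   atomic_read(&adev->gpu_reset_counter));
 *		return 0;
 *	}
 *
 *	static const struct drm_info_list my_block_debugfs_list[] = {
 *		{ "my_block_info", my_block_debugfs_show, 0, NULL },
 *	};
 *
 *	// typically called from the IP block's sw_init callback
 *	amdgpu_debugfs_add_files(adev, my_block_debugfs_list,
 *				 ARRAY_SIZE(my_block_debugfs_list));
 */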
| 1112 | |
| 1113 | #if defined(CONFIG_DEBUG_FS) |
| 1114 | int amdgpu_debugfs_init(struct drm_minor *minor); |
| 1115 | void amdgpu_debugfs_cleanup(struct drm_minor *minor); |
| 1116 | #endif |
| 1117 | |
Huang Rui | 50ab253 | 2016-06-12 15:51:09 +0800 | [diff] [blame] | 1118 | int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev); |
| 1119 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1120 | /* |
| 1121 | * amdgpu smumgr functions |
| 1122 | */ |
| 1123 | struct amdgpu_smumgr_funcs { |
| 1124 | int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype); |
| 1125 | int (*request_smu_load_fw)(struct amdgpu_device *adev); |
| 1126 | int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype); |
| 1127 | }; |
| 1128 | |
| 1129 | /* |
| 1130 | * amdgpu smumgr |
| 1131 | */ |
| 1132 | struct amdgpu_smumgr { |
| 1133 | struct amdgpu_bo *toc_buf; |
| 1134 | struct amdgpu_bo *smu_buf; |
| 1135 | /* ASIC-private SMU data */ |
| 1136 | void *priv; |
| 1137 | spinlock_t smu_lock; |
| 1138 | /* smumgr functions */ |
| 1139 | const struct amdgpu_smumgr_funcs *smumgr_funcs; |
| 1140 | /* ucode loading complete flag */ |
| 1141 | uint32_t fw_flags; |
| 1142 | }; |
| 1143 | |
| 1144 | /* |
| 1145 | * ASIC specific register table accessible by UMD |
| 1146 | */ |
| 1147 | struct amdgpu_allowed_register_entry { |
| 1148 | uint32_t reg_offset; |
| 1149 | bool untouched; |
| 1150 | bool grbm_indexed; |
| 1151 | }; |
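/*
 * Illustrative whitelist entry (the register name is hypothetical, not taken
 * from any real ASIC header); fields are reg_offset, untouched, grbm_indexed:
 *
 *	static const struct amdgpu_allowed_register_entry foo_allowed_regs[] = {
 *		{ mmFOO_STATUS, false, false },
 *	};
 */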
| 1152 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1153 | /* |
| 1154 | * ASIC specific functions. |
| 1155 | */ |
| 1156 | struct amdgpu_asic_funcs { |
| 1157 | bool (*read_disabled_bios)(struct amdgpu_device *adev); |
Alex Deucher | 7946b87 | 2015-11-24 10:14:28 -0500 | [diff] [blame] | 1158 | bool (*read_bios_from_rom)(struct amdgpu_device *adev, |
| 1159 | u8 *bios, u32 length_bytes); |
Monk Liu | 4e99a44 | 2016-03-31 13:26:59 +0800 | [diff] [blame] | 1160 | void (*detect_hw_virtualization) (struct amdgpu_device *adev); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1161 | int (*read_register)(struct amdgpu_device *adev, u32 se_num, |
| 1162 | u32 sh_num, u32 reg_offset, u32 *value); |
| 1163 | void (*set_vga_state)(struct amdgpu_device *adev, bool state); |
| 1164 | int (*reset)(struct amdgpu_device *adev); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1165 | /* get the reference clock */ |
| 1166 | u32 (*get_xclk)(struct amdgpu_device *adev); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1167 | /* MM block clocks */ |
| 1168 | int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); |
| 1169 | int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); |
Maruthi Bayyavarapu | 841686d | 2016-08-01 12:42:32 -0400 | [diff] [blame] | 1170 | /* static power management */ |
| 1171 | int (*get_pcie_lanes)(struct amdgpu_device *adev); |
| 1172 | void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1173 | }; |
| 1174 | |
| 1175 | /* |
| 1176 | * IOCTL. |
| 1177 | */ |
| 1178 | int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, |
| 1179 | struct drm_file *filp); |
| 1180 | int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data, |
| 1181 | struct drm_file *filp); |
| 1182 | |
| 1183 | int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data, |
| 1184 | struct drm_file *filp); |
| 1185 | int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, |
| 1186 | struct drm_file *filp); |
| 1187 | int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, |
| 1188 | struct drm_file *filp); |
| 1189 | int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, |
| 1190 | struct drm_file *filp); |
| 1191 | int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, |
| 1192 | struct drm_file *filp); |
| 1193 | int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, |
| 1194 | struct drm_file *filp); |
| 1195 | int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
| 1196 | int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
| 1197 | |
| 1198 | int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, |
| 1199 | struct drm_file *filp); |
| 1200 | |
| 1201 | /* VRAM scratch page for HDP bug, default vram page */ |
| 1202 | struct amdgpu_vram_scratch { |
| 1203 | struct amdgpu_bo *robj; |
| 1204 | volatile uint32_t *ptr; |
| 1205 | u64 gpu_addr; |
| 1206 | }; |
| 1207 | |
| 1208 | /* |
| 1209 | * ACPI |
| 1210 | */ |
| 1211 | struct amdgpu_atif_notification_cfg { |
| 1212 | bool enabled; |
| 1213 | int command_code; |
| 1214 | }; |
| 1215 | |
| 1216 | struct amdgpu_atif_notifications { |
| 1217 | bool display_switch; |
| 1218 | bool expansion_mode_change; |
| 1219 | bool thermal_state; |
| 1220 | bool forced_power_state; |
| 1221 | bool system_power_state; |
| 1222 | bool display_conf_change; |
| 1223 | bool px_gfx_switch; |
| 1224 | bool brightness_change; |
| 1225 | bool dgpu_display_event; |
| 1226 | }; |
| 1227 | |
| 1228 | struct amdgpu_atif_functions { |
| 1229 | bool system_params; |
| 1230 | bool sbios_requests; |
| 1231 | bool select_active_disp; |
| 1232 | bool lid_state; |
| 1233 | bool get_tv_standard; |
| 1234 | bool set_tv_standard; |
| 1235 | bool get_panel_expansion_mode; |
| 1236 | bool set_panel_expansion_mode; |
| 1237 | bool temperature_change; |
| 1238 | bool graphics_device_types; |
| 1239 | }; |
| 1240 | |
| 1241 | struct amdgpu_atif { |
| 1242 | struct amdgpu_atif_notifications notifications; |
| 1243 | struct amdgpu_atif_functions functions; |
| 1244 | struct amdgpu_atif_notification_cfg notification_cfg; |
| 1245 | struct amdgpu_encoder *encoder_for_bl; |
| 1246 | }; |
| 1247 | |
| 1248 | struct amdgpu_atcs_functions { |
| 1249 | bool get_ext_state; |
| 1250 | bool pcie_perf_req; |
| 1251 | bool pcie_dev_rdy; |
| 1252 | bool pcie_bus_width; |
| 1253 | }; |
| 1254 | |
| 1255 | struct amdgpu_atcs { |
| 1256 | struct amdgpu_atcs_functions functions; |
| 1257 | }; |
| 1258 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1259 | /* |
Chunming Zhou | d03846a | 2015-07-28 14:20:03 -0400 | [diff] [blame] | 1260 | * CGS |
| 1261 | */ |
Dave Airlie | 110e6f2 | 2016-04-12 13:25:48 +1000 | [diff] [blame] | 1262 | struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev); |
| 1263 | void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); |
Maruthi Bayyavarapu | a8fe58c | 2015-09-22 17:05:20 -0400 | [diff] [blame] | 1264 | |
Maruthi Bayyavarapu | a8fe58c | 2015-09-22 17:05:20 -0400 | [diff] [blame] | 1265 | /* |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1266 | * Core structure, functions and helpers. |
| 1267 | */ |
| 1268 | typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); |
| 1269 | typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
| 1270 | |
| 1271 | typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); |
| 1272 | typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); |
| 1273 | |
Alex Deucher | 8faf0e08 | 2015-07-28 11:50:31 -0400 | [diff] [blame] | 1274 | struct amdgpu_ip_block_status { |
| 1275 | bool valid; |
| 1276 | bool sw; |
| 1277 | bool hw; |
Grazvydas Ignotas | 8a2eef1 | 2016-10-03 00:06:44 +0300 | [diff] [blame] | 1278 | bool late_initialized; |
Chunming Zhou | 63fbf42 | 2016-07-15 11:19:20 +0800 | [diff] [blame] | 1279 | bool hang; |
Alex Deucher | 8faf0e08 | 2015-07-28 11:50:31 -0400 | [diff] [blame] | 1280 | }; |
| 1281 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1282 | struct amdgpu_device { |
| 1283 | struct device *dev; |
| 1284 | struct drm_device *ddev; |
| 1285 | struct pci_dev *pdev; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1286 | |
Maruthi Bayyavarapu | a8fe58c | 2015-09-22 17:05:20 -0400 | [diff] [blame] | 1287 | #ifdef CONFIG_DRM_AMD_ACP |
| 1288 | struct amdgpu_acp acp; |
| 1289 | #endif |
| 1290 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1291 | /* ASIC */ |
Jammy Zhou | 2f7d10b | 2015-07-22 11:29:01 +0800 | [diff] [blame] | 1292 | enum amd_asic_type asic_type; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1293 | uint32_t family; |
| 1294 | uint32_t rev_id; |
| 1295 | uint32_t external_rev_id; |
| 1296 | unsigned long flags; |
| 1297 | int usec_timeout; |
| 1298 | const struct amdgpu_asic_funcs *asic_funcs; |
| 1299 | bool shutdown; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1300 | bool need_dma32; |
| 1301 | bool accel_working; |
Christian König | edf600d | 2016-05-03 15:54:54 +0200 | [diff] [blame] | 1302 | struct work_struct reset_work; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1303 | struct notifier_block acpi_nb; |
| 1304 | struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; |
| 1305 | struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; |
Christian König | edf600d | 2016-05-03 15:54:54 +0200 | [diff] [blame] | 1306 | unsigned debugfs_count; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1307 | #if defined(CONFIG_DEBUG_FS) |
Tom St Denis | adcec28 | 2016-04-15 13:08:44 -0400 | [diff] [blame] | 1308 | struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS]; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1309 | #endif |
| 1310 | struct amdgpu_atif atif; |
| 1311 | struct amdgpu_atcs atcs; |
| 1312 | struct mutex srbm_mutex; |
| 1313 | /* GRBM index mutex. Protects concurrent access to GRBM index */ |
| 1314 | struct mutex grbm_idx_mutex; |
| 1315 | struct dev_pm_domain vga_pm_domain; |
| 1316 | bool have_disp_power_ref; |
| 1317 | |
| 1318 | /* BIOS */ |
| 1319 | uint8_t *bios; |
| 1320 | bool is_atom_bios; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1321 | struct amdgpu_bo *stollen_vga_memory; |
| 1322 | uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; |
| 1323 | |
| 1324 | /* Register/doorbell mmio */ |
| 1325 | resource_size_t rmmio_base; |
| 1326 | resource_size_t rmmio_size; |
| 1327 | void __iomem *rmmio; |
| 1328 | /* protects concurrent MM_INDEX/DATA based register access */ |
| 1329 | spinlock_t mmio_idx_lock; |
| 1330 | /* protects concurrent SMC based register access */ |
| 1331 | spinlock_t smc_idx_lock; |
| 1332 | amdgpu_rreg_t smc_rreg; |
| 1333 | amdgpu_wreg_t smc_wreg; |
| 1334 | /* protects concurrent PCIE register access */ |
| 1335 | spinlock_t pcie_idx_lock; |
| 1336 | amdgpu_rreg_t pcie_rreg; |
| 1337 | amdgpu_wreg_t pcie_wreg; |
Huang Rui | 36b9a95 | 2016-08-31 13:23:25 +0800 | [diff] [blame] | 1338 | amdgpu_rreg_t pciep_rreg; |
| 1339 | amdgpu_wreg_t pciep_wreg; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1340 | /* protects concurrent UVD register access */ |
| 1341 | spinlock_t uvd_ctx_idx_lock; |
| 1342 | amdgpu_rreg_t uvd_ctx_rreg; |
| 1343 | amdgpu_wreg_t uvd_ctx_wreg; |
| 1344 | /* protects concurrent DIDT register access */ |
| 1345 | spinlock_t didt_idx_lock; |
| 1346 | amdgpu_rreg_t didt_rreg; |
| 1347 | amdgpu_wreg_t didt_wreg; |
Rex Zhu | ccdbb20 | 2016-06-08 12:47:41 +0800 | [diff] [blame] | 1348 | /* protects concurrent gc_cac register access */ |
| 1349 | spinlock_t gc_cac_idx_lock; |
| 1350 | amdgpu_rreg_t gc_cac_rreg; |
| 1351 | amdgpu_wreg_t gc_cac_wreg; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1352 | /* protects concurrent ENDPOINT (audio) register access */ |
| 1353 | spinlock_t audio_endpt_idx_lock; |
| 1354 | amdgpu_block_rreg_t audio_endpt_rreg; |
| 1355 | amdgpu_block_wreg_t audio_endpt_wreg; |
| 1356 | void __iomem *rio_mem; |
| 1357 | resource_size_t rio_mem_size; |
| 1358 | struct amdgpu_doorbell doorbell; |
| 1359 | |
| 1360 | /* clock/pll info */ |
| 1361 | struct amdgpu_clock clock; |
| 1362 | |
| 1363 | /* MC */ |
| 1364 | struct amdgpu_mc mc; |
| 1365 | struct amdgpu_gart gart; |
| 1366 | struct amdgpu_dummy_page dummy_page; |
| 1367 | struct amdgpu_vm_manager vm_manager; |
| 1368 | |
| 1369 | /* memory management */ |
| 1370 | struct amdgpu_mman mman; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1371 | struct amdgpu_vram_scratch vram_scratch; |
| 1372 | struct amdgpu_wb wb; |
| 1373 | atomic64_t vram_usage; |
| 1374 | atomic64_t vram_vis_usage; |
| 1375 | atomic64_t gtt_usage; |
| 1376 | atomic64_t num_bytes_moved; |
Christian König | dbd5ed6 | 2016-06-21 16:28:14 +0200 | [diff] [blame] | 1377 | atomic64_t num_evictions; |
Marek Olšák | d94aed5 | 2015-05-05 21:13:49 +0200 | [diff] [blame] | 1378 | atomic_t gpu_reset_counter; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1379 | |
Marek Olšák | 95844d2 | 2016-08-17 23:49:27 +0200 | [diff] [blame] | 1380 | /* data for buffer migration throttling */ |
| 1381 | struct { |
| 1382 | spinlock_t lock; |
| 1383 | s64 last_update_us; |
| 1384 | s64 accum_us; /* accumulated microseconds */ |
| 1385 | u32 log2_max_MBps; |
| 1386 | } mm_stats; |
| 1387 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1388 | /* display */ |
Emily Deng | 9accf2f | 2016-08-10 16:01:25 +0800 | [diff] [blame] | 1389 | bool enable_virtual_display; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1390 | struct amdgpu_mode_info mode_info; |
| 1391 | struct work_struct hotplug_work; |
| 1392 | struct amdgpu_irq_src crtc_irq; |
| 1393 | struct amdgpu_irq_src pageflip_irq; |
| 1394 | struct amdgpu_irq_src hpd_irq; |
| 1395 | |
| 1396 | /* rings */ |
Christian König | 76bf0db | 2016-06-01 15:10:02 +0200 | [diff] [blame] | 1397 | u64 fence_context; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1398 | unsigned num_rings; |
| 1399 | struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; |
| 1400 | bool ib_pool_ready; |
| 1401 | struct amdgpu_sa_manager ring_tmp_bo; |
| 1402 | |
| 1403 | /* interrupts */ |
| 1404 | struct amdgpu_irq irq; |
| 1405 | |
Alex Deucher | 1f7371b | 2015-12-02 17:46:21 -0500 | [diff] [blame] | 1406 | /* powerplay */ |
| 1407 | struct amd_powerplay powerplay; |
Jammy Zhou | e61710c | 2015-11-10 18:31:08 -0500 | [diff] [blame] | 1408 | bool pp_enabled; |
Eric Huang | f3898ea | 2015-12-11 16:24:34 -0500 | [diff] [blame] | 1409 | bool pp_force_state_enabled; |
Alex Deucher | 1f7371b | 2015-12-02 17:46:21 -0500 | [diff] [blame] | 1410 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1411 | /* dpm */ |
| 1412 | struct amdgpu_pm pm; |
| 1413 | u32 cg_flags; |
| 1414 | u32 pg_flags; |
| 1415 | |
| 1416 | /* amdgpu smumgr */ |
| 1417 | struct amdgpu_smumgr smu; |
| 1418 | |
| 1419 | /* gfx */ |
| 1420 | struct amdgpu_gfx gfx; |
| 1421 | |
| 1422 | /* sdma */ |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1423 | struct amdgpu_sdma sdma; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1424 | |
| 1425 | /* uvd */ |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1426 | struct amdgpu_uvd uvd; |
| 1427 | |
| 1428 | /* vce */ |
| 1429 | struct amdgpu_vce vce; |
| 1430 | |
| 1431 | /* firmwares */ |
| 1432 | struct amdgpu_firmware firmware; |
| 1433 | |
| 1434 | /* GDS */ |
| 1435 | struct amdgpu_gds gds; |
| 1436 | |
| 1437 | const struct amdgpu_ip_block_version *ip_blocks; |
| 1438 | int num_ip_blocks; |
Alex Deucher | 8faf0e08 | 2015-07-28 11:50:31 -0400 | [diff] [blame] | 1439 | struct amdgpu_ip_block_status *ip_block_status; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1440 | struct mutex mn_lock; |
| 1441 | DECLARE_HASHTABLE(mn_hash, 7); |
| 1442 | |
| 1443 | /* tracking pinned memory */ |
| 1444 | u64 vram_pin_size; |
Chunming Zhou | e131b91 | 2016-04-05 10:48:48 +0800 | [diff] [blame] | 1445 | u64 invisible_pin_size; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1446 | u64 gart_pin_size; |
Oded Gabbay | 130e037 | 2015-06-12 21:35:14 +0300 | [diff] [blame] | 1447 | |
| 1448 | /* amdkfd interface */ |
| 1449 | struct kfd_dev *kfd; |
Chunming Zhou | 23ca0e4 | 2015-07-06 13:42:58 +0800 | [diff] [blame] | 1450 | |
Alex Deucher | 7e471e6 | 2016-02-01 11:13:04 -0500 | [diff] [blame] | 1451 | struct amdgpu_virtualization virtualization; |
Chunming Zhou | 0c4e7fa | 2016-08-17 11:41:30 +0800 | [diff] [blame] | 1452 | |
| 1453 | /* list of all shadow BOs */ |
| 1454 | struct list_head shadow_list; |
| 1455 | struct mutex shadow_list_lock; |
Chunming Zhou | 5c1354b | 2016-08-30 16:13:10 +0800 | [diff] [blame] | 1456 | /* list of all GTT-bound buffers */ |
| 1457 | spinlock_t gtt_list_lock; |
| 1458 | struct list_head gtt_list; |
| 1459 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1460 | }; |
| 1461 | |
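/* Map the TTM bo_device embedded in an amdgpu_device back to that device. */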
Christian König | a7d64de | 2016-09-15 14:58:48 +0200 | [diff] [blame] | 1462 | static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) |
| 1463 | { |
| 1464 | return container_of(bdev, struct amdgpu_device, mman.bdev); |
| 1465 | } |
| 1466 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1467 | bool amdgpu_device_is_px(struct drm_device *dev); |
| 1468 | int amdgpu_device_init(struct amdgpu_device *adev, |
| 1469 | struct drm_device *ddev, |
| 1470 | struct pci_dev *pdev, |
| 1471 | uint32_t flags); |
| 1472 | void amdgpu_device_fini(struct amdgpu_device *adev); |
| 1473 | int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); |
| 1474 | |
| 1475 | uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, |
| 1476 | bool always_indirect); |
| 1477 | void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, |
| 1478 | bool always_indirect); |
| 1479 | u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg); |
| 1480 | void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v); |
| 1481 | |
| 1482 | u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index); |
| 1483 | void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v); |
| 1484 | |
| 1485 | /* |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1486 | * Registers read & write functions. |
| 1487 | */ |
| 1488 | #define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false) |
| 1489 | #define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true) |
| 1490 | #define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false)) |
| 1491 | #define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false) |
| 1492 | #define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true) |
| 1493 | #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
| 1494 | #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
| 1495 | #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) |
| 1496 | #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) |
Huang Rui | 36b9a95 | 2016-08-31 13:23:25 +0800 | [diff] [blame] | 1497 | #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) |
| 1498 | #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1499 | #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) |
| 1500 | #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) |
| 1501 | #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) |
| 1502 | #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) |
| 1503 | #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) |
| 1504 | #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) |
Rex Zhu | ccdbb20 | 2016-06-08 12:47:41 +0800 | [diff] [blame] | 1505 | #define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg)) |
| 1506 | #define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1507 | #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) |
| 1508 | #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) |
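/* Read-modify-write helpers: bits set in @mask keep their current value,
 * all other bits are taken from @val. */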
| 1509 | #define WREG32_P(reg, val, mask) \ |
| 1510 | do { \ |
| 1511 | uint32_t tmp_ = RREG32(reg); \ |
| 1512 | tmp_ &= (mask); \ |
| 1513 | tmp_ |= ((val) & ~(mask)); \ |
| 1514 | WREG32(reg, tmp_); \ |
| 1515 | } while (0) |
| 1516 | #define WREG32_AND(reg, and) WREG32_P(reg, 0, and) |
| 1517 | #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) |
| 1518 | #define WREG32_PLL_P(reg, val, mask) \ |
| 1519 | do { \ |
| 1520 | uint32_t tmp_ = RREG32_PLL(reg); \ |
| 1521 | tmp_ &= (mask); \ |
| 1522 | tmp_ |= ((val) & ~(mask)); \ |
| 1523 | WREG32_PLL(reg, tmp_); \ |
| 1524 | } while (0) |
| 1525 | #define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false)) |
| 1526 | #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg)) |
| 1527 | #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v)) |
| 1528 | |
| 1529 | #define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index)) |
| 1530 | #define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v)) |
| 1531 | |
| 1532 | #define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT |
| 1533 | #define REG_FIELD_MASK(reg, field) reg##__##field##_MASK |
| 1534 | |
| 1535 | #define REG_SET_FIELD(orig_val, reg, field, field_val) \ |
| 1536 | (((orig_val) & ~REG_FIELD_MASK(reg, field)) | \ |
| 1537 | (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field)))) |
| 1538 | |
| 1539 | #define REG_GET_FIELD(value, reg, field) \ |
| 1540 | (((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field)) |
| 1541 | |
Tom St Denis | 61cb8ce | 2016-08-09 10:13:21 -0400 | [diff] [blame] | 1542 | #define WREG32_FIELD(reg, field, val) \ |
| 1543 | WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) |
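/*
 * Illustrative read-modify-write sketch; the register and field names below
 * are hypothetical, not taken from any real ASIC header:
 *
 *	u32 tmp = RREG32(mmFOO_CNTL);
 *	tmp = REG_SET_FIELD(tmp, FOO_CNTL, ENABLE, 1);
 *	WREG32(mmFOO_CNTL, tmp);
 *
 * or, for a single field:
 *
 *	WREG32_FIELD(FOO_CNTL, ENABLE, 1);
 */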
| 1544 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1545 | /* |
| 1546 | * BIOS helpers. |
| 1547 | */ |
| 1548 | #define RBIOS8(i) (adev->bios[i]) |
| 1549 | #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) |
| 1550 | #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) |
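/*
 * Example (illustrative only): the RBIOS* helpers read little-endian values
 * out of the raw BIOS image at adev->bios, e.g. a 16-bit table offset stored
 * at a hypothetical byte position 0x48:
 *
 *	uint16_t tbl_offset = RBIOS16(0x48);
 */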
| 1551 | |
| 1552 | /* |
| 1553 | * RING helpers. |
| 1554 | */ |
| 1555 | static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v) |
| 1556 | { |
| 1557 | if (ring->count_dw <= 0) |
Jammy Zhou | 86c2b79 | 2015-05-13 22:52:42 +0800 | [diff] [blame] | 1558 | DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n"); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1559 | ring->ring[ring->wptr++] = v; |
| 1560 | ring->wptr &= ring->ptr_mask; |
| 1561 | ring->count_dw--; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1562 | } |
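/*
 * Typical (illustrative) use from a ring backend, assuming space has already
 * been reserved on the ring:
 *
 *	amdgpu_ring_write(ring, lower_32_bits(addr));
 *	amdgpu_ring_write(ring, upper_32_bits(addr));
 */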
| 1563 | |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1564 | static inline struct amdgpu_sdma_instance * |
| 1565 | amdgpu_get_sdma_instance(struct amdgpu_ring *ring) |
Jammy Zhou | 4b2f7e2 | 2015-09-01 12:56:17 +0800 | [diff] [blame] | 1566 | { |
| 1567 | struct amdgpu_device *adev = ring->adev; |
| 1568 | int i; |
| 1569 | |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1570 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1571 | if (&adev->sdma.instance[i].ring == ring) |
Jammy Zhou | 4b2f7e2 | 2015-09-01 12:56:17 +0800 | [diff] [blame] | 1572 | break; |
| 1573 | |
| 1574 | if (i < adev->sdma.num_instances) |
Alex Deucher | c113ea1 | 2015-10-08 16:30:37 -0400 | [diff] [blame] | 1575 | return &adev->sdma.instance[i]; |
Jammy Zhou | 4b2f7e2 | 2015-09-01 12:56:17 +0800 | [diff] [blame] | 1576 | else |
| 1577 | return NULL; |
| 1578 | } |
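/*
 * Illustrative use from SDMA code: look up the per-instance data for the
 * ring being programmed.
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		...emit a burst NOP instead of individual NOPs...
 */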
| 1579 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1580 | /* |
| 1581 | * ASIC macros. |
| 1582 | */ |
| 1583 | #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) |
| 1584 | #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1585 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) |
| 1586 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) |
| 1587 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) |
Maruthi Bayyavarapu | 841686d | 2016-08-01 12:42:32 -0400 | [diff] [blame] | 1588 | #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) |
| 1589 | #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) |
| 1590 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1591 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) |
Alex Deucher | 7946b87 | 2015-11-24 10:14:28 -0500 | [diff] [blame] | 1592 | #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) |
Monk Liu | 4e99a44 | 2016-03-31 13:26:59 +0800 | [diff] [blame] | 1593 | #define amdgpu_asic_detect_hw_virtualization(adev) (adev)->asic_funcs->detect_hw_virtualization((adev)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1594 | #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1595 | #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) |
| 1596 | #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) |
| 1597 | #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) |
Christian König | de9ea7b | 2016-08-12 11:33:30 +0200 | [diff] [blame] | 1598 | #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1599 | #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1600 | #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) |
| 1601 | #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) |
Christian König | bbec97a | 2016-07-05 21:07:17 +0200 | [diff] [blame] | 1602 | #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1603 | #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) |
| 1604 | #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) |
| 1605 | #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) |
Christian König | d88bf58 | 2016-05-06 17:50:03 +0200 | [diff] [blame] | 1606 | #define amdgpu_ring_emit_ib(r, ib, vm_id, c) (r)->funcs->emit_ib((r), (ib), (vm_id), (c)) |
Christian König | b8c7b39 | 2016-03-01 15:42:52 +0100 | [diff] [blame] | 1607 | #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1608 | #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) |
Chunming Zhou | 890ee23 | 2015-06-01 14:35:03 +0800 | [diff] [blame] | 1609 | #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1610 | #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) |
Christian König | d2edb07 | 2015-05-11 14:10:34 +0200 | [diff] [blame] | 1611 | #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) |
Chunming Zhou | 11afbde | 2016-03-03 11:38:48 +0800 | [diff] [blame] | 1612 | #define amdgpu_ring_emit_hdp_invalidate(r) (r)->funcs->emit_hdp_invalidate((r)) |
Monk Liu | c2167a6 | 2016-08-26 14:12:37 +0800 | [diff] [blame] | 1613 | #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) |
Monk Liu | 753ad49 | 2016-08-26 13:28:28 +0800 | [diff] [blame] | 1614 | #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) |
Christian König | 9e5d5309 | 2016-01-31 12:20:55 +0100 | [diff] [blame] | 1615 | #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) |
Monk Liu | 03ccf48 | 2016-01-14 19:07:38 +0800 | [diff] [blame] | 1616 | #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) |
| 1617 | #define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1618 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) |
| 1619 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) |
| 1620 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) |
| 1621 | #define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) |
| 1622 | #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) |
| 1623 | #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) |
| 1624 | #define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev)) |
| 1625 | #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) |
| 1626 | #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) |
| 1627 | #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) |
| 1628 | #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) |
| 1629 | #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) |
| 1630 | #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) |
Alex Deucher | cb9e59d | 2016-05-05 16:03:57 -0400 | [diff] [blame] | 1631 | #define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1632 | #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) |
| 1633 | #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) |
| 1634 | #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) |
| 1635 | #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) |
| 1636 | #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) |
Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 1637 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) |
Chunming Zhou | 6e7a384 | 2015-08-27 13:46:09 +0800 | [diff] [blame] | 1638 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
Alex Deucher | b95e31f | 2016-07-07 15:01:42 -0400 | [diff] [blame] | 1639 | #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) |
Tom St Denis | 9559ef5 | 2016-06-28 10:26:48 -0400 | [diff] [blame] | 1640 | #define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance)) |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1641 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) |
| 1642 | |
| 1643 | /* Common functions */ |
| 1644 | int amdgpu_gpu_reset(struct amdgpu_device *adev); |
Chunming Zhou | 3ad81f1 | 2016-08-05 17:30:17 +0800 | [diff] [blame] | 1645 | bool amdgpu_need_backup(struct amdgpu_device *adev); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1646 | void amdgpu_pci_config_reset(struct amdgpu_device *adev); |
| 1647 | bool amdgpu_card_posted(struct amdgpu_device *adev); |
| 1648 | void amdgpu_update_display_priority(struct amdgpu_device *adev); |
Chunming Zhou | d5fc5e8 | 2015-07-21 16:52:10 +0800 | [diff] [blame] | 1649 | |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1650 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); |
| 1651 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, |
| 1652 | u32 ip_instance, u32 ring, |
| 1653 | struct amdgpu_ring **out_ring); |
Christian König | 765e7fb | 2016-09-15 15:06:50 +0200 | [diff] [blame] | 1654 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1655 | bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1656 | int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1657 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, |
| 1658 | uint32_t flags); |
| 1659 | bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); |
Christian König | cc325d1 | 2016-02-08 11:08:35 +0100 | [diff] [blame] | 1660 | struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); |
Christian König | d700696 | 2016-02-08 10:57:22 +0100 | [diff] [blame] | 1661 | bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, |
| 1662 | unsigned long end); |
Christian König | 2f568db | 2016-02-23 12:36:59 +0100 | [diff] [blame] | 1663 | bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm, |
| 1664 | int *last_invalidated); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1665 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); |
| 1666 | uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, |
| 1667 | struct ttm_mem_reg *mem); |
| 1668 | void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); |
| 1669 | void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); |
| 1670 | void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); |
Ken Wang | a693e05 | 2016-07-27 19:18:01 +0800 | [diff] [blame] | 1671 | u64 amdgpu_ttm_get_gtt_mem_size(struct amdgpu_device *adev); |
| 1672 | int amdgpu_ttm_global_init(struct amdgpu_device *adev); |
Baoyou Xie | 9f31a0b0 | 2016-09-15 21:43:26 +0800 | [diff] [blame] | 1673 | int amdgpu_ttm_init(struct amdgpu_device *adev); |
| 1674 | void amdgpu_ttm_fini(struct amdgpu_device *adev); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1675 | void amdgpu_program_register_sequence(struct amdgpu_device *adev, |
| 1676 | const u32 *registers, |
| 1677 | const u32 array_size); |
| 1678 | |
| 1679 | bool amdgpu_device_is_px(struct drm_device *dev); |
| 1680 | /* atpx handler */ |
| 1681 | #if defined(CONFIG_VGA_SWITCHEROO) |
| 1682 | void amdgpu_register_atpx_handler(void); |
| 1683 | void amdgpu_unregister_atpx_handler(void); |
Alex Deucher | a78fe13 | 2016-06-01 13:08:21 -0400 | [diff] [blame] | 1684 | bool amdgpu_has_atpx_dgpu_power_cntl(void); |
Alex Deucher | 2f5af82 | 2016-06-02 09:04:01 -0400 | [diff] [blame] | 1685 | bool amdgpu_is_atpx_hybrid(void); |
Alex Deucher | efc83cf | 2016-09-14 14:01:41 -0400 | [diff] [blame] | 1686 | bool amdgpu_atpx_dgpu_req_power_for_displays(void); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1687 | #else |
| 1688 | static inline void amdgpu_register_atpx_handler(void) {} |
| 1689 | static inline void amdgpu_unregister_atpx_handler(void) {} |
Alex Deucher | a78fe13 | 2016-06-01 13:08:21 -0400 | [diff] [blame] | 1690 | static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } |
Alex Deucher | 2f5af82 | 2016-06-02 09:04:01 -0400 | [diff] [blame] | 1691 | static inline bool amdgpu_is_atpx_hybrid(void) { return false; } |
Alex Deucher | efc83cf | 2016-09-14 14:01:41 -0400 | [diff] [blame] | 1692 | static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1693 | #endif |
| 1694 | |
| 1695 | /* |
| 1696 | * KMS |
| 1697 | */ |
| 1698 | extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; |
Nils Wallménius | f498d9e | 2016-04-10 16:29:59 +0200 | [diff] [blame] | 1699 | extern const int amdgpu_max_kms_ioctl; |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1700 | |
| 1701 | int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); |
| 1702 | int amdgpu_driver_unload_kms(struct drm_device *dev); |
| 1703 | void amdgpu_driver_lastclose_kms(struct drm_device *dev); |
| 1704 | int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); |
| 1705 | void amdgpu_driver_postclose_kms(struct drm_device *dev, |
| 1706 | struct drm_file *file_priv); |
| 1707 | void amdgpu_driver_preclose_kms(struct drm_device *dev, |
| 1708 | struct drm_file *file_priv); |
Alex Deucher | 810ddc3 | 2016-08-23 13:25:49 -0400 | [diff] [blame] | 1709 | int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); |
| 1710 | int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); |
Thierry Reding | 88e7271 | 2015-09-24 18:35:31 +0200 | [diff] [blame] | 1711 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); |
| 1712 | int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
| 1713 | void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); |
| 1714 | int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe, |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1715 | int *max_error, |
| 1716 | struct timeval *vblank_time, |
| 1717 | unsigned flags); |
| 1718 | long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, |
| 1719 | unsigned long arg); |
| 1720 | |
| 1721 | /* |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1722 | * functions used by amdgpu_encoder.c |
| 1723 | */ |
| 1724 | struct amdgpu_afmt_acr { |
| 1725 | u32 clock; |
| 1726 | |
| 1727 | int n_32khz; |
| 1728 | int cts_32khz; |
| 1729 | |
| 1730 | int n_44_1khz; |
| 1731 | int cts_44_1khz; |
| 1732 | |
| 1733 | int n_48khz; |
| 1734 | int cts_48khz; |
| 1735 | |
| 1736 | }; |
| 1737 | |
| 1738 | struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); |
| 1739 | |
| 1740 | /* amdgpu_acpi.c */ |
| 1741 | #if defined(CONFIG_ACPI) |
| 1742 | int amdgpu_acpi_init(struct amdgpu_device *adev); |
| 1743 | void amdgpu_acpi_fini(struct amdgpu_device *adev); |
| 1744 | bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); |
| 1745 | int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, |
| 1746 | u8 perf_req, bool advertise); |
| 1747 | int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); |
| 1748 | #else |
| 1749 | static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } |
| 1750 | static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } |
| 1751 | #endif |
| 1752 | |
| 1753 | struct amdgpu_bo_va_mapping * |
| 1754 | amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, |
| 1755 | uint64_t addr, struct amdgpu_bo **bo); |
Christian König | c855e25 | 2016-09-05 17:00:57 +0200 | [diff] [blame] | 1756 | int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser); |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1757 | |
| 1758 | #include "amdgpu_object.h" |
Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1759 | #endif |