/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"

#include "gpu_scheduler.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern int amdgpu_hard_reset;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_enable_semaphores;

#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
#define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT		(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE			16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS		32
#define AMDGPUFB_CONN_LIMIT			4
#define AMDGPU_BIOS_NUM_SCRATCH			8

/* max number of rings */
#define AMDGPU_MAX_RINGS			16
#define AMDGPU_MAX_GFX_RINGS			1
#define AMDGPU_MAX_COMPUTE_RINGS		8
#define AMDGPU_MAX_VCE_RINGS			2

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES		2

/* number of hw syncs before falling back on blocking */
#define AMDGPU_NUM_SYNCS			4

/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE			(8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA                  0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX			(1 << 0)
#define AMDGPU_RESET_COMPUTE			(1 << 1)
#define AMDGPU_RESET_DMA			(1 << 2)
#define AMDGPU_RESET_CP				(1 << 3)
#define AMDGPU_RESET_GRBM			(1 << 4)
#define AMDGPU_RESET_DMA1			(1 << 5)
#define AMDGPU_RESET_RLC			(1 << 6)
#define AMDGPU_RESET_SEM			(1 << 7)
#define AMDGPU_RESET_IH				(1 << 8)
#define AMDGPU_RESET_VMC			(1 << 9)
#define AMDGPU_RESET_MC				(1 << 10)
#define AMDGPU_RESET_DISPLAY			(1 << 11)
#define AMDGPU_RESET_UVD			(1 << 12)
#define AMDGPU_RESET_VCE			(1 << 13)
#define AMDGPU_RESET_VCE1			(1 << 14)

/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX			(1 << 0)
#define AMDGPU_CG_BLOCK_MC			(1 << 1)
#define AMDGPU_CG_BLOCK_SDMA			(1 << 2)
#define AMDGPU_CG_BLOCK_UVD			(1 << 3)
#define AMDGPU_CG_BLOCK_VCE			(1 << 4)
#define AMDGPU_CG_BLOCK_HDP			(1 << 5)
#define AMDGPU_CG_BLOCK_BIF			(1 << 6)

/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG		(1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS		(1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG		(1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS		(1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS		(1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS		(1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS		(1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS		(1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS			(1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG		(1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS		(1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG		(1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS		(1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG		(1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG		(1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS		(1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG		(1 << 16)

/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG		(1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG		(1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG		(1 << 2)
#define AMDGPU_PG_SUPPORT_UVD			(1 << 3)
#define AMDGPU_PG_SUPPORT_VCE			(1 << 4)
#define AMDGPU_PG_SUPPORT_CP			(1 << 5)
#define AMDGPU_PG_SUPPORT_GDS			(1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS		(1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA			(1 << 8)
#define AMDGPU_PG_SUPPORT_ACP			(1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU			(1 << 10)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE			0x00000000L
#define AMDGPU_GFX_SAFE_MODE			0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE		0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE		0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE		0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				  enum amd_ip_block_type block_type,
				  enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

/* provided by hw blocks that can move/clear data.  e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
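
/*
 * Illustrative sketch (an assumption about the callers, not part of this
 * header): users of these hooks are expected to split a large request into
 * chunks of at most copy_max_bytes, reserving copy_num_dw IB dwords per
 * chunk, roughly:
 *
 *	while (byte_count) {
 *		uint32_t cur = min(byte_count, funcs->copy_max_bytes);
 *
 *		funcs->emit_copy_buffer(ib, src_offset, dst_offset, cur);
 *		src_offset += cur;
 *		dst_offset += cur;
 *		byte_count -= cur;
 *	}
 */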

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ib *ib);
};
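
/*
 * Illustrative semantics (assuming 8-byte PTEs, which this header does not
 * spell out): write_pte(ib, pe, addr, count, incr, flags) emits "count"
 * entries starting at GPU address "pe", 8 bytes apart, mapping
 * addr, addr + incr, addr + 2 * incr, and so on.  For example,
 *
 *	funcs->write_pte(ib, pe, addr, 3, 0x1000, flags);
 *
 * would map three consecutive 4 KB pages.
 */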

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, unsigned flags);
	bool (*emit_semaphore)(struct amdgpu_ring *ring,
			       struct amdgpu_semaphore *semaphore,
			       bool emit_wait);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	bool (*is_lockup)(struct amdgpu_ring *ring);
	/* insert NOP packets */
	void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	struct amdgpu_ring		*ring;
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
	atomic64_t			last_seq;
	bool				initialized;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct delayed_work		lockup_work;
	wait_queue_head_t		fence_queue;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void*)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void*)1ul)
#define AMDGPU_FENCE_OWNER_MOVE		((void*)2ul)

#define AMDGPU_FENCE_FLAG_64BIT         (1 << 0)
#define AMDGPU_FENCE_FLAG_INT           (1 << 1)

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
	uint64_t			seq;

	/* filp or special value for fence creator */
	void				*owner;

	wait_queue_t			fence_wake;
};

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo	*bo;
	/* write-back address offset to bo start */
	uint32_t                offset;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
				  struct fence **array,
				  uint32_t count,
				  bool intr,
				  signed long t);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);

bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);

static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
						      struct amdgpu_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
					   struct amdgpu_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	BUG_ON(a->ring != b->ring);

	return a->seq < b->seq;
}

int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
			   void *owner, struct amdgpu_fence **fence);

/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref        bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*vram;
	struct dentry			*gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo		*robj;
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	unsigned			prefered_domains;
	unsigned			allowed_domains;
	uint32_t			priority;
};

struct amdgpu_bo_va_mapping {
	struct list_head		list;
	struct interval_tree_node	it;
	uint64_t			offset;
	uint32_t			flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;
	struct fence			*last_pt_update;
	unsigned			ref_count;

	/* protected by vm mutex and spinlock */
	struct list_head		vm_status;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;
};

#define AMDGPU_GEM_DOMAIN_MAX		0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				initial_domain;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	void				*kptr;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct amdgpu_device		*adev;
	struct drm_gem_object		gem_base;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	pid_t				pid;
	struct amdgpu_mn		*mn;
	struct list_head		mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)
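
/*
 * Example usage (illustrative): in a GEM or prime callback that receives
 * the embedded DRM object, the wrapping buffer object is recovered with:
 *
 *	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 */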

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
				struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
				struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffers or semaphores, which both have their own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size.  If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub-object until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
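
/*
 * A minimal sketch of the end-of-buffer check described above (pseudo-code,
 * not part of this header; field names follow struct amdgpu_sa_manager and
 * struct amdgpu_sa_bo below, where eoffset is an object's end offset):
 *
 *	last = list_last_entry(&sa_manager->olist, struct amdgpu_sa_bo, olist);
 *	if (sa_manager->size - last->eoffset >= size)
 *		... place the new sub-allocation at offset last->eoffset ...
 */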
struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_MAX_RINGS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

struct amdgpu_sa_bo;

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct fence			*fence;
};

/*
 * GEM objects.
 */
struct amdgpu_gem {
	struct mutex		mutex;
	struct list_head	objects;
};

int amdgpu_gem_init(struct amdgpu_device *adev);
void amdgpu_gem_fini(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
				int alignment, u32 initial_domain,
				u64 flags, bool kernel,
				struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Semaphores.
 */
struct amdgpu_semaphore {
	struct amdgpu_sa_bo	*sa_bo;
	signed			waiters;
	uint64_t		gpu_addr;
};

int amdgpu_semaphore_create(struct amdgpu_device *adev,
			    struct amdgpu_semaphore **semaphore);
bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
				  struct amdgpu_semaphore *semaphore);
bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
				struct amdgpu_semaphore *semaphore);
void amdgpu_semaphore_free(struct amdgpu_device *adev,
			   struct amdgpu_semaphore **semaphore,
			   struct fence *fence);

/*
 * Synchronization
 */
struct amdgpu_sync {
	struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
	DECLARE_HASHTABLE(fences, 4);
	struct fence		*last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *f);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring);
struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct fence *fence);

/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)
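/* e.g. AMDGPU_GPU_PAGE_ALIGN(4097) == (4097 + 4095) & ~4095 == 8192 */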

struct amdgpu_gart {
	dma_addr_t			table_addr;
	struct amdgpu_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				ready;
	const struct amdgpu_gart_funcs *gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64                     gtt_base_align;
	u64                     mc_mask;
	const struct firmware   *fw;	/* MC firmware */
	uint32_t                fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ                     = 0x000,
	AMDGPU_DOORBELL_HIQ                     = 0x001,
	AMDGPU_DOORBELL_DIQ                     = 0x002,
	AMDGPU_DOORBELL_MEC_RING0               = 0x010,
	AMDGPU_DOORBELL_MEC_RING1               = 0x011,
	AMDGPU_DOORBELL_MEC_RING2               = 0x012,
	AMDGPU_DOORBELL_MEC_RING3               = 0x013,
	AMDGPU_DOORBELL_MEC_RING4               = 0x014,
	AMDGPU_DOORBELL_MEC_RING5               = 0x015,
	AMDGPU_DOORBELL_MEC_RING6               = 0x016,
	AMDGPU_DOORBELL_MEC_RING7               = 0x017,
	AMDGPU_DOORBELL_GFX_RING0               = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0            = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1            = 0x1E1,
	AMDGPU_DOORBELL_IH                      = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT          = 0x3FF,
	AMDGPU_DOORBELL_INVALID                 = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t		base;
	resource_size_t		size;
	u32 __iomem		*ptr;
	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				phys_addr_t *aperture_base,
				size_t *aperture_size,
				size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct work_struct		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_rbo;
	struct fence			*excl;
	unsigned			shared_count;
	struct fence			**shared;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	struct amdgpu_ring		*ring;
	struct amdgpu_fence		*fence;
	struct amdgpu_user_fence        *user;
	struct amdgpu_vm		*vm;
	struct amdgpu_ctx		*ctx;
	struct amdgpu_sync		sync;
	uint32_t			gds_base, gds_size;
	uint32_t			gws_base, gws_size;
	uint32_t			oa_base, oa_size;
	uint32_t			flags;
	/* resulting sequence number */
	uint64_t			sequence;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

extern struct amd_sched_backend_ops amdgpu_sched_ops;

int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
					 struct amdgpu_ring *ring,
					 struct amdgpu_ib *ibs,
					 unsigned num_ibs,
					 int (*free_job)(struct amdgpu_job *),
					 void *owner,
					 struct fence **fence);

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;
	struct amd_gpu_scheduler	*scheduler;

	spinlock_t              fence_lock;
	struct mutex		*ring_lock;
	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			next_rptr_gpu_addr;
	volatile u32		*next_rptr_cpu_addr;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
	atomic_t		last_rptr;
	atomic64_t		last_activity;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	bool			ready;
	u32			nop;
	u32			idx;
	u64			last_semaphore_signal_addr;
	u64			last_semaphore_wait_addr;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	u32			doorbell_index;
	bool			use_doorbell;
	unsigned		wptr_offs;
	unsigned		next_rptr_offs;
	unsigned		fence_offs;
	struct amdgpu_ctx	*current_ctx;
	enum amdgpu_ring_type	type;
	char			name[16];
	bool                    is_pte_ring;
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
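/* e.g. with amdgpu_vm_block_size == 9, each page table holds 512 entries
 * and covers 512 * 4 KB = 2 MB of address space
 */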

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4
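/* The fragment field (starting at bit 7) is assumed to encode
 * log2(fragment size / 4 KB): a 64 KB fragment spans 16 pages, hence the
 * value 4 above, matching AMDGPU_LOG2_PAGES_PER_FRAG.
 */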
 | 962 |  | 
 | 963 | struct amdgpu_vm_pt { | 
 | 964 | 	struct amdgpu_bo		*bo; | 
 | 965 | 	uint64_t			addr; | 
 | 966 | }; | 
 | 967 |  | 
 | 968 | struct amdgpu_vm_id { | 
 | 969 | 	unsigned		id; | 
 | 970 | 	uint64_t		pd_gpu_addr; | 
 | 971 | 	/* last flushed PD/PT update */ | 
| Chunming Zhou | 3c62338 | 2015-08-20 18:33:59 +0800 | [diff] [blame] | 972 | 	struct fence	        *flushed_updates; | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 973 | 	/* last use of vmid */ | 
 | 974 | 	struct amdgpu_fence	*last_id_use; | 
 | 975 | }; | 
 | 976 |  | 
 | 977 | struct amdgpu_vm { | 
 | 978 | 	struct mutex		mutex; | 
 | 979 |  | 
 | 980 | 	struct rb_root		va; | 
 | 981 |  | 
| Christian König | 7fc1195 | 2015-07-30 11:53:42 +0200 | [diff] [blame] | 982 | 	/* protecting invalidated */ | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 983 | 	spinlock_t		status_lock; | 
 | 984 |  | 
 | 985 | 	/* BOs moved, but not yet updated in the PT */ | 
 | 986 | 	struct list_head	invalidated; | 
 | 987 |  | 
| Christian König | 7fc1195 | 2015-07-30 11:53:42 +0200 | [diff] [blame] | 988 | 	/* BOs cleared in the PT because of a move */ | 
 | 989 | 	struct list_head	cleared; | 
 | 990 |  | 
 | 991 | 	/* BO mappings freed, but not yet updated in the PT */ | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 992 | 	struct list_head	freed; | 
 | 993 |  | 
 | 994 | 	/* contains the page directory */ | 
 | 995 | 	struct amdgpu_bo	*page_directory; | 
 | 996 | 	unsigned		max_pde_used; | 
| Bas Nieuwenhuizen | 05906de | 2015-08-14 20:08:40 +0200 | [diff] [blame] | 997 | 	struct fence		*page_directory_fence; | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 998 |  | 
 | 999 | 	/* array of page tables, one for each page directory entry */ | 
 | 1000 | 	struct amdgpu_vm_pt	*page_tables; | 
 | 1001 |  | 
 | 1002 | 	/* for id and flush management per ring */ | 
 | 1003 | 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS]; | 
 | 1004 | }; | 
 | 1005 |  | 
 | 1006 | struct amdgpu_vm_manager { | 
 | 1007 | 	struct amdgpu_fence		*active[AMDGPU_NUM_VM]; | 
 | 1008 | 	uint32_t			max_pfn; | 
 | 1009 | 	/* number of VMIDs */ | 
 | 1010 | 	unsigned			nvm; | 
 | 1011 | 	/* vram base address for page table entry  */ | 
 | 1012 | 	u64				vram_base_offset; | 
 | 1013 | 	/* is vm enabled? */ | 
 | 1014 | 	bool				enabled; | 
 | 1015 | 	/* for hw to save the PD addr on suspend/resume */ | 
 | 1016 | 	uint32_t			saved_table_addr[AMDGPU_NUM_VM]; | 
 | 1017 | 	/* vm pte handling */ | 
 | 1018 | 	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs; | 
 | 1019 | 	struct amdgpu_ring                      *vm_pte_funcs_ring; | 
 | 1020 | }; | 
 | 1021 |  | 
 | 1022 | /* | 
 | 1023 |  * context related structures | 
 | 1024 |  */ | 
 | 1025 |  | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1026 | #define AMDGPU_CTX_MAX_CS_PENDING	16 | 
 | 1027 |  | 
 | 1028 | struct amdgpu_ctx_ring { | 
| Christian König | 91404fb | 2015-08-05 18:33:21 +0200 | [diff] [blame] | 1029 | 	uint64_t		sequence; | 
 | 1030 | 	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING]; | 
 | 1031 | 	struct amd_sched_entity	entity; | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1032 | }; | 
 | 1033 |  | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1034 | struct amdgpu_ctx { | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1035 | 	struct kref		refcount; | 
| Chunming Zhou | 9cb7e5a | 2015-07-21 13:17:19 +0800 | [diff] [blame] | 1036 | 	struct amdgpu_device    *adev; | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1037 | 	unsigned		reset_counter; | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1038 | 	spinlock_t		ring_lock; | 
 | 1039 | 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS]; | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1040 | }; | 
 | 1041 |  | 
 | 1042 | struct amdgpu_ctx_mgr { | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1043 | 	struct amdgpu_device	*adev; | 
 | 1044 | 	struct mutex		lock; | 
 | 1045 | 	/* protected by lock */ | 
 | 1046 | 	struct idr		ctx_handles; | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1047 | }; | 
 | 1048 |  | 
| Christian König | 47f3850 | 2015-08-04 17:51:05 +0200 | [diff] [blame] | 1049 | int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel, | 
 | 1050 | 		    struct amdgpu_ctx *ctx); | 
 | 1051 | void amdgpu_ctx_fini(struct amdgpu_ctx *ctx); | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1052 |  | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1053 | struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id); | 
 | 1054 | int amdgpu_ctx_put(struct amdgpu_ctx *ctx); | 
 | 1055 |  | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1056 | uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring, | 
| Christian König | ce882e6 | 2015-08-19 15:00:55 +0200 | [diff] [blame] | 1057 | 			      struct fence *fence); | 
| Christian König | 21c16bf | 2015-07-07 17:24:49 +0200 | [diff] [blame] | 1058 | struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx, | 
 | 1059 | 				   struct amdgpu_ring *ring, uint64_t seq); | 
 | 1060 |  | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1061 | int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, | 
 | 1062 | 		     struct drm_file *filp); | 
 | 1063 |  | 
| Christian König | efd4ccb | 2015-08-04 16:20:31 +0200 | [diff] [blame] | 1064 | void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr); | 
 | 1065 | void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1066 |  | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1067 | /* | 
 | 1068 |  * file private structure | 
 | 1069 |  */ | 
 | 1070 |  | 
 | 1071 | struct amdgpu_fpriv { | 
 | 1072 | 	struct amdgpu_vm	vm; | 
 | 1073 | 	struct mutex		bo_list_lock; | 
 | 1074 | 	struct idr		bo_list_handles; | 
| Alex Deucher | 0b492a4 | 2015-08-16 22:48:26 -0400 | [diff] [blame] | 1075 | 	struct amdgpu_ctx_mgr	ctx_mgr; | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 1076 | }; | 
 | 1077 |  | 
 | 1078 | /* | 
 | 1079 |  * residency list | 
 | 1080 |  */ | 
 | 1081 |  | 
 | 1082 | struct amdgpu_bo_list { | 
 | 1083 | 	struct mutex lock; | 
 | 1084 | 	struct amdgpu_bo *gds_obj; | 
 | 1085 | 	struct amdgpu_bo *gws_obj; | 
 | 1086 | 	struct amdgpu_bo *oa_obj; | 
 | 1087 | 	bool has_userptr; | 
 | 1088 | 	unsigned num_entries; | 
 | 1089 | 	struct amdgpu_bo_list_entry *array; | 
 | 1090 | }; | 
 | 1091 |  | 
 | 1092 | struct amdgpu_bo_list * | 
 | 1093 | amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id); | 
 | 1094 | void amdgpu_bo_list_put(struct amdgpu_bo_list *list); | 
 | 1095 | void amdgpu_bo_list_free(struct amdgpu_bo_list *list); | 

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	volatile uint32_t	*sr_ptr;
	const u32		*reg_list;
	u32			reg_list_size;
	/* for clear state */
	struct amdgpu_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
	volatile uint32_t	*cs_ptr;
	const struct cs_section_def   *cs_data;
	u32			clear_state_size;
	/* for cp tables */
	struct amdgpu_bo	*cp_table_obj;
	uint64_t		cp_table_gpu_addr;
	volatile uint32_t	*cp_table_ptr;
	u32			cp_table_size;
};

struct amdgpu_mec {
	struct amdgpu_bo	*hpd_eop_obj;
	u64			hpd_eop_gpu_addr;
	u32			num_pipe;
	u32			num_mec;
	u32			num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned		num_reg;
	uint32_t		reg_base;
	bool			free[32];
	uint32_t		reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_gfx {
	struct mutex			gpu_clock_mutex;
	struct amdgpu_gca_config	config;
	struct amdgpu_rlc		rlc;
	struct amdgpu_mec		mec;
	struct amdgpu_scratch		scratch;
	const struct firmware		*me_fw;	/* ME firmware */
	uint32_t			me_fw_version;
	const struct firmware		*pfp_fw; /* PFP firmware */
	uint32_t			pfp_fw_version;
	const struct firmware		*ce_fw;	/* CE firmware */
	uint32_t			ce_fw_version;
	const struct firmware		*rlc_fw; /* RLC firmware */
	uint32_t			rlc_fw_version;
	const struct firmware		*mec_fw; /* MEC firmware */
	uint32_t			mec_fw_version;
	const struct firmware		*mec2_fw; /* MEC2 firmware */
	uint32_t			mec2_fw_version;
	uint32_t			me_feature_version;
	uint32_t			ce_feature_version;
	uint32_t			pfp_feature_version;
	uint32_t			rlc_feature_version;
	uint32_t			mec_feature_version;
	uint32_t			mec2_feature_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned			num_gfx_rings;
	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned			num_compute_rings;
	struct amdgpu_irq_src		eop_irq;
	struct amdgpu_irq_src		priv_reg_irq;
	struct amdgpu_irq_src		priv_inst_irq;
	/* gfx status */
	uint32_t			gfx_current_status;
	/* sync signal for const engine */
	unsigned			ce_sync_offs;
	/* ce ram size */
	unsigned			ce_ram_size;
};

int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ib, void *owner);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
/* Ring access between begin & end cannot sleep */
void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
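
/*
 * Illustrative sketch of the lock/commit discipline implied by the
 * declarations above (assumed usage, error handling abbreviated). Between
 * amdgpu_ring_lock() and amdgpu_ring_unlock_commit() the caller must not
 * sleep:
 *
 *	r = amdgpu_ring_lock(ring, ndw);
 *	if (r)
 *		return r;
 *	amdgpu_ring_write(ring, ...);	// emit up to ndw dwords
 *	amdgpu_ring_unlock_commit(ring);
 *
 * On a mid-stream failure, amdgpu_ring_unlock_undo() rolls the write
 * pointer back instead of committing.
 */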

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	uint32_t		*kdata;
	void __user		*user_ptr;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;
	struct amdgpu_bo_list	*bo_list;
	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;
	/* relocations */
	struct amdgpu_bo_list_entry	*vm_bos;
	struct list_head	validated;

	struct amdgpu_ib	*ibs;
	uint32_t		num_ibs;

	struct ww_acquire_ctx	ticket;

	/* user fence */
	struct amdgpu_user_fence uf;
};

struct amdgpu_job {
	struct amd_sched_job    base;
	struct amdgpu_device	*adev;
	struct amdgpu_ib	*ibs;
	uint32_t		num_ibs;
	struct mutex            job_lock;
	struct amdgpu_user_fence uf;
	int (*free_job)(struct amdgpu_job *job);
};
#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)
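
/*
 * Example (illustration only): code handed the embedded &struct
 * amd_sched_job, e.g. a scheduler callback, recovers the enclosing
 * amdgpu job via the container_of() wrapper above:
 *
 *	struct amdgpu_job *job = to_amdgpu_job(sched_job);
 */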

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
	return p->ibs[ib_idx].ptr[idx];
}
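
/*
 * E.g. a ring's parse_cs() hook might fetch a packet header with
 * amdgpu_get_ib_value(p, ib_idx, idx). Note there is no bounds checking
 * here; callers are expected to validate idx against the IB's length_dw.
 */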

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
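
/*
 * Sketch of the assumed allocation pattern: reserve one writeback slot and
 * derive its GPU address from the backing BO (slots are 32 bits wide):
 *
 *	u32 wb;
 *
 *	if (!amdgpu_wb_get(adev, &wb))
 *		wb_gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *	...
 *	amdgpu_wb_free(adev, wb);
 */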

/**
 * struct amdgpu_pm - power management data
 * Keeps track of the various pieces of state needed to make power
 * management decisions.
 */

enum amdgpu_pm_state_type {
	/* not used for dpm */
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	/* user selectable states */
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
	/* internal states */
	POWER_STATE_TYPE_INTERNAL_UVD,
	POWER_STATE_TYPE_INTERNAL_UVD_SD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
	POWER_STATE_TYPE_INTERNAL_BOOT,
	POWER_STATE_TYPE_INTERNAL_THERMAL,
	POWER_STATE_TYPE_INTERNAL_ACPI,
	POWER_STATE_TYPE_INTERNAL_ULV,
	POWER_STATE_TYPE_INTERNAL_3DPERF,
};

enum amdgpu_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, All cases */
	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};

struct amdgpu_ps {
	u32 caps; /* vbios flags */
	u32 class; /* vbios flags */
	u32 class2; /* vbios flags */
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
	/* VCE clocks */
	u32 evclk;
	u32 ecclk;
	bool vce_active;
	enum amdgpu_vce_level vce_level;
	/* asic priv */
	void *ps_priv;
};

struct amdgpu_dpm_thermal {
	/* thermal interrupt work */
	struct work_struct work;
	/* low temperature threshold */
	int                min_temp;
	/* high temperature threshold */
	int                max_temp;
	/* was last interrupt low to high or high to low */
	bool               high_to_low;
	/* interrupt source */
	struct amdgpu_irq_src	irq;
};

enum amdgpu_clk_action {
	AMDGPU_SCLK_UP = 1,
	AMDGPU_SCLK_DOWN
};

struct amdgpu_blacklist_clocks {
	u32 sclk;
	u32 mclk;
	enum amdgpu_clk_action action;
};

struct amdgpu_clock_and_voltage_limits {
	u32 sclk;
	u32 mclk;
	u16 vddc;
	u16 vddci;
};

struct amdgpu_clock_array {
	u32 count;
	u32 *values;
};

struct amdgpu_clock_voltage_dependency_entry {
	u32 clk;
	u16 v;
};

struct amdgpu_clock_voltage_dependency_table {
	u32 count;
	struct amdgpu_clock_voltage_dependency_entry *entries;
};

union amdgpu_cac_leakage_entry {
	struct {
		u16 vddc;
		u32 leakage;
	};
	struct {
		u16 vddc1;
		u16 vddc2;
		u16 vddc3;
	};
};

struct amdgpu_cac_leakage_table {
	u32 count;
	union amdgpu_cac_leakage_entry *entries;
};

struct amdgpu_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
	u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
};

struct amdgpu_dpm {
	struct amdgpu_ps        *ps;
	/* number of valid power states */
	int                     num_ps;
	/* current power state that is active */
	struct amdgpu_ps        *current_ps;
	/* requested power state */
	struct amdgpu_ps        *requested_ps;
	/* boot up power state */
	struct amdgpu_ps        *boot_ps;
	/* default uvd power state */
	struct amdgpu_ps        *uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level vce_level;
	enum amdgpu_pm_state_type state;
	enum amdgpu_pm_state_type user_state;
	u32                     platform_caps;
	u32                     voltage_response_time;
	u32                     backbias_response_time;
	void                    *priv;
	u32			new_active_crtcs;
	int			new_active_crtc_count;
	u32			current_active_crtcs;
	int			current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan fan;
	u32 tdp_limit;
	u32 near_tdp_limit;
	u32 near_tdp_limit_adjusted;
	u32 sq_ramping_threshold;
	u32 cac_leakage;
	u16 tdp_od_limit;
	u32 tdp_adjustment;
	u16 load_line_slope;
	bool power_control;
	bool ac_power;
	/* special states active */
	bool                    thermal_active;
	bool                    uvd_active;
	bool                    vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex		mutex;
	u32                     current_sclk;
	u32                     current_mclk;
	u32                     default_sclk;
	u32                     default_mclk;
	struct amdgpu_i2c_chan *i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device	        *int_hwmon_dev;
	/* fan control parameters */
	bool                    no_fan;
	u8                      fan_pulses_per_revolution;
	u8                      fan_min_rpm;
	u8                      fan_max_rpm;
	/* dpm */
	bool                    dpm_enabled;
	struct amdgpu_dpm       dpm;
	const struct firmware	*fw;	/* SMC firmware */
	uint32_t                fw_version;
	const struct amdgpu_dpm_funcs *funcs;
};

/*
 * UVD
 */
#define AMDGPU_MAX_UVD_HANDLES	10
#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256

struct amdgpu_uvd {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* UVD firmware */
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
	bool			address_64_bit;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES	16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)

struct amdgpu_vce {
	struct amdgpu_bo	*vcpu_bo;
	uint64_t		gpu_addr;
	unsigned		fw_version;
	unsigned		fb_version;
	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
	uint32_t		img_size[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* VCE firmware */
	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src	irq;
	unsigned		harvest_config;
};

/*
 * SDMA
 */
struct amdgpu_sdma {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;

	struct amdgpu_ring	ring;
	bool			burst_nop;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif

/*
 * Debugfs
 */
struct amdgpu_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif

/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

struct amdgpu_cu_info {
	uint32_t number; /* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};


/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* wait for mc_idle */
	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * CGS
 */
void *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(void *cgs_device);


/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
};

struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;
	struct rw_semaphore		exclusive_lock;

	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
	bool				needs_reset;
	struct work_struct		reset_work;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_regs;
#endif
	struct amdgpu_atif		atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex                    grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;

	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	uint16_t			bios_header_start;
	struct amdgpu_bo		*stollen_vga_memory;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem                    *rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock            clock;

	/* MC */
	struct amdgpu_mc		mc;
	struct amdgpu_gart		gart;
	struct amdgpu_dummy_page	dummy_page;
	struct amdgpu_vm_manager	vm_manager;

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_gem		gem;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			vram_usage;
	atomic64_t			vram_vis_usage;
	atomic64_t			gtt_usage;
	atomic64_t			num_bytes_moved;
	atomic_t			gpu_reset_counter;

	/* display */
	struct amdgpu_mode_info		mode_info;
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	/* rings */
	unsigned			fence_context;
	struct mutex			ring_lock;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq		irq;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr smu;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_irq_src		sdma_trap_irq;
	struct amdgpu_irq_src		sdma_illegal_inst_irq;

	/* uvd */
	bool				has_uvd;
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* GDS */
	struct amdgpu_gds		gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int				num_ip_blocks;
	struct amdgpu_ip_block_status	*ip_block_status;
	struct mutex	mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64 vram_pin_size;
	u64 gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev          *kfd;

	/* kernel context for IB submission */
	struct amdgpu_ctx	kernel_ctx;
};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

/*
 * Cast helper
 */
extern const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
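
/*
 * Example (illustrative): because the helper returns NULL for fences that
 * do not belong to amdgpu, callers can branch to the generic fence API:
 *
 *	struct amdgpu_fence *afence = to_amdgpu_fence(f);
 *
 *	if (afence)
 *		... amdgpu-specific fast path ...
 *	else
 *		fence_wait(f, false);
 */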

/*
 * Registers read & write functions.
 */
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
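
/*
 * Clarifying note on WREG32_P(): bits set in @mask are preserved from the
 * current register value, bits clear in @mask are taken from @val. Hence
 * WREG32_AND(reg, m) clears every bit outside m, and WREG32_OR(reg, m)
 * sets the bits in m, each as a single read-modify-write.
 */
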
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
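
/*
 * Illustrative read-modify-write of a single field. The register/field
 * names follow the reg##__##field convention the macros above assume;
 * mmFOO and BAR below are placeholders, not real registers:
 *
 *	u32 tmp = RREG32(mmFOO);
 *
 *	tmp = REG_SET_FIELD(tmp, FOO, BAR, 1);
 *	WREG32(mmFOO, tmp);
 */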

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
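
/*
 * Example (assumed VBIOS layout): ATOM images customarily store the ROM
 * header pointer as a 16-bit little-endian value at offset 0x48, so a
 * probe along the lines of
 *
 *	adev->bios_header_start = RBIOS16(0x48);
 *
 * would read it through the helpers above.
 */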

/*
 * RING helpers.
 */
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

static inline struct amdgpu_sdma *amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < AMDGPU_MAX_SDMA_INSTANCES; i++)
		if (&adev->sdma[i].ring == ring)
			break;

	if (i < AMDGPU_MAX_SDMA_INSTANCES)
		return &adev->sdma[i];
	else
		return NULL;
}
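
/*
 * Sketch (assumed usage): SDMA code that only has the ring pointer can
 * recover the owning instance, e.g. to test per-instance firmware
 * features:
 *
 *	struct amdgpu_sdma *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		... emit burst NOPs ...
 */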
 | 2222 |  | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2223 | /* | 
 | 2224 |  * ASICs macro. | 
 | 2225 |  */ | 
 | 2226 | #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) | 
 | 2227 | #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) | 
 | 2228 | #define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev)) | 
 | 2229 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) | 
 | 2230 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) | 
 | 2231 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) | 
 | 2232 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) | 
 | 2233 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) | 
 | 2234 | #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) | 
 | 2235 | #define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info)) | 
 | 2236 | #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid)) | 
 | 2237 | #define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) | 
 | 2238 | #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) | 
 | 2239 | #define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags))) | 
 | 2240 | #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) | 
 | 2241 | #define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib))) | 
 | 2242 | #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) | 
 | 2243 | #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) | 
 | 2244 | #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) | 
 | 2245 | #define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r)) | 
 | 2246 | #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) | 
 | 2247 | #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) | 
 | 2248 | #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) | 
 | 2249 | #define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib)) | 
 | 2250 | #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) | 
| Chunming Zhou | 890ee23 | 2015-06-01 14:35:03 +0800 | [diff] [blame] | 2251 | #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2252 | #define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait)) | 
 | 2253 | #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) | 
| Christian König | d2edb07 | 2015-05-11 14:10:34 +0200 | [diff] [blame] | 2254 | #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2255 | #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev)) | 
 | 2256 | #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv)) | 
 | 2257 | #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev)) | 
 | 2258 | #define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r)) | 
 | 2259 | #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) | 
 | 2260 | #define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc)) | 
 | 2261 | #define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev)) | 
 | 2262 | #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) | 
 | 2263 | #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) | 
 | 2264 | #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) | 
 | 2265 | #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) | 
 | 2266 | #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) | 
 | 2267 | #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) | 
 | 2268 | #define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base)) | 
 | 2269 | #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) | 
 | 2270 | #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) | 
 | 2271 | #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) | 
 | 2272 | #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s)) | 
 | 2273 | #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s)) | 
| Chunming Zhou | c7ae72c | 2015-08-25 17:23:45 +0800 | [diff] [blame] | 2274 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib),  (s), (d), (b)) | 
| Chunming Zhou | 6e7a384 | 2015-08-27 13:46:09 +0800 | [diff] [blame] | 2275 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2276 | #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev)) | 
 | 2277 | #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev)) | 
 | 2278 | #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev)) | 
 | 2279 | #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev)) | 
 | 2280 | #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev)) | 
 | 2281 | #define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l)) | 
 | 2282 | #define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l)) | 
 | 2283 | #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) | 
 | 2284 | #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)) | 
 | 2285 | #define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l)) | 
 | 2286 | #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) | 
 | 2287 | #define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g)) | 
| Sonny Jiang | b7a07769 | 2015-05-28 15:47:53 -0400 | [diff] [blame] | 2288 | #define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g)) | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2289 | #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) | 
 | 2290 | #define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m)) | 
 | 2291 | #define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev)) | 
 | 2292 | #define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s)) | 
 | 2293 | #define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s)) | 
 | 2294 |  | 
 | 2295 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) | 
 | 2296 |  | 
 | 2297 | /* Common functions */ | 
 | 2298 | int amdgpu_gpu_reset(struct amdgpu_device *adev); | 
 | 2299 | void amdgpu_pci_config_reset(struct amdgpu_device *adev); | 
 | 2300 | bool amdgpu_card_posted(struct amdgpu_device *adev); | 
 | 2301 | void amdgpu_update_display_priority(struct amdgpu_device *adev); | 
 | 2302 | bool amdgpu_boot_test_post_card(struct amdgpu_device *adev); | 
| Chunming Zhou | d5fc5e8 | 2015-07-21 16:52:10 +0800 | [diff] [blame] | 2303 | struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev, | 
 | 2304 | 						 struct drm_file *filp, | 
 | 2305 | 						 struct amdgpu_ctx *ctx, | 
 | 2306 | 						 struct amdgpu_ib *ibs, | 
 | 2307 | 						 uint32_t num_ibs); | 
 | 2308 |  | 
| Alex Deucher | 97b2e20 | 2015-04-20 16:51:00 -0400 | [diff] [blame] | 2309 | int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data); | 
 | 2310 | int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type, | 
 | 2311 | 		       u32 ip_instance, u32 ring, | 
 | 2312 | 		       struct amdgpu_ring **out_ring); | 
 | 2313 | void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain); | 
 | 2314 | bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); | 
 | 2315 | int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, | 
 | 2316 | 				     uint32_t flags); | 
 | 2317 | bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm); | 
 | 2318 | bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm); | 
 | 2319 | uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, | 
 | 2320 | 				 struct ttm_mem_reg *mem); | 
 | 2321 | void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base); | 
 | 2322 | void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc); | 
 | 2323 | void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size); | 
 | 2324 | void amdgpu_program_register_sequence(struct amdgpu_device *adev, | 
 | 2325 | 					     const u32 *registers, | 
 | 2326 | 					     const u32 array_size); | 

bool amdgpu_device_is_px(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
#endif
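/*
 * The empty static inline stubs above let common code call the ATPX
 * register/unregister hooks unconditionally; on kernels built without
 * CONFIG_VGA_SWITCHEROO the calls compile away to nothing.
 */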

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);
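/*
 * The *_kms entry points above are meant to be wired into the driver's
 * struct drm_driver.  A hedged, abbreviated sketch of that wiring (the
 * field selection is an assumption; the authoritative table lives in
 * amdgpu_drv.c, which also sets num_ioctls from amdgpu_max_kms_ioctl):
 *
 *	static struct drm_driver kms_driver_sketch = {
 *		.load = amdgpu_driver_load_kms,
 *		.open = amdgpu_driver_open_kms,
 *		.preclose = amdgpu_driver_preclose_kms,
 *		.postclose = amdgpu_driver_postclose_kms,
 *		.lastclose = amdgpu_driver_lastclose_kms,
 *		.unload = amdgpu_driver_unload_kms,
 *		.get_vblank_counter = amdgpu_get_vblank_counter_kms,
 *		.enable_vblank = amdgpu_enable_vblank_kms,
 *		.disable_vblank = amdgpu_disable_vblank_kms,
 *		.ioctls = amdgpu_ioctls_kms,
 *	};
 */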

/*
 * vm
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					       struct amdgpu_vm *vm,
					       struct list_head *head);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates);
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *fence);
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
int amdgpu_vm_free_job(struct amdgpu_job *job);
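/*
 * Typical buffer-object lifecycle against a VM, as implied by the
 * declarations above (locking and error handling elided; a hedged
 * sketch, not a verbatim driver path):
 *
 *	struct amdgpu_bo_va *bo_va;
 *	int r;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, size, flags);
 *	r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
 *	...
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 */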
/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
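/*
 * amdgpu_afmt_acr() returns the HDMI Audio Clock Regeneration (ACR)
 * N/CTS pairs for the three base audio rates at a given pixel clock.
 * A hedged usage sketch for a 148.5 MHz mode driving 48 kHz audio;
 * program_acr_registers() is a hypothetical helper, and reading the
 * clock argument as kHz is an assumption:
 *
 *	struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(148500);
 *
 *	program_acr_registers(acr.n_48khz, acr.cts_48khz);
 */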

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
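/*
 * Note that the !CONFIG_ACPI stubs above only cover init/fini: the
 * PCIe performance-request helpers have no fallback here, so callers
 * are expected to reach them only from ACPI-conditional paths.
 */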

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"

#endif