/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <linux/hashtable.h>
#include <linux/fence.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>

#include "amd_shared.h"
#include "amdgpu_family.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gds.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_smc_load_fw;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern int amdgpu_hard_reset;
extern unsigned amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;

#define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE		16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT		4
#define AMDGPU_BIOS_NUM_SCRATCH		8

/* max number of rings */
#define AMDGPU_MAX_RINGS		16
#define AMDGPU_MAX_GFX_RINGS		1
#define AMDGPU_MAX_COMPUTE_RINGS	8
#define AMDGPU_MAX_VCE_RINGS		2

/* number of hw syncs before falling back on blocking */
#define AMDGPU_NUM_SYNCS		4

/* hard-code this limit for now (8 << 20 bytes == 8 MiB) */
#define AMDGPU_VA_RESERVED_SIZE		(8 << 20)

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA		0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX		(1 << 0)
#define AMDGPU_RESET_COMPUTE		(1 << 1)
#define AMDGPU_RESET_DMA		(1 << 2)
#define AMDGPU_RESET_CP			(1 << 3)
#define AMDGPU_RESET_GRBM		(1 << 4)
#define AMDGPU_RESET_DMA1		(1 << 5)
#define AMDGPU_RESET_RLC		(1 << 6)
#define AMDGPU_RESET_SEM		(1 << 7)
#define AMDGPU_RESET_IH			(1 << 8)
#define AMDGPU_RESET_VMC		(1 << 9)
#define AMDGPU_RESET_MC			(1 << 10)
#define AMDGPU_RESET_DISPLAY		(1 << 11)
#define AMDGPU_RESET_UVD		(1 << 12)
#define AMDGPU_RESET_VCE		(1 << 13)
#define AMDGPU_RESET_VCE1		(1 << 14)

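/*
 * The reset flags above form a bitmask, so several blocks can be selected
 * for reset in a single mask, e.g. (illustrative only):
 *
 *	u32 reset_mask = AMDGPU_RESET_GFX | AMDGPU_RESET_CP | AMDGPU_RESET_RLC;
 */
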
/* CG block flags */
#define AMDGPU_CG_BLOCK_GFX		(1 << 0)
#define AMDGPU_CG_BLOCK_MC		(1 << 1)
#define AMDGPU_CG_BLOCK_SDMA		(1 << 2)
#define AMDGPU_CG_BLOCK_UVD		(1 << 3)
#define AMDGPU_CG_BLOCK_VCE		(1 << 4)
#define AMDGPU_CG_BLOCK_HDP		(1 << 5)
#define AMDGPU_CG_BLOCK_BIF		(1 << 6)

/* CG flags */
#define AMDGPU_CG_SUPPORT_GFX_MGCG	(1 << 0)
#define AMDGPU_CG_SUPPORT_GFX_MGLS	(1 << 1)
#define AMDGPU_CG_SUPPORT_GFX_CGCG	(1 << 2)
#define AMDGPU_CG_SUPPORT_GFX_CGLS	(1 << 3)
#define AMDGPU_CG_SUPPORT_GFX_CGTS	(1 << 4)
#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS	(1 << 5)
#define AMDGPU_CG_SUPPORT_GFX_CP_LS	(1 << 6)
#define AMDGPU_CG_SUPPORT_GFX_RLC_LS	(1 << 7)
#define AMDGPU_CG_SUPPORT_MC_LS		(1 << 8)
#define AMDGPU_CG_SUPPORT_MC_MGCG	(1 << 9)
#define AMDGPU_CG_SUPPORT_SDMA_LS	(1 << 10)
#define AMDGPU_CG_SUPPORT_SDMA_MGCG	(1 << 11)
#define AMDGPU_CG_SUPPORT_BIF_LS	(1 << 12)
#define AMDGPU_CG_SUPPORT_UVD_MGCG	(1 << 13)
#define AMDGPU_CG_SUPPORT_VCE_MGCG	(1 << 14)
#define AMDGPU_CG_SUPPORT_HDP_LS	(1 << 15)
#define AMDGPU_CG_SUPPORT_HDP_MGCG	(1 << 16)

/* PG flags */
#define AMDGPU_PG_SUPPORT_GFX_PG	(1 << 0)
#define AMDGPU_PG_SUPPORT_GFX_SMG	(1 << 1)
#define AMDGPU_PG_SUPPORT_GFX_DMG	(1 << 2)
#define AMDGPU_PG_SUPPORT_UVD		(1 << 3)
#define AMDGPU_PG_SUPPORT_VCE		(1 << 4)
#define AMDGPU_PG_SUPPORT_CP		(1 << 5)
#define AMDGPU_PG_SUPPORT_GDS		(1 << 6)
#define AMDGPU_PG_SUPPORT_RLC_SMU_HS	(1 << 7)
#define AMDGPU_PG_SUPPORT_SDMA		(1 << 8)
#define AMDGPU_PG_SUPPORT_ACP		(1 << 9)
#define AMDGPU_PG_SUPPORT_SAMU		(1 << 10)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE		0x00000000L
#define AMDGPU_GFX_SAFE_MODE		0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE	0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE	0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE	0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_fence;
struct amdgpu_ib;
struct amdgpu_vm;
struct amdgpu_ring;
struct amdgpu_semaphore;
struct amdgpu_cs_parser;
struct amdgpu_irq_src;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_clockgating_state state);
int amdgpu_set_powergating_state(struct amdgpu_device *adev,
				 enum amd_ip_block_type block_type,
				 enum amd_powergating_state state);

struct amdgpu_ip_block_version {
	enum amd_ip_block_type type;
	u32 major;
	u32 minor;
	u32 rev;
	const struct amd_ip_funcs *funcs;
};

int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev,
				enum amd_ip_block_type type,
				u32 major, u32 minor);

const struct amdgpu_ip_block_version *amdgpu_get_ip_block(
					struct amdgpu_device *adev,
					enum amd_ip_block_type type);

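/*
 * A minimal sketch of an IP-version query using the helpers above; the
 * exact return convention (0 when the installed block is at least the
 * requested version) is an assumption here, not stated in this header:
 *
 *	if (amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX, 8, 0) == 0)
 *		;	// GFX IP >= 8.0, feature may be used
 */
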
/* provided by hw blocks that can move/clear data, e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dwords (dw) to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ring *ring,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dwords (dw) to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ring *ring,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);
	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib,
			  uint64_t pe,
			  uint64_t addr, unsigned count,
			  uint32_t incr, uint32_t flags);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint32_t flags);
	/* pad the indirect buffer to the necessary number of dw */
	void (*pad_ib)(struct amdgpu_ib *ib);
};

/* provided by the gmc block */
struct amdgpu_gart_funcs {
	/* flush the vm tlb via mmio */
	void (*flush_gpu_tlb)(struct amdgpu_device *adev,
			      uint32_t vmid);
	/* write pte/pde updates using the cpu */
	int (*set_pte_pde)(struct amdgpu_device *adev,
			   void *cpu_pt_addr, /* cpu addr of page table */
			   uint32_t gpu_page_idx, /* pte/pde to update */
			   uint64_t addr, /* addr to write into pte/pde */
			   uint32_t flags); /* access flags */
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/* provided by hw blocks that expose a ring buffer for commands */
struct amdgpu_ring_funcs {
	/* ring read/write ptr handling */
	u32 (*get_rptr)(struct amdgpu_ring *ring);
	u32 (*get_wptr)(struct amdgpu_ring *ring);
	void (*set_wptr)(struct amdgpu_ring *ring);
	/* validating and patching of IBs */
	int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
	/* command emit functions */
	void (*emit_ib)(struct amdgpu_ring *ring,
			struct amdgpu_ib *ib);
	void (*emit_fence)(struct amdgpu_ring *ring, uint64_t addr,
			   uint64_t seq, bool write64bit);
	bool (*emit_semaphore)(struct amdgpu_ring *ring,
			       struct amdgpu_semaphore *semaphore,
			       bool emit_wait);
	void (*emit_vm_flush)(struct amdgpu_ring *ring, unsigned vm_id,
			      uint64_t pd_addr);
	void (*emit_hdp_flush)(struct amdgpu_ring *ring);
	void (*emit_gds_switch)(struct amdgpu_ring *ring, uint32_t vmid,
				uint32_t gds_base, uint32_t gds_size,
				uint32_t gws_base, uint32_t gws_size,
				uint32_t oa_base, uint32_t oa_size);
	/* testing functions */
	int (*test_ring)(struct amdgpu_ring *ring);
	int (*test_ib)(struct amdgpu_ring *ring);
	bool (*is_lockup)(struct amdgpu_ring *ring);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Dummy page
 */
struct amdgpu_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int amdgpu_dummy_page_init(struct amdgpu_device *adev);
void amdgpu_dummy_page_fini(struct amdgpu_device *adev);


/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Fences.
 */
struct amdgpu_fence_driver {
	struct amdgpu_ring		*ring;
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t			sync_seq[AMDGPU_MAX_RINGS];
	atomic64_t			last_seq;
	bool				initialized;
	bool				delayed_irq;
	struct amdgpu_irq_src		*irq_src;
	unsigned			irq_type;
	struct delayed_work		lockup_work;
};

/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED	((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM		((void *)1ul)
#define AMDGPU_FENCE_OWNER_MOVE		((void *)2ul)

struct amdgpu_fence {
	struct fence base;

	/* RB, DMA, etc. */
	struct amdgpu_ring		*ring;
	uint64_t			seq;

	/* filp or special value for fence creator */
	void				*owner;

	wait_queue_t			fence_wake;
};

struct amdgpu_user_fence {
	/* write-back bo */
	struct amdgpu_bo	*bo;
	/* write-back address offset to bo start */
	uint32_t		offset;
};

int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);

void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
				   struct amdgpu_irq_src *irq_src,
				   unsigned irq_type);
int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
		      struct amdgpu_fence **fence);
void amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);

bool amdgpu_fence_signaled(struct amdgpu_fence *fence);
int amdgpu_fence_wait(struct amdgpu_fence *fence, bool interruptible);
int amdgpu_fence_wait_any(struct amdgpu_device *adev,
			  struct amdgpu_fence **fences,
			  bool intr);
long amdgpu_fence_wait_seq_timeout(struct amdgpu_device *adev,
				   u64 *target_seq, bool intr,
				   long timeout);
struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
void amdgpu_fence_unref(struct amdgpu_fence **fence);

bool amdgpu_fence_need_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);
void amdgpu_fence_note_sync(struct amdgpu_fence *fence,
			    struct amdgpu_ring *ring);

static inline struct amdgpu_fence *amdgpu_fence_later(struct amdgpu_fence *a,
						      struct amdgpu_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

static inline bool amdgpu_fence_is_earlier(struct amdgpu_fence *a,
					   struct amdgpu_fence *b)
{
	if (!a) {
		return false;
	}

	if (!b) {
		return true;
	}

	BUG_ON(a->ring != b->ring);

	return a->seq < b->seq;
}
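
/*
 * Note: amdgpu_fence_later() treats a NULL argument as the earlier fence,
 * so it can seed a "most recent fence" accumulator; both helpers only make
 * sense for fences on the same ring, hence the BUG_ON. Illustrative use:
 *
 *	last = amdgpu_fence_later(last, fence);
 */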

int amdgpu_user_fence_emit(struct amdgpu_ring *ring, struct amdgpu_user_fence *user,
			   void *owner, struct amdgpu_fence **fence);

/*
 * TTM.
 */
struct amdgpu_mman {
	struct ttm_bo_global_ref	bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;

#if defined(CONFIG_DEBUG_FS)
	struct dentry			*vram;
	struct dentry			*gtt;
#endif

	/* buffer handling */
	const struct amdgpu_buffer_funcs	*buffer_funcs;
	struct amdgpu_ring			*buffer_funcs_ring;
};

int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct amdgpu_fence **fence);
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);

struct amdgpu_bo_list_entry {
	struct amdgpu_bo		*robj;
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	unsigned			prefered_domains;
	unsigned			allowed_domains;
	uint32_t			priority;
};

struct amdgpu_bo_va_mapping {
	struct list_head		list;
	struct interval_tree_node	it;
	uint64_t			offset;
	uint32_t			flags;
};

/* bo virtual addresses in a specific vm */
struct amdgpu_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;
	uint64_t			addr;
	struct amdgpu_fence		*last_pt_update;
	unsigned			ref_count;

	/* protected by vm mutex */
	struct list_head		mappings;
	struct list_head		vm_status;

	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;
};

#define AMDGPU_GEM_DOMAIN_MAX		0x3

struct amdgpu_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				initial_domain;
	struct ttm_place		placements[AMDGPU_GEM_DOMAIN_MAX + 1];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	unsigned			pin_count;
	void				*kptr;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
	/* list of all virtual addresses to which this bo is associated */
	struct list_head		va;
	/* Constant after initialization */
	struct amdgpu_device		*adev;
	struct drm_gem_object		gem_base;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	pid_t				pid;
	struct amdgpu_mn		*mn;
	struct list_head		mn_list;
};
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

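/*
 * gem_to_amdgpu_bo() recovers the amdgpu_bo that embeds a given
 * drm_gem_object, e.g. (illustrative):
 *
 *	struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 */
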
void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
							struct dma_buf_attachment *attach,
							struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, such as
 * the indirect buffer and semaphore code, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry has
 * the highest offset).
 *
 * When allocating a new object we first check whether there is room
 * at the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size. If so, we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub-object to be freed until object_offset + object_size >=
 * alloc_size; that object then becomes the sub-object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation, to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
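/*
 * Worked example of the end-of-buffer check above (numbers illustrative
 * only): with total_size == 1024, a last object at offset 768 with size
 * 128, and alloc_size == 128, there is room at the end because
 * 1024 - (768 + 128) == 128 >= 128, so the new sub-allocation is placed
 * at offset 896.
 */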
struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_MAX_RINGS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

struct amdgpu_sa_bo;

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct amdgpu_fence		*fence;
};

/*
 * GEM objects.
 */
struct amdgpu_gem {
	struct mutex		mutex;
	struct list_head	objects;
};

int amdgpu_gem_init(struct amdgpu_device *adev);
void amdgpu_gem_fini(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);

/*
 * Semaphores.
 */
struct amdgpu_semaphore {
	struct amdgpu_sa_bo	*sa_bo;
	signed			waiters;
	uint64_t		gpu_addr;
};

int amdgpu_semaphore_create(struct amdgpu_device *adev,
			    struct amdgpu_semaphore **semaphore);
bool amdgpu_semaphore_emit_signal(struct amdgpu_ring *ring,
				  struct amdgpu_semaphore *semaphore);
bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
				struct amdgpu_semaphore *semaphore);
void amdgpu_semaphore_free(struct amdgpu_device *adev,
			   struct amdgpu_semaphore **semaphore,
			   struct amdgpu_fence *fence);

/*
 * Synchronization
 */
struct amdgpu_sync {
	struct amdgpu_semaphore	*semaphores[AMDGPU_NUM_SYNCS];
	struct amdgpu_fence	*sync_to[AMDGPU_MAX_RINGS];
	struct amdgpu_fence	*last_vm_update;
};

void amdgpu_sync_create(struct amdgpu_sync *sync);
void amdgpu_sync_fence(struct amdgpu_sync *sync,
		       struct amdgpu_fence *fence);
int amdgpu_sync_resv(struct amdgpu_device *adev,
		     struct amdgpu_sync *sync,
		     struct reservation_object *resv,
		     void *owner);
int amdgpu_sync_rings(struct amdgpu_sync *sync,
		      struct amdgpu_ring *ring);
void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
		      struct amdgpu_fence *fence);

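/*
 * Typical life cycle of an amdgpu_sync object (a sketch inferred from the
 * prototypes above, not code from this file): amdgpu_sync_create()
 * initializes the tracking arrays, amdgpu_sync_fence()/amdgpu_sync_resv()
 * collect the fences a job must wait for, amdgpu_sync_rings() emits the
 * required semaphore waits on the target ring, and amdgpu_sync_free()
 * releases everything once the job's own fence is known.
 */
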
/*
 * GART structures, functions & helpers
 */
struct amdgpu_mc;

#define AMDGPU_GPU_PAGE_SIZE 4096
#define AMDGPU_GPU_PAGE_MASK (AMDGPU_GPU_PAGE_SIZE - 1)
#define AMDGPU_GPU_PAGE_SHIFT 12
#define AMDGPU_GPU_PAGE_ALIGN(a) (((a) + AMDGPU_GPU_PAGE_MASK) & ~AMDGPU_GPU_PAGE_MASK)

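/*
 * Example (illustrative): AMDGPU_GPU_PAGE_ALIGN() rounds a size or address
 * up to the next 4 KiB GPU page boundary, so AMDGPU_GPU_PAGE_ALIGN(5000)
 * evaluates to 8192, while already-aligned values such as 8192 are
 * returned unchanged.
 */
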
struct amdgpu_gart {
	dma_addr_t			table_addr;
	struct amdgpu_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				ready;
	const struct amdgpu_gart_funcs	*gart_funcs;
};

int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_ram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev);
int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
int amdgpu_gart_init(struct amdgpu_device *adev);
void amdgpu_gart_fini(struct amdgpu_device *adev);
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
			int pages);
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr, uint32_t flags);

/*
 * GPU MC structures, functions & helpers
 */
struct amdgpu_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about the vram size near the mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	u64			gtt_base_align;
	u64			mc_mask;
	const struct firmware	*fw;	/* MC firmware */
	uint32_t		fw_version;
	struct amdgpu_irq_src	vm_fault;
	uint32_t		vram_type;
};

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ			= 0x000,
	AMDGPU_DOORBELL_HIQ			= 0x001,
	AMDGPU_DOORBELL_DIQ			= 0x002,
	AMDGPU_DOORBELL_MEC_RING0		= 0x010,
	AMDGPU_DOORBELL_MEC_RING1		= 0x011,
	AMDGPU_DOORBELL_MEC_RING2		= 0x012,
	AMDGPU_DOORBELL_MEC_RING3		= 0x013,
	AMDGPU_DOORBELL_MEC_RING4		= 0x014,
	AMDGPU_DOORBELL_MEC_RING5		= 0x015,
	AMDGPU_DOORBELL_MEC_RING6		= 0x016,
	AMDGPU_DOORBELL_MEC_RING7		= 0x017,
	AMDGPU_DOORBELL_GFX_RING0		= 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0		= 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1		= 0x1E1,
	AMDGPU_DOORBELL_IH			= 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT		= 0x3FF,
	AMDGPU_DOORBELL_INVALID			= 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t		base;
	resource_size_t		size;
	u32 __iomem		*ptr;
	u32			num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};

void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev,
				  phys_addr_t *aperture_base,
				  size_t *aperture_size,
				  size_t *start_offset);

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct work_struct		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	uint64_t			base;
	struct drm_pending_vblank_event	*event;
	struct amdgpu_bo		*old_rbo;
	struct fence			*fence;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo		*sa_bo;
	uint32_t			length_dw;
	uint64_t			gpu_addr;
	uint32_t			*ptr;
	struct amdgpu_ring		*ring;
	struct amdgpu_fence		*fence;
	struct amdgpu_user_fence	*user;
	struct amdgpu_vm		*vm;
	struct amdgpu_ctx		*ctx;
	struct amdgpu_sync		sync;
	uint32_t			gds_base, gds_size;
	uint32_t			gws_base, gws_size;
	uint32_t			oa_base, oa_size;
	uint32_t			flags;
};

enum amdgpu_ring_type {
	AMDGPU_RING_TYPE_GFX,
	AMDGPU_RING_TYPE_COMPUTE,
	AMDGPU_RING_TYPE_SDMA,
	AMDGPU_RING_TYPE_UVD,
	AMDGPU_RING_TYPE_VCE
};

struct amdgpu_ring {
	struct amdgpu_device		*adev;
	const struct amdgpu_ring_funcs	*funcs;
	struct amdgpu_fence_driver	fence_drv;

	struct mutex		*ring_lock;
	struct amdgpu_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr_offs;
	u64			next_rptr_gpu_addr;
	volatile u32		*next_rptr_cpu_addr;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
	atomic_t		last_rptr;
	atomic64_t		last_activity;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	bool			ready;
	u32			nop;
	u32			idx;
	u64			last_semaphore_signal_addr;
	u64			last_semaphore_wait_addr;
	u32			me;
	u32			pipe;
	u32			queue;
	struct amdgpu_bo	*mqd_obj;
	u32			doorbell_index;
	bool			use_doorbell;
	unsigned		wptr_offs;
	unsigned		next_rptr_offs;
	unsigned		fence_offs;
	struct amdgpu_ctx	*current_ctx;
	enum amdgpu_ring_type	type;
	char			name[16];
};

/*
 * VM
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM	16

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE   32768
#define AMDGPU_VM_PTB_ALIGN_MASK (AMDGPU_VM_PTB_ALIGN_SIZE - 1)
#define AMDGPU_VM_PTB_ALIGN(a) (((a) + AMDGPU_VM_PTB_ALIGN_MASK) & ~AMDGPU_VM_PTB_ALIGN_MASK)

#define AMDGPU_PTE_VALID	(1 << 0)
#define AMDGPU_PTE_SYSTEM	(1 << 1)
#define AMDGPU_PTE_SNOOPED	(1 << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1 << 4)

#define AMDGPU_PTE_READABLE	(1 << 5)
#define AMDGPU_PTE_WRITEABLE	(1 << 6)

/* PTE (Page Table Entry) fragment field for different page sizes */
#define AMDGPU_PTE_FRAG_4KB	(0 << 7)
#define AMDGPU_PTE_FRAG_64KB	(4 << 7)
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

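/*
 * The fragment field stores log2 of the number of 4 KiB pages per
 * fragment, so AMDGPU_PTE_FRAG_64KB (field value 4) describes fragments
 * of 2^4 == 16 contiguous 4 KiB pages, i.e. 64 KiB, matching
 * AMDGPU_LOG2_PAGES_PER_FRAG.
 */
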
struct amdgpu_vm_pt {
	struct amdgpu_bo	*bo;
	uint64_t		addr;
};

struct amdgpu_vm_id {
	unsigned		id;
	uint64_t		pd_gpu_addr;
	/* last flushed PD/PT update */
	struct amdgpu_fence	*flushed_updates;
	/* last use of vmid */
	struct amdgpu_fence	*last_id_use;
};

struct amdgpu_vm {
	struct mutex		mutex;

	struct rb_root		va;

	/* protecting invalidated and freed */
	spinlock_t		status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head	invalidated;

	/* BOs freed, but not yet updated in the PT */
	struct list_head	freed;

	/* contains the page directory */
	struct amdgpu_bo	*page_directory;
	unsigned		max_pde_used;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt	*page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
};

struct amdgpu_vm_manager {
	struct amdgpu_fence	*active[AMDGPU_NUM_VM];
	uint32_t		max_pfn;
	/* number of VMIDs */
	unsigned		nvm;
	/* vram base address for page table entry */
	u64			vram_base_offset;
	/* is vm enabled? */
	bool			enabled;
	/* for hw to save the PD addr on suspend/resume */
	uint32_t		saved_table_addr[AMDGPU_NUM_VM];
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct amdgpu_ring	*vm_pte_funcs_ring;
};

/*
 * context related structures
 */

struct amdgpu_ctx_state {
	uint64_t flags;
	uint32_t hangs;
};

struct amdgpu_ctx {
	/* call kref_get() before CS start and kref_put() after CS fence signaled */
	struct kref		refcount;
	struct amdgpu_fpriv	*fpriv;
	struct amdgpu_ctx_state	state;
	uint32_t		id;
	unsigned		reset_counter;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct idr		ctx_handles;
	/* lock for IDR system */
	struct mutex		lock;
};

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

/*
 * residency list
 */

struct amdgpu_bo_list {
	struct mutex lock;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	bool has_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	volatile uint32_t	*sr_ptr;
	const u32		*reg_list;
	u32			reg_list_size;
	/* for clear state */
	struct amdgpu_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
	volatile uint32_t	*cs_ptr;
	const struct cs_section_def	*cs_data;
	u32			clear_state_size;
	/* for cp tables */
	struct amdgpu_bo	*cp_table_obj;
	uint64_t		cp_table_gpu_addr;
	volatile uint32_t	*cp_table_ptr;
	u32			cp_table_size;
};

struct amdgpu_mec {
	struct amdgpu_bo	*hpd_eop_obj;
	u64			hpd_eop_gpu_addr;
	u32			num_pipe;
	u32			num_mec;
	u32			num_queue;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned		num_reg;
	uint32_t		reg_base;
	bool			free[32];
	uint32_t		reg[32];
};

/*
 * GFX configurations
 */
struct amdgpu_gca_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];
};

struct amdgpu_gfx {
	struct mutex			gpu_clock_mutex;
	struct amdgpu_gca_config	config;
	struct amdgpu_rlc		rlc;
	struct amdgpu_mec		mec;
	struct amdgpu_scratch		scratch;
	const struct firmware		*me_fw;	/* ME firmware */
	uint32_t			me_fw_version;
	const struct firmware		*pfp_fw; /* PFP firmware */
	uint32_t			pfp_fw_version;
	const struct firmware		*ce_fw;	/* CE firmware */
	uint32_t			ce_fw_version;
	const struct firmware		*rlc_fw; /* RLC firmware */
	uint32_t			rlc_fw_version;
	const struct firmware		*mec_fw; /* MEC firmware */
	uint32_t			mec_fw_version;
	const struct firmware		*mec2_fw; /* MEC2 firmware */
	uint32_t			mec2_fw_version;
	uint32_t			me_feature_version;
	uint32_t			ce_feature_version;
	uint32_t			pfp_feature_version;
	struct amdgpu_ring		gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned			num_gfx_rings;
	struct amdgpu_ring		compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned			num_compute_rings;
	struct amdgpu_irq_src		eop_irq;
	struct amdgpu_irq_src		priv_reg_irq;
	struct amdgpu_irq_src		priv_inst_irq;
	/* gfx status */
	uint32_t			gfx_current_status;
	/* sync signal for const engine */
	unsigned			ce_sync_offs;
	/* ce ram size */
	unsigned			ce_ram_size;
};

int amdgpu_ib_get(struct amdgpu_ring *ring, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib);
int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
		       struct amdgpu_ib *ib, void *owner);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
/* Ring access between begin & end cannot sleep */
void amdgpu_ring_free_size(struct amdgpu_ring *ring);
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw);
int amdgpu_ring_lock(struct amdgpu_ring *ring, unsigned ndw);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
			    uint32_t **data);
int amdgpu_ring_restore(struct amdgpu_ring *ring,
			unsigned size, uint32_t *data);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned ring_size, u32 nop, u32 align_mask,
		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
		     enum amdgpu_ring_type ring_type);
void amdgpu_ring_fini(struct amdgpu_ring *ring);

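/*
 * Illustrative command-emission pattern using the ring helpers above (a
 * sketch under the assumption that amdgpu_ring_lock() returns 0 on
 * success, as the int return type suggests):
 *
 *	r = amdgpu_ring_lock(ring, ndw);
 *	if (r)
 *		return r;
 *	// ... write up to ndw dwords into the ring ...
 *	amdgpu_ring_unlock_commit(ring);
 */
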
/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	uint32_t		*kdata;
	void __user		*user_ptr;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;
	struct amdgpu_bo_list	*bo_list;
	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;
	/* relocations */
	struct amdgpu_bo_list_entry	*vm_bos;
	struct amdgpu_bo_list_entry	*ib_bos;
	struct list_head	validated;

	struct amdgpu_ib	*ibs;
	uint32_t		num_ibs;

	struct ww_acquire_ctx	ticket;

	/* user fence */
	struct amdgpu_user_fence uf;
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
	return p->ibs[ib_idx].ptr[idx];
}

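/*
 * amdgpu_get_ib_value() is a raw, bounds-unchecked accessor used by the
 * per-ring parse_cs() implementations to read a dword out of a submitted
 * IB, e.g. (illustrative):
 *
 *	u32 header = amdgpu_get_ib_value(p, ib_idx, 0);
 */
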
/*
 * Writeback
 */
#define AMDGPU_MAX_WB 1024	/* Reserve at most 1024 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);

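/*
 * Illustrative writeback-slot usage (a sketch; the gpu_addr arithmetic
 * assumes one 32-bit word per slot): allocate a slot, derive its GPU
 * address, and free it on teardown:
 *
 *	u32 wb;
 *	if (!amdgpu_wb_get(adev, &wb)) {
 *		uint64_t gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		// ... have the ring write status/seq to gpu_addr ...
 *		amdgpu_wb_free(adev, wb);
 *	}
 */
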
/**
 * struct amdgpu_pm - power management data
 *
 * Keeps track of the various data needed to make power management
 * decisions.
 */

enum amdgpu_pm_state_type {
	/* not used for dpm */
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	/* user selectable states */
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
	/* internal states */
	POWER_STATE_TYPE_INTERNAL_UVD,
	POWER_STATE_TYPE_INTERNAL_UVD_SD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD,
	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
	POWER_STATE_TYPE_INTERNAL_BOOT,
	POWER_STATE_TYPE_INTERNAL_THERMAL,
	POWER_STATE_TYPE_INTERNAL_ACPI,
	POWER_STATE_TYPE_INTERNAL_ULV,
	POWER_STATE_TYPE_INTERNAL_3DPERF,
};

enum amdgpu_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_EXTERNAL,
	THERMAL_TYPE_EXTERNAL_GPIO,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_ADT7473_WITH_INTERNAL,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
	THERMAL_TYPE_EMC2103_WITH_INTERNAL,
	THERMAL_TYPE_CI,
	THERMAL_TYPE_KV,
};

enum amdgpu_dpm_auto_throttle_src {
	AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
	AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
};

enum amdgpu_dpm_event_src {
	AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
	AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
	AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
	AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
	AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
};

#define AMDGPU_MAX_VCE_LEVELS 6

enum amdgpu_vce_level {
	AMDGPU_VCE_LEVEL_AC_ALL = 0,     /* AC, all cases */
	AMDGPU_VCE_LEVEL_DC_EE = 1,      /* DC, entropy encoding */
	AMDGPU_VCE_LEVEL_DC_LL_LOW = 2,  /* DC, low latency queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_LL_HIGH = 3, /* DC, low latency queue, 1080 >= res > 720 */
	AMDGPU_VCE_LEVEL_DC_GP_LOW = 4,  /* DC, general purpose queue, res <= 720 */
	AMDGPU_VCE_LEVEL_DC_GP_HIGH = 5, /* DC, general purpose queue, 1080 >= res > 720 */
};

struct amdgpu_ps {
	u32 caps;	/* vbios flags */
	u32 class;	/* vbios flags */
	u32 class2;	/* vbios flags */
	/* UVD clocks */
	u32 vclk;
	u32 dclk;
	/* VCE clocks */
	u32 evclk;
	u32 ecclk;
	bool vce_active;
	enum amdgpu_vce_level vce_level;
	/* asic priv */
	void *ps_priv;
};

struct amdgpu_dpm_thermal {
	/* thermal interrupt work */
	struct work_struct work;
	/* low temperature threshold */
	int min_temp;
	/* high temperature threshold */
	int max_temp;
	/* was last interrupt low to high or high to low */
	bool high_to_low;
	/* interrupt source */
	struct amdgpu_irq_src irq;
};

enum amdgpu_clk_action {
	AMDGPU_SCLK_UP = 1,
	AMDGPU_SCLK_DOWN
};

struct amdgpu_blacklist_clocks {
	u32 sclk;
	u32 mclk;
	enum amdgpu_clk_action action;
};

struct amdgpu_clock_and_voltage_limits {
	u32 sclk;
	u32 mclk;
	u16 vddc;
	u16 vddci;
};

struct amdgpu_clock_array {
	u32 count;
	u32 *values;
};

struct amdgpu_clock_voltage_dependency_entry {
	u32 clk;
	u16 v;
};

struct amdgpu_clock_voltage_dependency_table {
	u32 count;
	struct amdgpu_clock_voltage_dependency_entry *entries;
};

union amdgpu_cac_leakage_entry {
	struct {
		u16 vddc;
		u32 leakage;
	};
	struct {
		u16 vddc1;
		u16 vddc2;
		u16 vddc3;
	};
};

struct amdgpu_cac_leakage_table {
	u32 count;
	union amdgpu_cac_leakage_entry *entries;
};

struct amdgpu_phase_shedding_limits_entry {
	u16 voltage;
	u32 sclk;
	u32 mclk;
};

struct amdgpu_phase_shedding_limits_table {
	u32 count;
	struct amdgpu_phase_shedding_limits_entry *entries;
};

struct amdgpu_uvd_clock_voltage_dependency_entry {
	u32 vclk;
	u32 dclk;
	u16 v;
};

struct amdgpu_uvd_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_uvd_clock_voltage_dependency_entry *entries;
};

struct amdgpu_vce_clock_voltage_dependency_entry {
	u32 ecclk;
	u32 evclk;
	u16 v;
};

struct amdgpu_vce_clock_voltage_dependency_table {
	u8 count;
	struct amdgpu_vce_clock_voltage_dependency_entry *entries;
};

struct amdgpu_ppm_table {
	u8 ppm_design;
	u16 cpu_core_number;
	u32 platform_tdp;
	u32 small_ac_platform_tdp;
	u32 platform_tdc;
	u32 small_ac_platform_tdc;
	u32 apu_tdp;
	u32 dgpu_tdp;
	u32 dgpu_ulv_power;
	u32 tj_max;
};

struct amdgpu_cac_tdp_table {
	u16 tdp;
	u16 configurable_tdp;
	u16 tdc;
	u16 battery_power_limit;
	u16 small_power_limit;
	u16 low_cac_leakage;
	u16 high_cac_leakage;
	u16 maximum_power_delivery_limit;
};

struct amdgpu_dpm_dynamic_state {
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table vddci_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table mvdd_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table vddc_dependency_on_dispclk;
	struct amdgpu_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table vddgfx_dependency_on_sclk;
	struct amdgpu_clock_array valid_sclk_values;
	struct amdgpu_clock_array valid_mclk_values;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_dc;
	struct amdgpu_clock_and_voltage_limits max_clock_voltage_on_ac;
	u32 mclk_sclk_ratio;
	u32 sclk_mclk_delta;
	u16 vddc_vddci_delta;
	u16 min_vddc_for_pcie_gen2;
	struct amdgpu_cac_leakage_table cac_leakage_table;
	struct amdgpu_phase_shedding_limits_table phase_shedding_limits_table;
	struct amdgpu_ppm_table *ppm_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table;
};

struct amdgpu_dpm_fan {
	u16 t_min;
	u16 t_med;
	u16 t_high;
	u16 pwm_min;
	u16 pwm_med;
	u16 pwm_high;
	u8 t_hyst;
	u32 cycle_delay;
	u16 t_max;
	u8 control_mode;
	u16 default_max_fan_pwm;
	u16 default_fan_output_sensitivity;
	u16 fan_output_sensitivity;
	bool ucode_fan_control;
};

enum amdgpu_pcie_gen {
	AMDGPU_PCIE_GEN1 = 0,
	AMDGPU_PCIE_GEN2 = 1,
	AMDGPU_PCIE_GEN3 = 2,
	AMDGPU_PCIE_GEN_INVALID = 0xffff
};

enum amdgpu_dpm_forced_level {
	AMDGPU_DPM_FORCED_LEVEL_AUTO = 0,
	AMDGPU_DPM_FORCED_LEVEL_LOW = 1,
	AMDGPU_DPM_FORCED_LEVEL_HIGH = 2,
};

struct amdgpu_vce_state {
	/* vce clocks */
	u32 evclk;
	u32 ecclk;
	/* gpu clocks */
	u32 sclk;
	u32 mclk;
	u8 clk_idx;
	u8 pstate;
};

struct amdgpu_dpm_funcs {
	int (*get_temperature)(struct amdgpu_device *adev);
	int (*pre_set_power_state)(struct amdgpu_device *adev);
	int (*set_power_state)(struct amdgpu_device *adev);
	void (*post_set_power_state)(struct amdgpu_device *adev);
	void (*display_configuration_changed)(struct amdgpu_device *adev);
	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
	int (*force_performance_level)(struct amdgpu_device *adev, enum amdgpu_dpm_forced_level level);
	bool (*vblank_too_short)(struct amdgpu_device *adev);
	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
};

struct amdgpu_dpm {
	struct amdgpu_ps	*ps;
	/* number of valid power states */
	int			num_ps;
	/* current power state that is active */
	struct amdgpu_ps	*current_ps;
	/* requested power state */
	struct amdgpu_ps	*requested_ps;
	/* boot up power state */
	struct amdgpu_ps	*boot_ps;
	/* default uvd power state */
	struct amdgpu_ps	*uvd_ps;
	/* vce requirements */
	struct amdgpu_vce_state	vce_states[AMDGPU_MAX_VCE_LEVELS];
	enum amdgpu_vce_level	vce_level;
	enum amdgpu_pm_state_type state;
	enum amdgpu_pm_state_type user_state;
	u32			platform_caps;
	u32			voltage_response_time;
	u32			backbias_response_time;
	void			*priv;
	u32			new_active_crtcs;
	int			new_active_crtc_count;
	u32			current_active_crtcs;
	int			current_active_crtc_count;
	struct amdgpu_dpm_dynamic_state dyn_state;
	struct amdgpu_dpm_fan	fan;
	u32			tdp_limit;
	u32			near_tdp_limit;
	u32			near_tdp_limit_adjusted;
	u32			sq_ramping_threshold;
	u32			cac_leakage;
	u16			tdp_od_limit;
	u32			tdp_adjustment;
	u16			load_line_slope;
	bool			power_control;
	bool			ac_power;
	/* special states active */
	bool			thermal_active;
	bool			uvd_active;
	bool			vce_active;
	/* thermal handling */
	struct amdgpu_dpm_thermal thermal;
	/* forced levels */
	enum amdgpu_dpm_forced_level forced_level;
};

struct amdgpu_pm {
	struct mutex		mutex;
	u32			current_sclk;
	u32			current_mclk;
	u32			default_sclk;
	u32			default_mclk;
	struct amdgpu_i2c_chan	*i2c_bus;
	/* internal thermal controller on rv6xx+ */
	enum amdgpu_int_thermal_type int_thermal_type;
	struct device		*int_hwmon_dev;
	/* fan control parameters */
	bool			no_fan;
	u8			fan_pulses_per_revolution;
	u8			fan_min_rpm;
	u8			fan_max_rpm;
	/* dpm */
	bool			dpm_enabled;
	struct amdgpu_dpm	dpm;
	const struct firmware	*fw;	/* SMC firmware */
	uint32_t		fw_version;
	const struct amdgpu_dpm_funcs *funcs;
};

/*
 * UVD
 */
#define AMDGPU_MAX_UVD_HANDLES	10
#define AMDGPU_UVD_STACK_SIZE	(1024*1024)
#define AMDGPU_UVD_HEAP_SIZE	(1024*1024)
#define AMDGPU_UVD_FIRMWARE_OFFSET 256

struct amdgpu_uvd {
	struct amdgpu_bo	*vcpu_bo;
	void			*cpu_addr;
	uint64_t		gpu_addr;
	void			*saved_bo;
	atomic_t		handles[AMDGPU_MAX_UVD_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_UVD_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* UVD firmware */
	struct amdgpu_ring	ring;
	struct amdgpu_irq_src	irq;
	bool			address_64_bit;
};

/*
 * VCE
 */
#define AMDGPU_MAX_VCE_HANDLES	16
#define AMDGPU_VCE_FIRMWARE_OFFSET 256

struct amdgpu_vce {
	struct amdgpu_bo	*vcpu_bo;
	uint64_t		gpu_addr;
	unsigned		fw_version;
	unsigned		fb_version;
	atomic_t		handles[AMDGPU_MAX_VCE_HANDLES];
	struct drm_file		*filp[AMDGPU_MAX_VCE_HANDLES];
	struct delayed_work	idle_work;
	const struct firmware	*fw;	/* VCE firmware */
	struct amdgpu_ring	ring[AMDGPU_MAX_VCE_RINGS];
	struct amdgpu_irq_src	irq;
};

/*
 * SDMA
 */
struct amdgpu_sdma {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;

	struct amdgpu_ring	ring;
};

/*
 * Firmware
 */
struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	bool smu_load;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);
void amdgpu_test_ring_sync(struct amdgpu_device *adev,
			   struct amdgpu_ring *cpA,
			   struct amdgpu_ring *cpB);
void amdgpu_test_syncing(struct amdgpu_device *adev);

/*
 * MMU Notifier
 */
#if defined(CONFIG_MMU_NOTIFIER)
int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
void amdgpu_mn_unregister(struct amdgpu_bo *bo);
#else
/* the stubs must be static inline, since this header is included widely */
static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
#endif

/*
 * Debugfs
 */
struct amdgpu_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};

int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);

#if defined(CONFIG_DEBUG_FS)
int amdgpu_debugfs_init(struct drm_minor *minor);
void amdgpu_debugfs_cleanup(struct drm_minor *minor);
#endif

/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool untouched;
	bool grbm_indexed;
};

struct amdgpu_cu_info {
	uint32_t number;	/* total active CU number */
	uint32_t ao_cu_mask;
	uint32_t bitmap[4][4];
};


/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* wait for mc_idle */
	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	int (*get_cu_info)(struct amdgpu_device *adev, struct amdgpu_cu_info *info);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

int amdgpu_ctx_alloc(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		     uint32_t *id, uint32_t flags);
int amdgpu_ctx_free(struct amdgpu_device *adev, struct amdgpu_fpriv *fpriv,
		    uint32_t id);

void amdgpu_ctx_fini(struct amdgpu_fpriv *fpriv);
struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

extern int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

struct amdgpu_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;
	struct rw_semaphore exclusive_lock;

	/* ASIC */
	enum amdgpu_asic_type asic_type;
	uint32_t family;
	uint32_t rev_id;
	uint32_t external_rev_id;
	unsigned long flags;
	int usec_timeout;
	const struct amdgpu_asic_funcs *asic_funcs;
	bool shutdown;
	bool suspend;
	bool need_dma32;
	bool accel_working;
	bool needs_reset;
	struct work_struct reset_work;
	struct notifier_block acpi_nb;
	struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry *debugfs_regs;
#endif
	struct amdgpu_atif atif;
	struct amdgpu_atcs atcs;
	struct mutex srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex grbm_idx_mutex;
	struct dev_pm_domain vga_pm_domain;
	bool have_disp_power_ref;

	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	uint16_t bios_header_start;
	struct amdgpu_bo *stollen_vga_memory;
	uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t smc_rreg;
	amdgpu_wreg_t smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t pcie_rreg;
	amdgpu_wreg_t pcie_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t uvd_ctx_rreg;
	amdgpu_wreg_t uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t didt_rreg;
	amdgpu_wreg_t didt_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t audio_endpt_rreg;
	amdgpu_block_wreg_t audio_endpt_wreg;
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct amdgpu_doorbell doorbell;

	/* clock/pll info */
	struct amdgpu_clock clock;

	/* MC */
	struct amdgpu_mc mc;
	struct amdgpu_gart gart;
	struct amdgpu_dummy_page dummy_page;
	struct amdgpu_vm_manager vm_manager;

	/* memory management */
	struct amdgpu_mman mman;
	struct amdgpu_gem gem;
	struct amdgpu_vram_scratch vram_scratch;
	struct amdgpu_wb wb;
	atomic64_t vram_usage;
	atomic64_t vram_vis_usage;
	atomic64_t gtt_usage;
	atomic64_t num_bytes_moved;
	atomic_t gpu_reset_counter;

	/* display */
	struct amdgpu_mode_info mode_info;
	struct work_struct hotplug_work;
	struct amdgpu_irq_src crtc_irq;
	struct amdgpu_irq_src pageflip_irq;
	struct amdgpu_irq_src hpd_irq;

	/* rings */
	wait_queue_head_t fence_queue;
	unsigned fence_context;
	struct mutex ring_lock;
	unsigned num_rings;
	struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
	bool ib_pool_ready;
	struct amdgpu_sa_manager ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq irq;

	/* dpm */
	struct amdgpu_pm pm;
	u32 cg_flags;
	u32 pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr smu;

	/* gfx */
	struct amdgpu_gfx gfx;

	/* sdma */
	struct amdgpu_sdma sdma[2];
	struct amdgpu_irq_src sdma_trap_irq;
	struct amdgpu_irq_src sdma_illegal_inst_irq;

	/* uvd */
	bool has_uvd;
	struct amdgpu_uvd uvd;

	/* vce */
	struct amdgpu_vce vce;

	/* firmwares */
	struct amdgpu_firmware firmware;

	/* GDS */
	struct amdgpu_gds gds;

	const struct amdgpu_ip_block_version *ip_blocks;
	int num_ip_blocks;
	bool *ip_block_enabled;
	struct mutex mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	u64 vram_pin_size;
	u64 gart_pin_size;
};

bool amdgpu_device_is_px(struct drm_device *dev);
int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			bool always_indirect);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    bool always_indirect);
u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);

/*
 * Cast helper
 */
extern const struct fence_ops amdgpu_fence_ops;
static inline struct amdgpu_fence *to_amdgpu_fence(struct fence *f)
{
	struct amdgpu_fence *__f = container_of(f, struct amdgpu_fence, base);

	if (__f->base.ops == &amdgpu_fence_ops)
		return __f;

	return NULL;
}
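
/*
 * to_amdgpu_fence() returns NULL when @f was not created by this driver,
 * so callers that can see fences from other drivers must check the
 * result before touching amdgpu_fence fields (sketch; handle_foreign()
 * is a hypothetical fallback):
 *
 *	struct amdgpu_fence *af = to_amdgpu_fence(f);
 *
 *	if (!af)
 *		return handle_foreign(f);
 */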

/*
 * Registers read & write functions.
 */
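/*
 * Note: most of the macros below expand to calls that reference a local
 * variable named 'adev'; they are only usable in functions that have a
 * struct amdgpu_device *adev in scope.
 */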
#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), false)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), true)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), false))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), false)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), true)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
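/*
 * WREG32_P() is a read-modify-write helper: the bits set in @mask are
 * preserved from the current register value and @val supplies the bits
 * outside @mask, so callers pass the complement of the field they want
 * to change, e.g. (hypothetical FOO/BAR register and field names):
 *
 *	WREG32_P(mmFOO, v << FOO__BAR__SHIFT, ~FOO__BAR_MASK);
 */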
#define WREG32_P(reg, val, mask) \
	do { \
		uint32_t tmp_ = RREG32(reg); \
		tmp_ &= (mask); \
		tmp_ |= ((val) & ~(mask)); \
		WREG32(reg, tmp_); \
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
	do { \
		uint32_t tmp_ = RREG32_PLL(reg); \
		tmp_ &= (mask); \
		tmp_ |= ((val) & ~(mask)); \
		WREG32_PLL(reg, tmp_); \
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val) \
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) | \
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field) \
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
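
/*
 * Typical field update using the macros above (sketch with hypothetical
 * mmFOO / FOO__BAR register and field names):
 *
 *	u32 tmp = RREG32(mmFOO);
 *
 *	tmp = REG_SET_FIELD(tmp, FOO, BAR, 1);
 *	WREG32(mmFOO, tmp);
 */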

/*
 * BIOS helpers.
 */
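/*
 * The BIOS image is little-endian; RBIOS16()/RBIOS32() assemble wider
 * values byte by byte, so the reads behave the same on any host
 * endianness.
 */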
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

/*
 * RING helpers.
 */
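/*
 * amdgpu_ring_write() stores one dword in @ring and advances the write
 * pointer, wrapping through ptr_mask. Overrunning the reserved space is
 * only reported via DRM_ERROR, not prevented, so callers must have
 * reserved enough dwords on the ring beforehand.
 */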
static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
{
	if (ring->count_dw <= 0)
		DRM_ERROR("amdgpu: writing more dwords to the ring than expected!\n");
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * ASICs macro.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
#define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gart_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gart.gart_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_vm_pad_ib(adev, ib) ((adev)->vm_manager.vm_pte_funcs->pad_ib((ib)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib) (r)->funcs->emit_ib((r), (ib))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, write64bit) (r)->funcs->emit_fence((r), (addr), (seq), (write64bit))
#define amdgpu_ring_emit_semaphore(r, semaphore, emit_wait) (r)->funcs->emit_semaphore((r), (semaphore), (emit_wait))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_set_vga_render_state(adev, r) (adev)->mode_info.funcs->set_vga_render_state((adev), (r))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_vblank_wait(adev, crtc) (adev)->mode_info.funcs->vblank_wait((adev), (crtc))
#define amdgpu_display_is_display_hung(adev) (adev)->mode_info.funcs->is_display_hung((adev))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
#define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s))

#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))

/* Common functions */
int amdgpu_gpu_reset(struct amdgpu_device *adev);
void amdgpu_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_card_posted(struct amdgpu_device *adev);
void amdgpu_update_display_priority(struct amdgpu_device *adev);
bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem);
void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
void amdgpu_gtt_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
void amdgpu_program_register_sequence(struct amdgpu_device *adev,
				      const u32 *registers,
				      const u32 array_size);

/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
int amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv);
int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc);
int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc);
void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc);
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * vm
 */
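/*
 * Rough life cycle (sketch): amdgpu_vm_bo_add() attaches a BO to a VM,
 * amdgpu_vm_bo_map()/amdgpu_vm_bo_unmap() manage its GPU virtual address
 * ranges, amdgpu_vm_bo_update() writes the resulting page table entries,
 * and amdgpu_vm_bo_rmv() drops the association again.
 */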
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					       struct amdgpu_vm *vm,
					       struct list_head *head);
struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
				       struct amdgpu_vm *vm);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *updates);
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *fence);
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint32_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo);

#include "amdgpu_object.h"

#endif