/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

/* TODO: Here are things that need to be done:
 * - surface allocator & initializer: (a bit like scratch regs) should
 *   initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 *   related to surfaces
 * - WB: write back stuff (handle it a bit like the scratch regs)
 * - Vblank: look at Jesse's rework and what we should do
 * - r600/r700: gart & cp
 * - cs: clean up the cs ioctl, use a bitmap & things like that.
 * - power management stuff
 * - Barrier in gart code
 * - Unmappable vram?
 * - TESTING, TESTING, TESTING
 */

/* Initialization path:
 * We expect that acceleration initialization might fail for various
 * reasons even though we work hard to make it work on most
 * configurations. In order to still have a working userspace in such
 * a situation the init path must succeed up to the memory controller
 * initialization point. Failures before this point are considered
 * fatal errors. Here is the init call chain:
 *	radeon_device_init	performs common structure and mutex initialization
 *	asic_init		sets up the GPU memory layout and performs all
 *				one-time initialization (failures in this
 *				function are considered fatal)
 *	asic_startup		sets up GPU acceleration; to follow the
 *				guideline, the first thing this function
 *				should do is set up the GPU memory
 *				controller (only MC setup failures are
 *				considered fatal)
 */

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"

/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;

/*
 * Copy from radeon_drv.h so we don't have to include both and have conflicting
 * symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE		16
#define RADEON_DEBUGFS_MAX_COMPONENTS	32
#define RADEONFB_CONN_LIMIT		4
#define RADEON_BIOS_NUM_SCRATCH		8

/* max number of rings */
#define RADEON_NUM_RINGS		3

/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX	0

/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX	1
#define CAYMAN_RING_TYPE_CP2_INDEX	2

/* hardcode those limits for now */
#define RADEON_VA_RESERVED_SIZE		(8 << 20)
#define RADEON_IB_VM_MAX_SIZE		(64 << 10)

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
	CHIP_ERRATA_PLL_DELAY = 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
#define ATRM_BIOS_PAGE 4096

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
	return false;
}

static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	return -EINVAL;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev);


/*
 * Mutex which allows recursive locking from the same process.
 */
struct radeon_mutex {
	struct mutex		mutex;
	struct task_struct	*owner;
	int			level;
};

static inline void radeon_mutex_init(struct radeon_mutex *mutex)
{
	mutex_init(&mutex->mutex);
	mutex->owner = NULL;
	mutex->level = 0;
}

static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
{
	if (mutex_trylock(&mutex->mutex)) {
		/* The mutex was unlocked before, so it's ours now */
		mutex->owner = current;
	} else if (mutex->owner != current) {
		/* Another process locked the mutex, take it */
		mutex_lock(&mutex->mutex);
		mutex->owner = current;
	}
	/* Otherwise the mutex was already locked by this process */

	mutex->level++;
}

static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
{
	if (--mutex->level > 0)
		return;

	mutex->owner = NULL;
	mutex_unlock(&mutex->mutex);
}

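/*
 * Purely illustrative example (the helper below is hypothetical and
 * unused by the driver): how the recursive mutex above behaves when
 * the same process nests lock calls.
 */
static inline void radeon_mutex_usage_example(struct radeon_mutex *mutex)
{
	radeon_mutex_lock(mutex);	/* first lock: owner = current, level = 1 */
	radeon_mutex_lock(mutex);	/* same process nests: no deadlock, level = 2 */
	radeon_mutex_unlock(mutex);	/* level drops back to 1, mutex still held */
	radeon_mutex_unlock(mutex);	/* level reaches 0, underlying mutex released */
}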

/*
 * Dummy page
 */
struct radeon_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);


/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll dcpll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 Khz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Power management
 */
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
extern int si_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
				    unsigned *bankh, unsigned *mtaspect,
				    unsigned *tile_split);

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t		scratch_reg;
	uint64_t		gpu_addr;
	volatile uint32_t	*cpu_addr;
	atomic_t		seq;
	uint32_t		last_seq;
	unsigned long		last_activity;
	wait_queue_head_t	queue;
	struct list_head	emitted;
	struct list_head	signaled;
	bool			initialized;
};

struct radeon_fence {
	struct radeon_device	*rdev;
	struct kref		kref;
	struct list_head	list;
	/* protected by radeon_fence.lock */
	uint32_t		seq;
	bool			emitted;
	bool			signaled;
	/* RB, DMA, etc. */
	int			ring;
	struct radeon_semaphore	*semaphore;
};

int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);

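/*
 * Purely illustrative example (the helper below is hypothetical and
 * unused by the driver): a minimal create/emit/wait/unref sequence
 * using the fence API declared above. In practice the emit is
 * expected to happen while the corresponding ring is being filled.
 */
static inline int radeon_fence_usage_example(struct radeon_device *rdev)
{
	struct radeon_fence *fence;
	int r;

	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;
	/* ... commands for the gfx ring would be emitted here ... */
	r = radeon_fence_emit(rdev, fence);	/* place the fence after the commands */
	if (!r)
		r = radeon_fence_wait(fence, false);	/* block until the GPU reaches it */
	radeon_fence_unref(&fence);
	return r;
}
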
/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_bo *bo;
};

#define RADEON_GEM_MAX_SURFACES 8

/*
 * TTM.
 */
struct radeon_mman {
	struct ttm_bo_global_ref	bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;
};

/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* bo list is protected by bo being reserved */
	struct list_head	bo_list;
	/* vm list is protected by vm mutex */
	struct list_head	vm_list;
	/* constant after initialization */
	struct radeon_vm	*vm;
	struct radeon_bo	*bo;
	uint64_t		soffset;
	uint64_t		eoffset;
	uint32_t		flags;
	bool			valid;
};

struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				placements[3];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	void				*kptr;
	u32				tiling_flags;
	u32				pitch;
	int				surface_reg;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct radeon_device		*rdev;
	struct drm_gem_object		gem_base;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

struct radeon_bo_list {
	struct ttm_validate_buffer	tv;
	struct radeon_bo		*bo;
	uint64_t			gpu_offset;
	unsigned			rdomain;
	unsigned			wdomain;
	u32				tiling_flags;
};

/* Sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like
 * the indirect buffers or semaphores, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in
 * offset order (the first entry has offset == 0, the last entry has
 * the highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
struct radeon_sa_manager {
	struct radeon_bo	*bo;
	struct list_head	sa_bo;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
};

struct radeon_sa_bo;

/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head		list;
	struct radeon_sa_manager	*manager;
	unsigned			offset;
	unsigned			size;
};

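/*
 * Purely illustrative example (the helper below is hypothetical and
 * unused by the driver): the "allocate at the end" test described in
 * the sub-allocation manager comment above, i.e.
 * total_size - (last_object_offset + last_object_size) >= alloc_size.
 */
static inline bool radeon_sa_fits_after_last(struct radeon_sa_manager *sa_manager,
					     struct radeon_sa_bo *last,
					     unsigned alloc_size)
{
	return (sa_manager->size - (last->offset + last->size)) >= alloc_size;
}
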
/*
 * GEM objects.
 */
struct radeon_gem {
	struct mutex		mutex;
	struct list_head	objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj);

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle);

/*
 * Semaphores.
 */
struct radeon_ring;

#define	RADEON_SEMAPHORE_BO_SIZE	256

struct radeon_semaphore_driver {
	rwlock_t		lock;
	struct list_head	bo;
};

struct radeon_semaphore_bo;

/* everything here is constant */
struct radeon_semaphore {
	struct list_head	list;
	uint64_t		gpu_addr;
	uint32_t		*cpu_ptr;
	struct radeon_semaphore_bo	*bo;
};

struct radeon_semaphore_bo {
	struct list_head	list;
	struct radeon_ib	*ib;
	struct list_head	free;
	struct radeon_semaphore	semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
	unsigned		nused;
};

void radeon_semaphore_driver_fini(struct radeon_device *rdev);
int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore);
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore *semaphore);

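/*
 * Purely illustrative example (the helper below is hypothetical and
 * unused by the driver): using a semaphore to order work between two
 * rings, making a compute ring wait for the gfx ring. Lifetime and
 * error handling are reduced to the minimum; the emit calls are
 * expected to happen while the respective rings are being filled.
 */
static inline int radeon_semaphore_usage_example(struct radeon_device *rdev)
{
	struct radeon_semaphore *sem;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;
	/* the gfx ring signals ... */
	radeon_semaphore_emit_signal(rdev, RADEON_RING_TYPE_GFX_INDEX, sem);
	/* ... and the first compute ring waits for that signal */
	radeon_semaphore_emit_wait(rdev, CAYMAN_RING_TYPE_CP1_INDEX, sem);
	radeon_semaphore_free(rdev, sem);
	return 0;
}
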
/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)

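/*
 * Worked example of the alignment macro above: with 4KiB GPU pages,
 * RADEON_GPU_PAGE_ALIGN(0x1001) = (0x1001 + 0xfff) & ~0xfff = 0x2000,
 * while an already aligned value such as 0x2000 is left unchanged.
 */
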
struct radeon_gart {
	dma_addr_t			table_addr;
	struct radeon_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);


/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	bool			vram_is_ddr;
	bool			igp_sideport_enabled;
	u64			gtt_base_align;
};

bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);

/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned		num_reg;
	uint32_t		reg_base;
	bool			free[32];
	uint32_t		reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);


/*
 * IRQS.
 */

struct radeon_unpin_work {
	struct work_struct work;
	struct radeon_device *rdev;
	int crtc_id;
	struct radeon_fence *fence;
	struct drm_pending_vblank_event *event;
	struct radeon_bo *old_rbo;
	u64 new_crtc_base;
};

struct r500_irq_stat_regs {
	u32 disp_int;
	u32 hdmi0_status;
};

struct r600_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 hdmi0_status;
	u32 hdmi1_status;
};

struct evergreen_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 disp_int_cont3;
	u32 disp_int_cont4;
	u32 disp_int_cont5;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 d3grph_int;
	u32 d4grph_int;
	u32 d5grph_int;
	u32 d6grph_int;
	u32 afmt_status1;
	u32 afmt_status2;
	u32 afmt_status3;
	u32 afmt_status4;
	u32 afmt_status5;
	u32 afmt_status6;
};

union radeon_irq_stat_regs {
	struct r500_irq_stat_regs r500;
	struct r600_irq_stat_regs r600;
	struct evergreen_irq_stat_regs evergreen;
};

#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 6

struct radeon_irq {
	bool		installed;
	bool		sw_int[RADEON_NUM_RINGS];
	bool		crtc_vblank_int[RADEON_MAX_CRTCS];
	bool		pflip[RADEON_MAX_CRTCS];
	wait_queue_head_t	vblank_queue;
	bool		hpd[RADEON_MAX_HPD_PINS];
	bool		gui_idle;
	bool		gui_idle_acked;
	wait_queue_head_t	idle_queue;
	bool		afmt[RADEON_MAX_AFMT_BLOCKS];
	spinlock_t	sw_lock;
	int		sw_refcount[RADEON_NUM_RINGS];
	union radeon_irq_stat_regs stat_regs;
	spinlock_t	pflip_lock[RADEON_MAX_CRTCS];
	int		pflip_refcount[RADEON_MAX_CRTCS];
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);

/*
 * CP & rings.
 */

struct radeon_ib {
	struct radeon_sa_bo	sa_bo;
	unsigned		idx;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	struct radeon_fence	*fence;
	unsigned		vm_id;
	bool			is_const_ib;
};

/*
 * locking -
 * mutex protects scheduled_ibs, ready, alloc_bm
 */
struct radeon_ib_pool {
	struct radeon_mutex		mutex;
	struct radeon_sa_manager	sa_manager;
	struct radeon_ib		ibs[RADEON_IB_POOL_SIZE];
	bool				ready;
	unsigned			head_id;
};

struct radeon_ring {
	struct radeon_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr;
	unsigned		rptr_offs;
	unsigned		rptr_reg;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		wptr_reg;
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	struct mutex		mutex;
	bool			ready;
	u32			ptr_reg_shift;
	u32			ptr_reg_mask;
	u32			nop;
};

/*
 * VM
 */
struct radeon_vm {
	struct list_head	list;
	struct list_head	va;
	int			id;
	unsigned		last_pfn;
	u64			pt_gpu_addr;
	u64			*pt;
	struct radeon_sa_bo	sa_bo;
	struct mutex		mutex;
	/* last fence for cs using this vm */
	struct radeon_fence	*fence;
};

struct radeon_vm_funcs {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	/* cs mutex must be locked for schedule_ib */
	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
	uint32_t (*page_flags)(struct radeon_device *rdev,
			       struct radeon_vm *vm,
			       uint32_t flags);
	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
			unsigned pfn, uint64_t addr, uint32_t flags);
};

struct radeon_vm_manager {
	struct list_head		lru_vm;
	uint32_t			use_bitmap;
	struct radeon_sa_manager	sa_manager;
	uint32_t			max_pfn;
	/* fields constant after init */
	const struct radeon_vm_funcs	*funcs;
	/* number of VMIDs */
	unsigned			nvm;
	/* vram base address for page table entry */
	u64				vram_base_offset;
	/* is vm enabled? */
	bool				enabled;
};

/*
 * file private structure
 */
struct radeon_fpriv {
	struct radeon_vm	vm;
};

/*
 * R6xx+ IH ring
 */
struct r600_ih {
	struct radeon_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr;
	unsigned		rptr_offs;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	uint64_t		gpu_addr;
	uint32_t		ptr_mask;
	spinlock_t		lock;
	bool			enabled;
};

struct r600_blit_cp_primitives {
	void (*set_render_target)(struct radeon_device *rdev, int format,
				  int w, int h, u64 gpu_addr);
	void (*cp_set_surface_sync)(struct radeon_device *rdev,
				    u32 sync_type, u32 size,
				    u64 mc_addr);
	void (*set_shaders)(struct radeon_device *rdev);
	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
	void (*set_tex_resource)(struct radeon_device *rdev,
				 int format, int w, int h, int pitch,
				 u64 gpu_addr, u32 size);
	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
			     int x2, int y2);
	void (*draw_auto)(struct radeon_device *rdev);
	void (*set_default_state)(struct radeon_device *rdev);
};

struct r600_blit {
	struct mutex		mutex;
	struct radeon_bo	*shader_obj;
	struct r600_blit_cp_primitives primitives;
	int max_dim;
	int ring_size_common;
	int ring_size_per_loop;
	u64 shader_gpu_addr;
	u32 vs_offset, ps_offset;
	u32 state_offset;
	u32 state_len;
	u32 vb_used, vb_total;
	struct radeon_ib *vb_ib;
};

void r600_blit_suspend(struct radeon_device *rdev);

/*
 * SI RLC stuff
 */
struct si_rlc {
	/* for power gating */
	struct radeon_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	/* for clear state */
	struct radeon_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
};

int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);

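/*
 * Purely illustrative example (the helper below is hypothetical and
 * unused by the driver): the usual lock -> write packets ->
 * unlock_commit pattern for the ring functions declared above. The
 * dword count is arbitrary and the actual packet writes are omitted.
 */
static inline int radeon_ring_usage_example(struct radeon_device *rdev,
					    struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 16);	/* reserve room for 16 dwords */
	if (r)
		return r;
	/* ... up to 16 dwords of packets would be written here ... */
	radeon_ring_unlock_commit(rdev, ring);	/* commit the dwords and release the ring */
	return 0;
}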

/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object		*gobj;
	struct radeon_bo		*robj;
	struct radeon_bo_list		lobj;
	uint32_t			handle;
	uint32_t			flags;
};

struct radeon_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	int			kpage_idx[2];
	uint32_t		*kpage[2];
	uint32_t		*kdata;
	void __user		*user_ptr;
	int			last_copied_page;
	int			last_page_index;
};

struct radeon_cs_parser {
	struct device		*dev;
	struct radeon_device	*rdev;
	struct drm_file		*filp;
	/* chunks */
	unsigned		nchunks;
	struct radeon_cs_chunk	*chunks;
	uint64_t		*chunks_array;
	/* IB */
	unsigned		idx;
	/* relocations */
	unsigned		nrelocs;
	struct radeon_cs_reloc	*relocs;
	struct radeon_cs_reloc	**relocs_ptr;
	struct list_head	validated;
	/* indices of various chunks */
	int			chunk_ib_idx;
	int			chunk_relocs_idx;
	int			chunk_flags_idx;
	int			chunk_const_ib_idx;
	struct radeon_ib	*ib;
	struct radeon_ib	*const_ib;
	void			*track;
	unsigned		family;
	int			parser_error;
	u32			cs_flags;
	u32			ring;
	s32			priority;
};

extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);

struct radeon_cs_packet {
	unsigned	idx;
	unsigned	type;
	unsigned	reg;
	unsigned	opcode;
	int		count;
	unsigned	one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);


/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	bool			enabled;
	bool			use_event;
};

#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET   2048
#define R600_WB_EVENT_OFFSET     3072

/**
 * struct radeon_pm - power management data
 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk: GPU clock Mhz (core bandwidth depends on this clock)
 * @needed_bandwidth: current bandwidth needs
 *
 * It keeps track of various data needed to make power management
 * decisions. Bandwidth need is used to determine the minimum clocks
 * of the GPU and memory. The equation between gpu/memory clock and
 * available bandwidth is hw dependent (type of memory, bus size,
 * efficiency, ...)
 */

enum radeon_pm_method {
	PM_METHOD_PROFILE,
	PM_METHOD_DYNPM,
};

enum radeon_dynpm_state {
	DYNPM_STATE_DISABLED,
	DYNPM_STATE_MINIMUM,
	DYNPM_STATE_PAUSED,
	DYNPM_STATE_ACTIVE,
	DYNPM_STATE_SUSPENDED,
};
enum radeon_dynpm_action {
	DYNPM_ACTION_NONE,
	DYNPM_ACTION_MINIMUM,
	DYNPM_ACTION_DOWNCLOCK,
	DYNPM_ACTION_UPCLOCK,
	DYNPM_ACTION_DEFAULT
};

enum radeon_voltage_type {
	VOLTAGE_NONE = 0,
	VOLTAGE_GPIO,
	VOLTAGE_VDDC,
	VOLTAGE_SW
};

enum radeon_pm_state_type {
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
};

enum radeon_pm_profile_type {
	PM_PROFILE_DEFAULT,
	PM_PROFILE_AUTO,
	PM_PROFILE_LOW,
	PM_PROFILE_MID,
	PM_PROFILE_HIGH,
};

#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX 1
#define PM_PROFILE_MID_SH_IDX 2
#define PM_PROFILE_HIGH_SH_IDX 3
#define PM_PROFILE_LOW_MH_IDX 4
#define PM_PROFILE_MID_MH_IDX 5
#define PM_PROFILE_HIGH_MH_IDX 6
#define PM_PROFILE_MAX 7

struct radeon_pm_profile {
	int dpms_off_ps_idx;
	int dpms_on_ps_idx;
	int dpms_off_cm_idx;
	int dpms_on_cm_idx;
};

enum radeon_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
};

struct radeon_voltage {
	enum radeon_voltage_type type;
	/* gpio voltage */
	struct radeon_gpio_rec gpio;
	u32 delay; /* delay in usec from voltage drop to sclk change */
	bool active_high; /* voltage drop is active when bit is high */
	/* VDDC voltage */
	u8 vddc_id; /* index into vddc voltage table */
	u8 vddci_id; /* index into vddci voltage table */
	bool vddci_enabled;
	/* r6xx+ sw */
	u16 voltage;
	/* evergreen+ vddci */
	u16 vddci;
};

/* clock mode flags */
#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)

struct radeon_pm_clock_info {
	/* memory clock */
	u32 mclk;
	/* engine clock */
	u32 sclk;
	/* voltage info */
	struct radeon_voltage voltage;
	/* standardized clock flags */
	u32 flags;
};

/* state flags */
#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)

struct radeon_power_state {
	enum radeon_pm_state_type type;
	struct radeon_pm_clock_info *clock_info;
	/* number of valid clock modes in this power state */
	int num_clock_modes;
	struct radeon_pm_clock_info *default_clock_mode;
	/* standardized state flags */
	u32 flags;
	u32 misc; /* vbios specific flags */
	u32 misc2; /* vbios specific flags */
	int pcie_lanes; /* pcie lanes */
};

/*
 * Some modes are overclocked by a very low value; accept them.
 */
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */

struct radeon_pm {
	struct mutex		mutex;
	u32			active_crtcs;
	int			active_crtc_count;
	int			req_vblank;
	bool			vblank_sync;
	bool			gui_idle;
	fixed20_12		max_bandwidth;
	fixed20_12		igp_sideport_mclk;
	fixed20_12		igp_system_mclk;
	fixed20_12		igp_ht_link_clk;
	fixed20_12		igp_ht_link_width;
	fixed20_12		k8_bandwidth;
	fixed20_12		sideport_bandwidth;
	fixed20_12		ht_bandwidth;
	fixed20_12		core_bandwidth;
	fixed20_12		sclk;
	fixed20_12		mclk;
	fixed20_12		needed_bandwidth;
	struct radeon_power_state *power_state;
	/* number of valid power states */
	int			num_power_states;
	int			current_power_state_index;
	int			current_clock_mode_index;
	int			requested_power_state_index;
	int			requested_clock_mode_index;
	int			default_power_state_index;
	u32			current_sclk;
	u32			current_mclk;
	u16			current_vddc;
	u16			current_vddci;
	u32			default_sclk;
	u32			default_mclk;
	u16			default_vddc;
	u16			default_vddci;
	struct radeon_i2c_chan *i2c_bus;
	/* selected pm method */
	enum radeon_pm_method	pm_method;
	/* dynpm power management */
	struct delayed_work	dynpm_idle_work;
	enum radeon_dynpm_state	dynpm_state;
	enum radeon_dynpm_action	dynpm_planned_action;
	unsigned long		dynpm_action_timeout;
	bool			dynpm_can_upclock;
	bool			dynpm_can_downclock;
	/* profile-based power management */
	enum radeon_pm_profile_type profile;
	int			profile_index;
	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
	/* internal thermal controller on rv6xx+ */
	enum radeon_int_thermal_type int_thermal_type;
	struct device		*int_hwmon_dev;
};

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance);

struct r600_audio {
	bool			enabled;
	int			channels;
	int			rate;
	int			bits_per_sample;
	u8			status_bits;
	u8			category_code;
};

/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev, int test_number);


/*
 * Testing
 */
void radeon_test_moves(struct radeon_device *rdev);
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *cpA,
			   struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);


/*
 * ASIC specific functions.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform a special
	 * operation on a specific ioctl. For instance on wait idle some hw
	 * might want to perform an HDP flush through MMIO as it seems that
	 * some R6XX/R7XX hw doesn't take HDP flushes into account if programmed
	 * through the ring.
	 */
1171 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
1172 /* check if 3D engine is idle */
1173 bool (*gui_idle)(struct radeon_device *rdev);
1174 /* wait for mc_idle */
1175 int (*mc_wait_for_idle)(struct radeon_device *rdev);
1176 /* gart */
Alex Deucherc5b3b852012-02-23 17:53:46 -05001177 struct {
1178 void (*tlb_flush)(struct radeon_device *rdev);
1179 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
1180 } gart;
Alex Deucher54e88e02012-02-23 18:10:29 -05001181 /* ring specific callbacks */
Christian König4c87bc22011-10-19 19:02:21 +02001182 struct {
1183 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
Jerome Glisse721604a2012-01-05 22:11:05 -05001184 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
Christian König4c87bc22011-10-19 19:02:21 +02001185 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
Christian Könige32eb502011-10-23 12:56:27 +02001186 void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
Christian König4c87bc22011-10-19 19:02:21 +02001187 struct radeon_semaphore *semaphore, bool emit_wait);
Christian Königeb0c19c2012-02-23 15:18:44 +01001188 int (*cs_parse)(struct radeon_cs_parser *p);
Alex Deucherf7128122012-02-23 17:53:45 -05001189 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1190 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1191 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
Christian König312c4a82012-05-02 15:11:09 +02001192 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
Christian König4c87bc22011-10-19 19:02:21 +02001193 } ring[RADEON_NUM_RINGS];
Alex Deucher54e88e02012-02-23 18:10:29 -05001194 /* irqs */
Alex Deucherb35ea4a2012-02-23 17:53:43 -05001195 struct {
1196 int (*set)(struct radeon_device *rdev);
1197 int (*process)(struct radeon_device *rdev);
1198 } irq;
Alex Deucher54e88e02012-02-23 18:10:29 -05001199 /* displays */
Alex Deucherc79a49c2012-02-23 17:53:47 -05001200 struct {
1201 /* display watermarks */
1202 void (*bandwidth_update)(struct radeon_device *rdev);
1203 /* get frame count */
1204 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
1205 /* wait for vblank */
1206 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
1207 } display;
Alex Deucher54e88e02012-02-23 18:10:29 -05001208 /* copy functions for bo handling */
Alex Deucher27cd7762012-02-23 17:53:42 -05001209 struct {
1210 int (*blit)(struct radeon_device *rdev,
1211 uint64_t src_offset,
1212 uint64_t dst_offset,
1213 unsigned num_gpu_pages,
1214 struct radeon_fence *fence);
1215 u32 blit_ring_index;
1216 int (*dma)(struct radeon_device *rdev,
1217 uint64_t src_offset,
1218 uint64_t dst_offset,
1219 unsigned num_gpu_pages,
1220 struct radeon_fence *fence);
1221 u32 dma_ring_index;
1222 /* method used for bo copy */
1223 int (*copy)(struct radeon_device *rdev,
1224 uint64_t src_offset,
1225 uint64_t dst_offset,
1226 unsigned num_gpu_pages,
1227 struct radeon_fence *fence);
1228 /* ring used for bo copies */
1229 u32 copy_ring_index;
1230 } copy;
Alex Deucher54e88e02012-02-23 18:10:29 -05001231 /* surfaces */
Alex Deucher9e6f3d02012-02-23 17:53:49 -05001232 struct {
1233 int (*set_reg)(struct radeon_device *rdev, int reg,
1234 uint32_t tiling_flags, uint32_t pitch,
1235 uint32_t offset, uint32_t obj_size);
1236 void (*clear_reg)(struct radeon_device *rdev, int reg);
1237 } surface;
Alex Deucher54e88e02012-02-23 18:10:29 -05001238 /* hotplug detect */
Alex Deucher901ea572012-02-23 17:53:39 -05001239 struct {
1240 void (*init)(struct radeon_device *rdev);
1241 void (*fini)(struct radeon_device *rdev);
1242 bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1243 void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1244 } hpd;
Alex Deucherce8f5372010-05-07 15:10:16 -04001245 /* power management */
Alex Deuchera02fa392012-02-23 17:53:41 -05001246 struct {
1247 void (*misc)(struct radeon_device *rdev);
1248 void (*prepare)(struct radeon_device *rdev);
1249 void (*finish)(struct radeon_device *rdev);
1250 void (*init_profile)(struct radeon_device *rdev);
1251 void (*get_dynpm_state)(struct radeon_device *rdev);
Alex Deucher798bcf72012-02-23 17:53:48 -05001252 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
1253 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
1254 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
1255 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
1256 int (*get_pcie_lanes)(struct radeon_device *rdev);
1257 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1258 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
Alex Deuchera02fa392012-02-23 17:53:41 -05001259 } pm;
Alex Deucher6f34be52010-11-21 10:59:01 -05001260 /* pageflipping */
Alex Deucher0f9e0062012-02-23 17:53:40 -05001261 struct {
1262 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
1263 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
1264 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
1265 } pflip;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001266};

/*
 * Asic structures
 */
struct r100_gpu_lockup {
	unsigned long last_jiffies;
	u32 last_cp_rptr;
};

struct r100_asic {
	const unsigned *reg_safe_bm;
	unsigned reg_safe_bm_size;
	u32 hdp_cntl;
	struct r100_gpu_lockup lockup;
};

struct r300_asic {
	const unsigned *reg_safe_bm;
	unsigned reg_safe_bm_size;
	u32 resync_scratch;
	u32 hdp_cntl;
	struct r100_gpu_lockup lockup;
};

struct r600_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
	struct r100_gpu_lockup lockup;
};

struct rv770_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_fize;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
	struct r100_gpu_lockup lockup;
};

struct evergreen_asic {
	unsigned num_ses;
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
	struct r100_gpu_lockup lockup;
};

struct cayman_asic {
	unsigned max_shader_engines;
	unsigned max_pipes_per_simd;
	unsigned max_tile_pipes;
	unsigned max_simds_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_gs_threads;
	unsigned max_stack_entries;
	unsigned sx_num_of_sets;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned max_hw_contexts;
	unsigned sq_num_cf_insts;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_shader_engines;
	unsigned num_shader_pipes_per_simd;
	unsigned num_tile_pipes;
	unsigned num_simds_per_se;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
	struct r100_gpu_lockup lockup;
};

struct si_asic {
	unsigned max_shader_engines;
	unsigned max_pipes_per_simd;
	unsigned max_tile_pipes;
	unsigned max_simds_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_shader_engines;
	unsigned num_tile_pipes;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
	struct r100_gpu_lockup lockup;
};

union radeon_asic_config {
	struct r300_asic r300;
	struct r100_asic r100;
	struct r600_asic r600;
	struct rv770_asic rv770;
	struct evergreen_asic evergreen;
	struct cayman_asic cayman;
	struct si_asic si;
};

/*
 * asic initialization from radeon_asic.c
 */
void radeon_agp_disable(struct radeon_device *rdev);
int radeon_asic_init(struct radeon_device *rdev);


/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

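/*
 * For orientation (a sketch, not a definition used by this header): these
 * handlers are wired into the DRM ioctl table in radeon_kms.c roughly as
 *
 *	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl,
 *			  DRM_AUTH | DRM_UNLOCKED),
 *
 * so userspace reaches them through the generic libdrm command path; the
 * exact flags vary per ioctl.
 */
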
/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
	struct radeon_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};


/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);

struct radeon_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;
	/* ASIC */
	union radeon_asic_config config;
	enum radeon_family family;
	unsigned long flags;
	int usec_timeout;
	enum radeon_pll_errata pll_errata;
	int num_gb_pipes;
	int num_z_pipes;
	int disp_priority;
	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	uint16_t bios_header_start;
	struct radeon_bo *stollen_vga_memory;
	/* Register mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	radeon_rreg_t mc_rreg;
	radeon_wreg_t mc_wreg;
	radeon_rreg_t pll_rreg;
	radeon_wreg_t pll_wreg;
	uint32_t pcie_reg_mask;
	radeon_rreg_t pciep_rreg;
	radeon_wreg_t pciep_wreg;
	/* io port */
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct radeon_clock clock;
	struct radeon_mc mc;
	struct radeon_gart gart;
	struct radeon_mode_info mode_info;
	struct radeon_scratch scratch;
	struct radeon_mman mman;
	rwlock_t fence_lock;
	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
	struct radeon_semaphore_driver semaphore_drv;
	struct radeon_ring ring[RADEON_NUM_RINGS];
	struct radeon_ib_pool ib_pool;
	struct radeon_irq irq;
	struct radeon_asic *asic;
	struct radeon_gem gem;
	struct radeon_pm pm;
	uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
	struct radeon_mutex cs_mutex;
	struct radeon_wb wb;
	struct radeon_dummy_page dummy_page;
	bool shutdown;
	bool suspend;
	bool need_dma32;
	bool accel_working;
	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware *me_fw;	/* all family ME firmware */
	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
	const struct firmware *mc_fw;	/* NI MC firmware */
	const struct firmware *ce_fw;	/* SI CE firmware */
	struct r600_blit r600_blit;
	struct r600_vram_scratch vram_scratch;
	int msi_enabled; /* msi enabled */
	struct r600_ih ih; /* r6/700 interrupt ring */
	struct si_rlc rlc;
	struct work_struct hotplug_work;
	struct work_struct audio_work;
	int num_crtc; /* number of crtcs */
	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
	struct mutex vram_mutex;
	struct r600_audio audio; /* audio stuff */
	struct notifier_block acpi_nb;
	/* only one userspace can use Hyperz features or CMASK at a time */
	struct drm_file *hyperz_filp;
	struct drm_file *cmask_filp;
	/* i2c buses */
	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
	/* debugfs */
	struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
	/* virtual memory */
	struct radeon_vm_manager vm_manager;
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

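/*
 * Typical call sequence (a sketch of how the KMS load hook uses the above,
 * not a definition in this header): the driver allocates the device
 * structure and passes the PCI chip flags straight through, e.g.
 *
 *	rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
 *	r = radeon_device_init(rdev, dev, dev->pdev, flags);
 *	if (r)
 *		radeon_device_fini(rdev);
 */
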
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);

/*
 * Cast helper
 */
#define to_radeon_fence(p) ((struct radeon_fence *)(p))

/*
 * Registers read & write functions.
 */
#define RREG8(reg) readb((rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32(reg);		\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)
#define WREG32_PLL_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32_PLL(reg, tmp_);			\
	} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
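
/*
 * Usage sketch (illustrative only): WREG32_P() does a read-modify-write in
 * which "mask" selects the bits to preserve, so callers typically pass the
 * complement of the field they want to change, e.g.
 *
 *	WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B,
 *		 ~RADEON_CRTC_DISP_REQ_EN_B);
 *
 * which sets that single bit and leaves every other bit of the register
 * untouched.
 */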

/*
 * Indirect register accessors
 */
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	return r;
}

static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
}
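
/*
 * The PCIE block is reached through an index/data pair rather than a flat
 * MMIO window: the register offset is written to RADEON_PCIE_INDEX and the
 * payload moves through RADEON_PCIE_DATA.  A hedged usage sketch (the
 * register name is only an example):
 *
 *	u32 link = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
 *	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link);
 */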

void r100_pll_errata_after_index(struct radeon_device *rdev);


/*
 * ASICs helpers.
 */
#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
			    (rdev->pdev->device == 0x5969))
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
			     (rdev->family == CHIP_RV200) || \
			     (rdev->family == CHIP_RS100) || \
			     (rdev->family == CHIP_RS200) || \
			     (rdev->family == CHIP_RV250) || \
			     (rdev->family == CHIP_RV280) || \
			     (rdev->family == CHIP_RS300))
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
			    (rdev->family == CHIP_RV350) || \
			    (rdev->family == CHIP_R350) || \
			    (rdev->family == CHIP_RV380) || \
			    (rdev->family == CHIP_R420) || \
			    (rdev->family == CHIP_R423) || \
			    (rdev->family == CHIP_RV410) || \
			    (rdev->family == CHIP_RS400) || \
			    (rdev->family == CHIP_RS480))
#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
			  (rdev->ddev->pdev->device == 0x9443) || \
			  (rdev->ddev->pdev->device == 0x944B) || \
			  (rdev->ddev->pdev->device == 0x9506) || \
			  (rdev->ddev->pdev->device == 0x9509) || \
			  (rdev->ddev->pdev->device == 0x950F) || \
			  (rdev->ddev->pdev->device == 0x689C) || \
			  (rdev->ddev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
			    (rdev->family == CHIP_RS690) || \
			    (rdev->family == CHIP_RS740) || \
			    (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
			     (rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
			     (rdev->flags & RADEON_IS_IGP))

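/*
 * These helpers are used throughout the driver to branch on ASIC or display
 * block generation.  Illustrative sketch only (the setup_foo_* names are
 * hypothetical placeholders, not real functions):
 *
 *	if (ASIC_IS_DCE4(rdev))
 *		setup_foo_dce4(rdev);
 *	else if (ASIC_IS_DCE3(rdev))
 *		setup_foo_dce3(rdev);
 */
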
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

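/*
 * RBIOS16/RBIOS32 compose little-endian values out of single byte reads of
 * the cached ROM image.  A sketch of how the driver locates the BIOS header
 * (the 0x48 offset is the conventional ROM header pointer and is given here
 * only as an example):
 *
 *	rdev->bios_header_start = RBIOS16(0x48);
 */
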
int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);


/*
 * RING helpers.
 */
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
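
/*
 * Typical command submission pattern built on the helper above (a sketch;
 * error handling and the actual packet contents depend on the ring and ASIC):
 *
 *	r = radeon_ring_lock(rdev, ring, ndw);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, value);
 *	radeon_ring_unlock_commit(rdev, ring);
 */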

/*
 * ASICs macro.
 */
#define radeon_init(rdev) (rdev)->asic->init((rdev))
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))

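/*
 * Each macro above just dispatches through the vtable installed by
 * radeon_asic_init().  For instance (illustrative expansion):
 *
 *	radeon_hpd_sense(rdev, hpd)
 *	  == (rdev)->asic->hpd.sense((rdev), (hpd))
 *
 * so call sites never need per-family switches.
 */
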
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);

/*
 * vm
 */
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_manager_start(struct radeon_device *rdev);
int radeon_vm_manager_suspend(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo);
int radeon_vm_bo_add(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo,
		     uint64_t offset,
		     uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo);

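/*
 * Rough lifetime of a per-file GPU virtual address space (a sketch of the
 * intended call order, not a verbatim copy of driver code): create the VM,
 * add buffer objects to it, bind it to a page table, update the PTEs when
 * the backing memory moves, and tear everything down in reverse:
 *
 *	radeon_vm_init(rdev, vm);
 *	radeon_vm_bo_add(rdev, vm, bo, offset, flags);
 *	radeon_vm_bind(rdev, vm);
 *	radeon_vm_bo_update_pte(rdev, vm, bo, mem);
 *	...
 *	radeon_vm_unbind(rdev, vm);
 *	radeon_vm_fini(rdev, vm);
 */
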
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);

/*
 * R600 vram scratch functions
 */
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);

/*
 * r600 cs checking helpers
 */
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);

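/*
 * Worked example (assuming the usual max(1, size >> level) minification):
 * r600_mip_minify(256, 2) yields 64, and the result never drops below 1,
 * so the CS checker can derive per-mip block counts along the lines of
 *
 *	nbx = r600_fmt_get_nblocksx(format, r600_mip_minify(w0, level));
 *	nby = r600_fmt_get_nblocksy(format, r600_mip_minify(h0, level));
 *
 * where w0/h0 are the level 0 dimensions (names here are illustrative).
 */
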
/*
 * r600 functions used by radeon_encoder.c
 */
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);

extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);

/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif

#include "radeon_object.h"

#endif