/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

/* TODO: Here are things that need to be done:
 * - surface allocator & initializer: (a bit like scratch reg) should
 *   initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 *   related to surfaces
 * - WB: write back stuff (do it a bit like the scratch reg things)
 * - Vblank: look at Jesse's rework and what we should do
 * - r600/r700: gart & cp
 * - cs: clean cs ioctl, use bitmap & things like that.
 * - power management stuff
 * - Barrier in gart code
 * - Unmappable vram?
 * - TESTING, TESTING, TESTING
 */

/* Initialization path:
 * We expect that acceleration initialization might fail for various
 * reasons even though we work hard to make it work on most
 * configurations. In order to still have a working userspace in such a
 * situation the init path must succeed up to the memory controller
 * initialization point. Failures before this point are considered
 * fatal errors. Here is the init callchain:
 *	radeon_device_init	performs common structure and mutex
 *				initialization
 *	asic_init		sets up the GPU memory layout and performs
 *				all one-time initialization (failures in
 *				this function are considered fatal)
 *	asic_startup		sets up GPU acceleration; to follow the
 *				guideline the first thing this function
 *				should do is set up the GPU memory
 *				controller (only MC setup failures are
 *				considered fatal)
 */

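/* Illustrative sketch of the guideline above; the rxxx_* helpers and the
 * accel_disabled flag are hypothetical placeholders, not real driver
 * entry points:
 *
 *	int asic_startup(struct radeon_device *rdev)
 *	{
 *		int r = rxxx_mc_setup(rdev);	// MC failure is fatal
 *		if (r)
 *			return r;
 *		if (rxxx_cp_startup(rdev))	// acceleration may fail;
 *			accel_disabled = true;	// modesetting keeps working
 *		return 0;
 *	}
 */
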
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"

/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;

/*
 * Copy from radeon_drv.h so we don't have to include both and have conflicting
 * symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE		16
#define RADEON_DEBUGFS_MAX_COMPONENTS	32
#define RADEONFB_CONN_LIMIT		4
#define RADEON_BIOS_NUM_SCRATCH		8

/* max number of rings */
#define RADEON_NUM_RINGS		3

/* fence seq is set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ	0LL

/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX	0

/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX	1
#define CAYMAN_RING_TYPE_CP2_INDEX	2

/* hardcode those limits for now */
#define RADEON_VA_RESERVED_SIZE		(8 << 20)
#define RADEON_IB_VM_MAX_SIZE		(64 << 10)

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
	CHIP_ERRATA_PLL_DELAY = 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
#define ATRM_BIOS_PAGE 4096

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
	return false;
}

static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	return -EINVAL;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev);


/*
 * Mutex which allows recursive locking from the same process.
 */
struct radeon_mutex {
	struct mutex		mutex;
	struct task_struct	*owner;
	int			level;
};

static inline void radeon_mutex_init(struct radeon_mutex *mutex)
{
	mutex_init(&mutex->mutex);
	mutex->owner = NULL;
	mutex->level = 0;
}

static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
{
	if (mutex_trylock(&mutex->mutex)) {
		/* The mutex was unlocked before, so it's ours now */
		mutex->owner = current;
	} else if (mutex->owner != current) {
		/* Another process locked the mutex, take it */
		mutex_lock(&mutex->mutex);
		mutex->owner = current;
	}
	/* Otherwise the mutex was already locked by this process */

	mutex->level++;
}

static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
{
	if (--mutex->level > 0)
		return;

	mutex->owner = NULL;
	mutex_unlock(&mutex->mutex);
}

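/* Hypothetical usage sketch (not a driver entry point): the same process
 * may nest lock calls; the underlying mutex is only released when the
 * outermost radeon_mutex_unlock() brings the level back to zero.
 */
static inline void radeon_mutex_lock_example(struct radeon_mutex *mutex)
{
	radeon_mutex_lock(mutex);
	radeon_mutex_lock(mutex);	/* recursive lock from same process */
	/* ... critical section ... */
	radeon_mutex_unlock(mutex);	/* level 2 -> 1, mutex still held */
	radeon_mutex_unlock(mutex);	/* level 1 -> 0, mutex released */
}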

/*
 * Dummy page
 */
struct radeon_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);


/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll dcpll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Power management
 */
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
extern int si_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
				    unsigned *bankh, unsigned *mtaspect,
				    unsigned *tile_split);

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t		scratch_reg;
	uint64_t		gpu_addr;
	volatile uint32_t	*cpu_addr;
	/* seq is protected by ring emission lock */
	uint64_t		seq;
	atomic64_t		last_seq;
	unsigned long		last_activity;
	bool			initialized;
};

struct radeon_fence {
	struct radeon_device	*rdev;
	struct kref		kref;
	/* protected by radeon_fence.lock */
	uint64_t		seq;
	/* RB, DMA, etc. */
	unsigned		ring;
};

int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);

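/* Hypothetical usage sketch, not a real driver path: emit a fence on a
 * ring and block until the GPU reaches it. Real callers emit fences
 * while they own ring emission; error handling is shortened here.
 */
static inline int radeon_fence_sync_example(struct radeon_device *rdev,
					    int ring)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_emit(rdev, &fence, ring);
	if (r)
		return r;
	r = radeon_fence_wait(fence, false);	/* uninterruptible wait */
	radeon_fence_unref(&fence);		/* drop our reference */
	return r;
}
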
/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_bo *bo;
};

#define RADEON_GEM_MAX_SURFACES 8

/*
 * TTM.
 */
struct radeon_mman {
	struct ttm_bo_global_ref	bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;
};

/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* bo list is protected by bo being reserved */
	struct list_head	bo_list;
	/* vm list is protected by vm mutex */
	struct list_head	vm_list;
	/* constant after initialization */
	struct radeon_vm	*vm;
	struct radeon_bo	*bo;
	uint64_t		soffset;
	uint64_t		eoffset;
	uint32_t		flags;
	bool			valid;
};

struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				placements[3];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	void				*kptr;
	u32				tiling_flags;
	u32				pitch;
	int				surface_reg;
	/* list of all virtual addresses with which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct radeon_device		*rdev;
	struct drm_gem_object		gem_base;

	struct ttm_bo_kmap_obj		dma_buf_vmap;
	int				vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

struct radeon_bo_list {
	struct ttm_validate_buffer	tv;
	struct radeon_bo		*bo;
	uint64_t			gpu_offset;
	unsigned			rdomain;
	unsigned			wdomain;
	u32				tiling_flags;
};

/* sub-allocation manager, it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their
 * own locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
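/* Worked example of the policy above (all numbers hypothetical): with
 * size = 1024 and live objects at [0, 256) and [256, 512), a 512-byte
 * request fits at the end since 1024 - 512 >= 512. A further 512-byte
 * request finds no room at the end and must wait for the oldest object
 * to be freed before its range can be reused.
 */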
struct radeon_sa_manager {
	spinlock_t		lock;
	struct radeon_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[RADEON_NUM_RINGS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
};

struct radeon_sa_bo;

/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct radeon_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct radeon_fence		*fence;
};

/*
 * GEM objects.
 */
struct radeon_gem {
	struct mutex		mutex;
	struct list_head	objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj);

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle);

/*
 * Semaphores.
 */
/* everything here is constant */
struct radeon_semaphore {
	struct radeon_sa_bo	*sa_bo;
	signed			waiters;
	uint64_t		gpu_addr;
};

int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				bool sync_to[RADEON_NUM_RINGS],
				int dst_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore *semaphore,
			   struct radeon_fence *fence);

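/* Illustrative pattern (comment only): cross-ring synchronization by
 * signaling on one ring and waiting on another, so the second ring's
 * work starts only once the first ring reaches the semaphore:
 *
 *	radeon_semaphore_emit_signal(rdev, RADEON_RING_TYPE_GFX_INDEX, sem);
 *	radeon_semaphore_emit_wait(rdev, CAYMAN_RING_TYPE_CP1_INDEX, sem);
 */
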
/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)

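/* Worked example of the helper above (illustrative):
 *	RADEON_GPU_PAGE_ALIGN(0x1001) = (0x1001 + 0xfff) & ~0xfff = 0x2000,
 * i.e. a size or address is rounded up to the next 4KB GPU page.
 */
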
struct radeon_gart {
	dma_addr_t		table_addr;
	struct radeon_bo	*robj;
	void			*ptr;
	unsigned		num_gpu_pages;
	unsigned		num_cpu_pages;
	unsigned		table_size;
	struct page		**pages;
	dma_addr_t		*pages_addr;
	bool			ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);


/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	bool			vram_is_ddr;
	bool			igp_sideport_enabled;
	u64			gtt_base_align;
};

bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);

/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned	num_reg;
	uint32_t	reg_base;
	bool		free[32];
	uint32_t	reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);


/*
 * IRQS.
 */

struct radeon_unpin_work {
	struct work_struct		work;
	struct radeon_device		*rdev;
	int				crtc_id;
	struct radeon_fence		*fence;
	struct drm_pending_vblank_event	*event;
	struct radeon_bo		*old_rbo;
	u64				new_crtc_base;
};

struct r500_irq_stat_regs {
	u32 disp_int;
	u32 hdmi0_status;
};

struct r600_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 hdmi0_status;
	u32 hdmi1_status;
};

struct evergreen_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 disp_int_cont3;
	u32 disp_int_cont4;
	u32 disp_int_cont5;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 d3grph_int;
	u32 d4grph_int;
	u32 d5grph_int;
	u32 d6grph_int;
	u32 afmt_status1;
	u32 afmt_status2;
	u32 afmt_status3;
	u32 afmt_status4;
	u32 afmt_status5;
	u32 afmt_status6;
};

union radeon_irq_stat_regs {
	struct r500_irq_stat_regs r500;
	struct r600_irq_stat_regs r600;
	struct evergreen_irq_stat_regs evergreen;
};

#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 6

struct radeon_irq {
	bool			installed;
	bool			sw_int[RADEON_NUM_RINGS];
	bool			crtc_vblank_int[RADEON_MAX_CRTCS];
	bool			pflip[RADEON_MAX_CRTCS];
	wait_queue_head_t	vblank_queue;
	bool			hpd[RADEON_MAX_HPD_PINS];
	bool			gui_idle;
	bool			gui_idle_acked;
	wait_queue_head_t	idle_queue;
	bool			afmt[RADEON_MAX_AFMT_BLOCKS];
	spinlock_t		sw_lock;
	int			sw_refcount[RADEON_NUM_RINGS];
	union radeon_irq_stat_regs stat_regs;
	spinlock_t		pflip_lock[RADEON_MAX_CRTCS];
	int			pflip_refcount[RADEON_MAX_CRTCS];
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);

/*
 * CP & rings.
 */

struct radeon_ib {
	struct radeon_sa_bo	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	int			ring;
	struct radeon_fence	*fence;
	unsigned		vm_id;
	bool			is_const_ib;
	struct radeon_semaphore	*semaphore;
};

struct radeon_ring {
	struct radeon_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr;
	unsigned		rptr_offs;
	unsigned		rptr_reg;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		wptr_reg;
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
	unsigned long		last_activity;
	unsigned		last_rptr;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	bool			ready;
	u32			ptr_reg_shift;
	u32			ptr_reg_mask;
	u32			nop;
};

/*
 * VM
 */
struct radeon_vm {
	struct list_head	list;
	struct list_head	va;
	int			id;
	unsigned		last_pfn;
	u64			pt_gpu_addr;
	u64			*pt;
	struct radeon_sa_bo	*sa_bo;
	struct mutex		mutex;
	/* last fence for cs using this vm */
	struct radeon_fence	*fence;
};

struct radeon_vm_funcs {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	/* cs mutex must be locked for schedule_ib */
	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
	uint32_t (*page_flags)(struct radeon_device *rdev,
			       struct radeon_vm *vm,
			       uint32_t flags);
	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
			unsigned pfn, uint64_t addr, uint32_t flags);
};

struct radeon_vm_manager {
	struct list_head		lru_vm;
	uint32_t			use_bitmap;
	struct radeon_sa_manager	sa_manager;
	uint32_t			max_pfn;
	/* fields constant after init */
	const struct radeon_vm_funcs	*funcs;
	/* number of VMIDs */
	unsigned			nvm;
	/* vram base address for page table entry */
	u64				vram_base_offset;
	/* is vm enabled? */
	bool				enabled;
};

/*
 * file private structure
 */
struct radeon_fpriv {
	struct radeon_vm	vm;
};

/*
 * R6xx+ IH ring
 */
struct r600_ih {
	struct radeon_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr;
	unsigned		rptr_offs;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	uint64_t		gpu_addr;
	uint32_t		ptr_mask;
	spinlock_t		lock;
	bool			enabled;
};

struct r600_blit_cp_primitives {
	void (*set_render_target)(struct radeon_device *rdev, int format,
				  int w, int h, u64 gpu_addr);
	void (*cp_set_surface_sync)(struct radeon_device *rdev,
				    u32 sync_type, u32 size,
				    u64 mc_addr);
	void (*set_shaders)(struct radeon_device *rdev);
	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
	void (*set_tex_resource)(struct radeon_device *rdev,
				 int format, int w, int h, int pitch,
				 u64 gpu_addr, u32 size);
	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
			     int x2, int y2);
	void (*draw_auto)(struct radeon_device *rdev);
	void (*set_default_state)(struct radeon_device *rdev);
};

struct r600_blit {
	struct radeon_bo	*shader_obj;
	struct r600_blit_cp_primitives primitives;
	int max_dim;
	int ring_size_common;
	int ring_size_per_loop;
	u64 shader_gpu_addr;
	u32 vs_offset, ps_offset;
	u32 state_offset;
	u32 state_len;
};

void r600_blit_suspend(struct radeon_device *rdev);

/*
 * SI RLC stuff
 */
struct si_rlc {
	/* for power gating */
	struct radeon_bo	*save_restore_obj;
	uint64_t		save_restore_gpu_addr;
	/* for clear state */
	struct radeon_bo	*clear_state_obj;
	uint64_t		clear_state_gpu_addr;
};

int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);


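/* Illustrative pattern (comment only; radeon_ring_write() is provided
 * elsewhere in the driver): reserve ndw dwords, emit packets, commit:
 *
 *	r = radeon_ring_lock(rdev, ring, ndw);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, packet);	// up to ndw dwords
 *	radeon_ring_unlock_commit(rdev, ring);
 */
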
/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object	*gobj;
	struct radeon_bo	*robj;
	struct radeon_bo_list	lobj;
	uint32_t		handle;
	uint32_t		flags;
};

struct radeon_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	int			kpage_idx[2];
	uint32_t		*kpage[2];
	uint32_t		*kdata;
	void __user		*user_ptr;
	int			last_copied_page;
	int			last_page_index;
};

struct radeon_cs_parser {
	struct device		*dev;
	struct radeon_device	*rdev;
	struct drm_file		*filp;
	/* chunks */
	unsigned		nchunks;
	struct radeon_cs_chunk	*chunks;
	uint64_t		*chunks_array;
	/* IB */
	unsigned		idx;
	/* relocations */
	unsigned		nrelocs;
	struct radeon_cs_reloc	*relocs;
	struct radeon_cs_reloc	**relocs_ptr;
	struct list_head	validated;
	/* indices of various chunks */
	int			chunk_ib_idx;
	int			chunk_relocs_idx;
	int			chunk_flags_idx;
	int			chunk_const_ib_idx;
	struct radeon_ib	ib;
	struct radeon_ib	const_ib;
	void			*track;
	unsigned		family;
	int			parser_error;
	u32			cs_flags;
	u32			ring;
	s32			priority;
};

extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);

struct radeon_cs_packet {
	unsigned	idx;
	unsigned	type;
	unsigned	reg;
	unsigned	opcode;
	int		count;
	unsigned	one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);


/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	bool			enabled;
	bool			use_event;
};

#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET 2048
#define R600_WB_EVENT_OFFSET 3072

/**
 * struct radeon_pm - power management data
 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk: sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk: system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk: ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk: GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth: current bandwidth needs
 *
 * It keeps track of various data needed to make power management decisions.
 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
 * The equation between gpu/memory clock and available bandwidth is hw
 * dependent (type of memory, bus size, efficiency, ...)
 */

enum radeon_pm_method {
	PM_METHOD_PROFILE,
	PM_METHOD_DYNPM,
};

enum radeon_dynpm_state {
	DYNPM_STATE_DISABLED,
	DYNPM_STATE_MINIMUM,
	DYNPM_STATE_PAUSED,
	DYNPM_STATE_ACTIVE,
	DYNPM_STATE_SUSPENDED,
};
enum radeon_dynpm_action {
	DYNPM_ACTION_NONE,
	DYNPM_ACTION_MINIMUM,
	DYNPM_ACTION_DOWNCLOCK,
	DYNPM_ACTION_UPCLOCK,
	DYNPM_ACTION_DEFAULT
};

enum radeon_voltage_type {
	VOLTAGE_NONE = 0,
	VOLTAGE_GPIO,
	VOLTAGE_VDDC,
	VOLTAGE_SW
};

enum radeon_pm_state_type {
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
};

enum radeon_pm_profile_type {
	PM_PROFILE_DEFAULT,
	PM_PROFILE_AUTO,
	PM_PROFILE_LOW,
	PM_PROFILE_MID,
	PM_PROFILE_HIGH,
};

#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX  1
#define PM_PROFILE_MID_SH_IDX  2
#define PM_PROFILE_HIGH_SH_IDX 3
#define PM_PROFILE_LOW_MH_IDX  4
#define PM_PROFILE_MID_MH_IDX  5
#define PM_PROFILE_HIGH_MH_IDX 6
#define PM_PROFILE_MAX         7

struct radeon_pm_profile {
	int dpms_off_ps_idx;
	int dpms_on_ps_idx;
	int dpms_off_cm_idx;
	int dpms_on_cm_idx;
};

enum radeon_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
};

989 enum radeon_voltage_type type;
990 /* gpio voltage */
991 struct radeon_gpio_rec gpio;
992 u32 delay; /* delay in usec from voltage drop to sclk change */
993 bool active_high; /* voltage drop is active when bit is high */
994 /* VDDC voltage */
995 u8 vddc_id; /* index into vddc voltage table */
996 u8 vddci_id; /* index into vddci voltage table */
997 bool vddci_enabled;
998 /* r6xx+ sw */
Alex Deucher2feea492011-04-12 14:49:24 -0400999 u16 voltage;
1000 /* evergreen+ vddci */
1001 u16 vddci;
Alex Deucher56278a82009-12-28 13:58:44 -05001002};
1003
/* clock mode flags */
#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)

struct radeon_pm_clock_info {
	/* memory clock */
	u32 mclk;
	/* engine clock */
	u32 sclk;
	/* voltage info */
	struct radeon_voltage voltage;
	/* standardized clock flags */
	u32 flags;
};

/* state flags */
#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)

struct radeon_power_state {
	enum radeon_pm_state_type type;
	struct radeon_pm_clock_info *clock_info;
	/* number of valid clock modes in this power state */
	int num_clock_modes;
	struct radeon_pm_clock_info *default_clock_mode;
	/* standardized state flags */
	u32 flags;
	u32 misc; /* vbios specific flags */
	u32 misc2; /* vbios specific flags */
	int pcie_lanes; /* pcie lanes */
};

/*
 * Some modes are overclocked by a very low value; accept them
 */
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */

struct radeon_pm {
	struct mutex		mutex;
	u32			active_crtcs;
	int			active_crtc_count;
	int			req_vblank;
	bool			vblank_sync;
	bool			gui_idle;
	fixed20_12		max_bandwidth;
	fixed20_12		igp_sideport_mclk;
	fixed20_12		igp_system_mclk;
	fixed20_12		igp_ht_link_clk;
	fixed20_12		igp_ht_link_width;
	fixed20_12		k8_bandwidth;
	fixed20_12		sideport_bandwidth;
	fixed20_12		ht_bandwidth;
	fixed20_12		core_bandwidth;
	fixed20_12		sclk;
	fixed20_12		mclk;
	fixed20_12		needed_bandwidth;
	struct radeon_power_state *power_state;
	/* number of valid power states */
	int			num_power_states;
	int			current_power_state_index;
	int			current_clock_mode_index;
	int			requested_power_state_index;
	int			requested_clock_mode_index;
	int			default_power_state_index;
	u32			current_sclk;
	u32			current_mclk;
	u16			current_vddc;
	u16			current_vddci;
	u32			default_sclk;
	u32			default_mclk;
	u16			default_vddc;
	u16			default_vddci;
	struct radeon_i2c_chan	*i2c_bus;
	/* selected pm method */
	enum radeon_pm_method	pm_method;
	/* dynpm power management */
	struct delayed_work	dynpm_idle_work;
	enum radeon_dynpm_state	dynpm_state;
	enum radeon_dynpm_action dynpm_planned_action;
	unsigned long		dynpm_action_timeout;
	bool			dynpm_can_upclock;
	bool			dynpm_can_downclock;
	/* profile-based power management */
	enum radeon_pm_profile_type profile;
	int			profile_index;
	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
	/* internal thermal controller on rv6xx+ */
	enum radeon_int_thermal_type int_thermal_type;
	struct device		*int_hwmon_dev;
};

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance);

struct r600_audio {
	int	channels;
	int	rate;
	int	bits_per_sample;
	u8	status_bits;
	u8	category_code;
};

/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev, int test_number);


/*
 * Testing
 */
void radeon_test_moves(struct radeon_device *rdev);
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *cpA,
			   struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);


/*
 * ASIC specific functions.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform a
	 * special operation on a specific ioctl. For instance on wait idle
	 * some hw might want to perform an HDP flush through MMIO as it
	 * seems that some R6XX/R7XX hw doesn't take HDP flush into account
	 * if programmed through the ring.
	 */
	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
	/* check if 3D engine is idle */
	bool (*gui_idle)(struct radeon_device *rdev);
	/* wait for mc_idle */
	int (*mc_wait_for_idle)(struct radeon_device *rdev);
	/* gart */
	struct {
		void (*tlb_flush)(struct radeon_device *rdev);
		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	} gart;
	/* ring specific callbacks */
	struct {
		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
				       struct radeon_semaphore *semaphore, bool emit_wait);
		int (*cs_parse)(struct radeon_cs_parser *p);
		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
	} ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
		int (*process)(struct radeon_device *rdev);
	} irq;
	/* displays */
	struct {
		/* display watermarks */
		void (*bandwidth_update)(struct radeon_device *rdev);
		/* get frame count */
		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
		/* wait for vblank */
		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
	} display;
	/* copy functions for bo handling */
	struct {
		int (*blit)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		u32 blit_ring_index;
		int (*dma)(struct radeon_device *rdev,
			   uint64_t src_offset,
			   uint64_t dst_offset,
			   unsigned num_gpu_pages,
			   struct radeon_fence **fence);
		u32 dma_ring_index;
		/* method used for bo copy */
		int (*copy)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		/* ring used for bo copies */
		u32 copy_ring_index;
	} copy;
	/* surfaces */
	struct {
		int (*set_reg)(struct radeon_device *rdev, int reg,
			       uint32_t tiling_flags, uint32_t pitch,
			       uint32_t offset, uint32_t obj_size);
		void (*clear_reg)(struct radeon_device *rdev, int reg);
	} surface;
	/* hotplug detect */
	struct {
		void (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
	} hpd;
	/* power management */
	struct {
		void (*misc)(struct radeon_device *rdev);
		void (*prepare)(struct radeon_device *rdev);
		void (*finish)(struct radeon_device *rdev);
		void (*init_profile)(struct radeon_device *rdev);
		void (*get_dynpm_state)(struct radeon_device *rdev);
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
		int (*get_pcie_lanes)(struct radeon_device *rdev);
		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	} pm;
	/* pageflipping */
	struct {
		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
	} pflip;
};

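/* Illustrative note (comment only): the entry points above are invoked
 * through the per-ASIC table, so a single call site covers r1xx through
 * SI parts, e.g.:
 *
 *	rdev->asic->ring[ring_index].ib_execute(rdev, ib);
 *
 * Call sites are typically wrapped in helper macros.
 */
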
/*
 * Asic structures
 */
struct r100_asic {
	const unsigned		*reg_safe_bm;
	unsigned		reg_safe_bm_size;
	u32			hdp_cntl;
};

struct r300_asic {
	const unsigned		*reg_safe_bm;
	unsigned		reg_safe_bm_size;
	u32			resync_scratch;
	u32			hdp_cntl;
};

1264struct r600_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001265 unsigned max_pipes;
1266 unsigned max_tile_pipes;
1267 unsigned max_simds;
1268 unsigned max_backends;
1269 unsigned max_gprs;
1270 unsigned max_threads;
1271 unsigned max_stack_entries;
1272 unsigned max_hw_contexts;
1273 unsigned max_gs_threads;
1274 unsigned sx_max_export_size;
1275 unsigned sx_max_export_pos_size;
1276 unsigned sx_max_export_smx_size;
1277 unsigned sq_num_cf_insts;
1278 unsigned tiling_nbanks;
1279 unsigned tiling_npipes;
1280 unsigned tiling_group_size;
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001281 unsigned tile_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001282 unsigned backend_map;
Jerome Glisse21f9a432009-09-11 15:55:33 +02001283};
1284
struct rv770_asic {
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
};

struct evergreen_asic {
	unsigned num_ses;
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
};

struct cayman_asic {
	unsigned max_shader_engines;
	unsigned max_pipes_per_simd;
	unsigned max_tile_pipes;
	unsigned max_simds_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_gs_threads;
	unsigned max_stack_entries;
	unsigned sx_num_of_sets;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned max_hw_contexts;
	unsigned sq_num_cf_insts;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_shader_engines;
	unsigned num_shader_pipes_per_simd;
	unsigned num_tile_pipes;
	unsigned num_simds_per_se;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
};

struct si_asic {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
};

union radeon_asic_config {
	struct r300_asic r300;
	struct r100_asic r100;
	struct r600_asic r600;
	struct rv770_asic rv770;
	struct evergreen_asic evergreen;
	struct cayman_asic cayman;
	struct si_asic si;
};
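
/* Sketch: config is a union, so a member is only meaningful for the matching
 * generation; callers key on rdev->family (or the ASIC_IS_* helpers further
 * down) before touching it, roughly like this (cayman/si branches elided):
 *
 *	if (rdev->family >= CHIP_CEDAR)
 *		tile_config = rdev->config.evergreen.tile_config;
 *	else if (rdev->family >= CHIP_RV770)
 *		tile_config = rdev->config.rv770.tile_config;
 */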

/*
 * ASIC initialization from radeon_asic.c
 */
void radeon_agp_disable(struct radeon_device *rdev);
int radeon_asic_init(struct radeon_device *rdev);

/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

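/* Hedged sketch: these handlers are exposed through the standard DRM ioctl
 * table in radeon_kms.c; one representative entry (the flags shown are
 * illustrative, not copied from the driver):
 *
 *	DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl,
 *			  DRM_AUTH | DRM_UNLOCKED),
 */
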
/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
	struct radeon_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);

struct radeon_device {
	struct device *dev;
	struct drm_device *ddev;
	struct pci_dev *pdev;
	/* ASIC */
	union radeon_asic_config config;
	enum radeon_family family;
	unsigned long flags;
	int usec_timeout;
	enum radeon_pll_errata pll_errata;
	int num_gb_pipes;
	int num_z_pipes;
	int disp_priority;
	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	uint16_t bios_header_start;
	struct radeon_bo *stolen_vga_memory;
	/* Register mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void __iomem *rmmio;
	radeon_rreg_t mc_rreg;
	radeon_wreg_t mc_wreg;
	radeon_rreg_t pll_rreg;
	radeon_wreg_t pll_wreg;
	uint32_t pcie_reg_mask;
	radeon_rreg_t pciep_rreg;
	radeon_wreg_t pciep_wreg;
	/* io port */
	void __iomem *rio_mem;
	resource_size_t rio_mem_size;
	struct radeon_clock clock;
	struct radeon_mc mc;
	struct radeon_gart gart;
	struct radeon_mode_info mode_info;
	struct radeon_scratch scratch;
	struct radeon_mman mman;
	struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
	wait_queue_head_t fence_queue;
	struct mutex ring_lock;
	struct radeon_ring ring[RADEON_NUM_RINGS];
	bool ib_pool_ready;
	struct radeon_sa_manager ring_tmp_bo;
	struct radeon_irq irq;
	struct radeon_asic *asic;
	struct radeon_gem gem;
	struct radeon_pm pm;
	uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
	struct radeon_mutex cs_mutex;
	struct radeon_wb wb;
	struct radeon_dummy_page dummy_page;
	bool shutdown;
	bool suspend;
	bool need_dma32;
	bool accel_working;
	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware *me_fw;	/* all family ME firmware */
	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
	const struct firmware *mc_fw;	/* NI MC firmware */
	const struct firmware *ce_fw;	/* SI CE firmware */
	struct r600_blit r600_blit;
	struct r600_vram_scratch vram_scratch;
	int msi_enabled;		/* msi enabled */
	struct r600_ih ih;		/* r6/700 interrupt ring */
	struct si_rlc rlc;
	struct work_struct hotplug_work;
	struct work_struct audio_work;
	int num_crtc;			/* number of crtcs */
	struct mutex dc_hw_i2c_mutex;	/* display controller hw i2c mutex */
	struct mutex vram_mutex;
	bool audio_enabled;
	struct r600_audio audio_status;	/* audio stuff */
	struct notifier_block acpi_nb;
	/* only one userspace can use Hyperz features or CMASK at a time */
	struct drm_file *hyperz_filp;
	struct drm_file *cmask_filp;
	/* i2c buses */
	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
	/* debugfs */
	struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
	unsigned debugfs_count;
	/* virtual memory */
	struct radeon_vm_manager vm_manager;
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);

/*
 * Cast helper
 */
#define to_radeon_fence(p) ((struct radeon_fence *)(p))

/*
 * Registers read & write functions.
 */
#define RREG8(reg) readb((rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask)		\
	do {					\
		uint32_t tmp_ = RREG32(reg);	\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32(reg, tmp_);		\
	} while (0)
#define WREG32_PLL_P(reg, val, mask)		\
	do {					\
		uint32_t tmp_ = RREG32_PLL(reg);\
		tmp_ &= (mask);			\
		tmp_ |= ((val) & ~(mask));	\
		WREG32_PLL(reg, tmp_);		\
	} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))

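/* A hedged usage sketch for the masked-write helpers above.  Note the mask
 * sense: bits set in 'mask' are preserved from the current register value,
 * and 'val' only supplies the remaining bits.  Clearing one bit while
 * keeping the rest therefore looks like this:
 */
static inline void radeon_clear_bit_sketch(struct radeon_device *rdev,
					   uint32_t reg, uint32_t bit)
{
	/* val = 0 for the cleared bit, mask = everything except that bit */
	WREG32_P(reg, 0, ~bit);
}
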
/*
 * Indirect registers accessor
 */
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	return r;
}

static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
}

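/* The accessors above follow the classic index/data pattern: post the
 * indirect offset to an INDEX register, then move the value through a DATA
 * register.  A generic sketch of the same idea, parametrized so no real
 * register names are assumed (real users also need locking to keep the
 * index/data pair atomic):
 */
static inline uint32_t radeon_indirect_rreg_sketch(struct radeon_device *rdev,
						   uint32_t index_reg,
						   uint32_t data_reg,
						   uint32_t reg)
{
	WREG32(index_reg, reg);		/* select the indirect register */
	return RREG32(data_reg);	/* read its value through the window */
}
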
void r100_pll_errata_after_index(struct radeon_device *rdev);


/*
 * ASICs helpers.
 */
#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
			    (rdev->pdev->device == 0x5969))
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
			     (rdev->family == CHIP_RV200) || \
			     (rdev->family == CHIP_RS100) || \
			     (rdev->family == CHIP_RS200) || \
			     (rdev->family == CHIP_RV250) || \
			     (rdev->family == CHIP_RV280) || \
			     (rdev->family == CHIP_RS300))
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  || \
			    (rdev->family == CHIP_RV350) || \
			    (rdev->family == CHIP_R350)  || \
			    (rdev->family == CHIP_RV380) || \
			    (rdev->family == CHIP_R420)  || \
			    (rdev->family == CHIP_R423)  || \
			    (rdev->family == CHIP_RV410) || \
			    (rdev->family == CHIP_RS400) || \
			    (rdev->family == CHIP_RS480))
#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
			  (rdev->ddev->pdev->device == 0x9443) || \
			  (rdev->ddev->pdev->device == 0x944B) || \
			  (rdev->ddev->pdev->device == 0x9506) || \
			  (rdev->ddev->pdev->device == 0x9509) || \
			  (rdev->ddev->pdev->device == 0x950F) || \
			  (rdev->ddev->pdev->device == 0x689C) || \
			  (rdev->ddev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
			    (rdev->family == CHIP_RS690) || \
			    (rdev->family == CHIP_RS740) || \
			    (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
			     (rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
			     (rdev->flags & RADEON_IS_IGP))

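/* Sketch of the intended use: feature code branches on the display-engine
 * generation rather than on individual families, e.g.
 *
 *	if (ASIC_IS_DCE4(rdev))
 *		// program the DCE4+ register layout
 *	else if (ASIC_IS_AVIVO(rdev))
 *		// fall back to the older AVIVO layout
 */
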
/*
 * BIOS helpers.
 */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
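
/* Hedged sketch: the BIOS image is a little-endian byte stream, so the
 * helpers above compose multi-byte fields from single-byte reads.  For
 * instance, every PC option ROM begins with the 0x55 0xAA signature, which
 * reads back as 0xaa55 through RBIOS16:
 */
static inline bool radeon_bios_has_rom_signature(struct radeon_device *rdev)
{
	return rdev->bios && RBIOS16(0) == 0xaa55;
}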

int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);


/*
 * RING helpers.
 */
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
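
/* A hedged sketch of how radeon_ring_write() combines with the ring
 * lock/commit helpers (assumed declared earlier in this header): reserve
 * space, emit dwords, then commit so the new write pointer reaches the GPU.
 * The two-dword packet stands in for a real PACKET0-style command.
 */
static inline int radeon_ring_emit_sketch(struct radeon_device *rdev,
					  struct radeon_ring *ring,
					  u32 header, u32 payload)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);	/* reserve two dwords */
	if (r)
		return r;
	radeon_ring_write(ring, header);	/* command header */
	radeon_ring_write(ring, payload);	/* command payload */
	radeon_ring_unlock_commit(rdev, ring);	/* bump wptr, kick the ring */
	return 0;
}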

/*
 * ASICs macro.
 */
#define radeon_init(rdev) (rdev)->asic->init((rdev))
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))

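/* Hedged sketch of how the indirection above is consumed: callers never poke
 * rdev->asic directly, they go through the wrappers, here picking the
 * ASIC's preferred copy method (this mirrors, but is not copied from, the
 * bo move path):
 */
static inline int radeon_copy_pages_sketch(struct radeon_device *rdev,
					   uint64_t src, uint64_t dst,
					   unsigned num_gpu_pages,
					   struct radeon_fence **fence)
{
	if (rdev->asic->copy.copy == NULL)
		return -EINVAL;	/* no copy method wired up for this ASIC */
	return radeon_copy(rdev, src, dst, num_gpu_pages, fence);
}
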
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);

/*
 * vm
 */
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_manager_start(struct radeon_device *rdev);
int radeon_vm_manager_suspend(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo);
int radeon_vm_bo_add(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo,
		     uint64_t offset,
		     uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo);

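/* A hedged sketch of how the vm interface above pairs up for one mapping;
 * real callers (the CS ioctl and the GEM va ioctl) add locking and error
 * unwinding that is elided here, and the flags value is illustrative:
 */
static inline int radeon_vm_map_bo_sketch(struct radeon_device *rdev,
					  struct radeon_vm *vm,
					  struct radeon_bo *bo,
					  uint64_t gpu_offset,
					  struct ttm_mem_reg *mem)
{
	int r;

	r = radeon_vm_bo_add(rdev, vm, bo, gpu_offset, 0); /* reserve a va range */
	if (r)
		return r;
	r = radeon_vm_bind(rdev, vm);	/* make sure page tables + id exist */
	if (r)
		return r;
	return radeon_vm_bo_update_pte(rdev, vm, bo, mem); /* write the ptes */
}
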
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);

/*
 * R600 vram scratch functions
 */
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);

/*
 * r600 cs checking helpers
 */
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);

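/* Hedged sketch: when the CS checker validates a w x h texture it combines
 * the helpers above; compressed formats cover several texels per block,
 * hence the block arithmetic rather than plain w * h:
 */
static inline u32 r600_fmt_size_sketch(u32 format, u32 w, u32 h)
{
	return r600_fmt_get_nblocksx(format, w) *
	       r600_fmt_get_nblocksy(format, h) *
	       r600_fmt_get_blocksize(format);
}
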
/*
 * r600 functions used by radeon_encoder.c
 */
struct radeon_hdmi_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
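
/* The table above caches HDMI Audio Clock Regeneration parameters.  Per the
 * HDMI spec the pair satisfies 128 * f_audio = f_TMDS * N / CTS, so CTS can
 * be recomputed from N and the TMDS clock.  A hedged sketch of that
 * arithmetic (div_u64 comes from <linux/math64.h>, assumed included):
 */
static inline u64 radeon_hdmi_cts_sketch(u64 tmds_khz, u32 n, u32 audio_hz)
{
	/* CTS = f_TMDS * N / (128 * f_audio), with f_TMDS converted to Hz */
	return div_u64(tmds_khz * 1000ULL * n, 128U * audio_hz);
}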

extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
				     u32 tiling_pipe_num,
				     u32 max_rb_num,
				     u32 total_max_rb_num,
				     u32 enabled_rb_mask);

/*
 * evergreen functions used by radeon_encoder.c
 */
extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);

extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);

/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif

#include "radeon_object.h"

#endif