/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

/* TODO: Things that still need to be done:
 * - surface allocator & initializer: (a bit like the scratch regs) should
 *   initialize the HDP_ registers on RS600, R600, R700 hw, and anything
 *   related to surfaces
 * - WB: write back support (handle it a bit like the scratch regs)
 * - Vblank: look at Jesse's rework and decide what we should do
 * - r600/r700: gart & cp
 * - cs: clean up the cs ioctl to use a bitmap and the like
 * - power management
 * - barrier in the gart code
 * - unmappable vram?
 * - TESTING, TESTING, TESTING
 */

/* Initialization path:
 * We expect that acceleration initialization might fail for various
 * reasons even though we work hard to make it work on most
 * configurations. In order to still have a working userspace in such a
 * situation the init path must succeed up to the memory controller
 * initialization point. Failures before this point are considered
 * fatal. Here is the init callchain:
 *   radeon_device_init  perform common structure, mutex initialization
 *   asic_init           setup the GPU memory layout and perform all
 *                       one time initialization (failures in this
 *                       function are considered fatal)
 *   asic_startup        setup the GPU acceleration; to follow the
 *                       guideline the first thing this function should
 *                       do is set up the GPU memory controller (only MC
 *                       setup failures are considered fatal)
 */
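
/*
 * A minimal sketch, with hypothetical rXXX_* helper names, of the ordering
 * the guideline above asks for: an asic_startup() implementation brings the
 * memory controller up first, and only failures up to that point are treated
 * as fatal; later acceleration failures leave a working, unaccelerated device.
 *
 *	static int rXXX_startup(struct radeon_device *rdev)
 *	{
 *		int r;
 *
 *		rXXX_mc_program(rdev);		// MC setup comes first
 *		r = rXXX_cp_resume(rdev);	// acceleration comes afterwards
 *		if (r)
 *			return r;		// caller may then disable
 *		return 0;			// acceleration but keeps the device
 *	}
 */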

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"

/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;

/*
 * Copied from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE		16
#define RADEON_DEBUGFS_MAX_COMPONENTS	32
#define RADEONFB_CONN_LIMIT		4
#define RADEON_BIOS_NUM_SCRATCH		8

/* max number of rings */
#define RADEON_NUM_RINGS		3

/* fence sequence numbers are set to this value when signaled */
#define RADEON_FENCE_SIGNALED_SEQ	0LL
#define RADEON_FENCE_NOTEMITED_SEQ	(~0LL)

/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX	0

/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX	1
#define CAYMAN_RING_TYPE_CP2_INDEX	2

/* hardcode these limits for now */
#define RADEON_VA_RESERVED_SIZE		(8 << 20)
#define RADEON_IB_VM_MAX_SIZE		(64 << 10)

Jerome Glisse771fe6b2009-06-05 14:42:42 +0200130/*
131 * Errata workarounds.
132 */
133enum radeon_pll_errata {
134 CHIP_ERRATA_R300_CG = 0x00000001,
135 CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
136 CHIP_ERRATA_PLL_DELAY = 0x00000004
137};
138
139
140struct radeon_device;
141
142
143/*
144 * BIOS.
145 */
Dave Airlie6a9ee8a2010-02-01 15:38:10 +1000146#define ATRM_BIOS_PAGE 4096
147
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
	return false;
}

static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	return -EINVAL;
}
#endif
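
/*
 * A minimal sketch of how the ATRM helpers above are meant to be used
 * (allocation of the bios buffer and length handling are left out):
 *
 *	if (radeon_atrm_supported(rdev->pdev)) {
 *		for (offset = 0; offset < size; offset += ATRM_BIOS_PAGE) {
 *			if (radeon_atrm_get_bios_chunk(bios + offset,
 *						       offset,
 *						       ATRM_BIOS_PAGE) <= 0)
 *				break;
 *		}
 *	}
 */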
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200161bool radeon_get_bios(struct radeon_device *rdev);
162
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000163
164/*
Jerome Glisse9fc04b52012-01-23 11:52:15 -0500165 * Mutex which allows recursive locking from the same process.
166 */
167struct radeon_mutex {
168 struct mutex mutex;
169 struct task_struct *owner;
170 int level;
171};
172
173static inline void radeon_mutex_init(struct radeon_mutex *mutex)
174{
175 mutex_init(&mutex->mutex);
176 mutex->owner = NULL;
177 mutex->level = 0;
178}
179
180static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
181{
182 if (mutex_trylock(&mutex->mutex)) {
183 /* The mutex was unlocked before, so it's ours now */
184 mutex->owner = current;
185 } else if (mutex->owner != current) {
186 /* Another process locked the mutex, take it */
187 mutex_lock(&mutex->mutex);
188 mutex->owner = current;
189 }
190 /* Otherwise the mutex was already locked by this process */
191
192 mutex->level++;
193}
194
195static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
196{
197 if (--mutex->level > 0)
198 return;
199
200 mutex->owner = NULL;
201 mutex_unlock(&mutex->mutex);
202}
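
/*
 * A minimal usage sketch (hypothetical call site): the same process may take
 * the lock again without deadlocking, as long as every radeon_mutex_lock()
 * is paired with a radeon_mutex_unlock().
 *
 *	radeon_mutex_lock(&rdev->cs_mutex);
 *	radeon_mutex_lock(&rdev->cs_mutex);	// nested, level == 2
 *	radeon_mutex_unlock(&rdev->cs_mutex);
 *	radeon_mutex_unlock(&rdev->cs_mutex);	// actually drops the mutex
 */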
203
204
205/*
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000206 * Dummy page
207 */
208struct radeon_dummy_page {
209 struct page *page;
210 dma_addr_t addr;
211};
212int radeon_dummy_page_init(struct radeon_device *rdev);
213void radeon_dummy_page_fini(struct radeon_device *rdev);
214
215
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200216/*
217 * Clocks
218 */
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200219struct radeon_clock {
220 struct radeon_pll p1pll;
221 struct radeon_pll p2pll;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500222 struct radeon_pll dcpll;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200223 struct radeon_pll spll;
224 struct radeon_pll mpll;
	/* 10 kHz units */
226 uint32_t default_mclk;
227 uint32_t default_sclk;
Alex Deucherbcc1c2a2010-01-12 17:54:34 -0500228 uint32_t default_dispclk;
229 uint32_t dp_extclk;
Alex Deucherb20f9be2011-06-08 13:01:11 -0400230 uint32_t max_pixel_clock;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200231};
232
Rafał Miłecki74338742009-11-03 00:53:02 +0100233/*
234 * Power management
235 */
236int radeon_pm_init(struct radeon_device *rdev);
Alex Deucher29fb52c2010-03-11 10:01:17 -0500237void radeon_pm_fini(struct radeon_device *rdev);
Rafał Miłeckic913e232009-12-22 23:02:16 +0100238void radeon_pm_compute_clocks(struct radeon_device *rdev);
Alex Deucherce8f5372010-05-07 15:10:16 -0400239void radeon_pm_suspend(struct radeon_device *rdev);
240void radeon_pm_resume(struct radeon_device *rdev);
Alex Deucher56278a82009-12-28 13:58:44 -0500241void radeon_combios_get_power_modes(struct radeon_device *rdev);
242void radeon_atombios_get_power_modes(struct radeon_device *rdev);
Alex Deucher8a83ec52011-04-12 14:49:23 -0400243void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
Alex Deucherf8920342010-06-30 12:02:03 -0400244void rs690_pm_info(struct radeon_device *rdev);
Alex Deucher20d391d2011-02-01 16:12:34 -0500245extern int rv6xx_get_temp(struct radeon_device *rdev);
246extern int rv770_get_temp(struct radeon_device *rdev);
247extern int evergreen_get_temp(struct radeon_device *rdev);
248extern int sumo_get_temp(struct radeon_device *rdev);
Alex Deucher1bd47d22012-03-20 17:18:10 -0400249extern int si_get_temp(struct radeon_device *rdev);
Jerome Glisse285484e2011-12-16 17:03:42 -0500250extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
251 unsigned *bankh, unsigned *mtaspect,
252 unsigned *tile_split);
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000253
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200254/*
255 * Fences.
256 */
257struct radeon_fence_driver {
258 uint32_t scratch_reg;
Jerome Glisse30eb77f2011-11-20 20:45:34 +0000259 uint64_t gpu_addr;
260 volatile uint32_t *cpu_addr;
Jerome Glissebb635562012-05-09 15:34:46 +0200261 /* seq is protected by ring emission lock */
262 uint64_t seq;
263 atomic64_t last_seq;
Christian König36abaca2012-05-02 15:11:13 +0200264 unsigned long last_activity;
Jerome Glisse0a0c7592009-12-11 20:36:19 +0100265 bool initialized;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200266};
267
268struct radeon_fence {
269 struct radeon_device *rdev;
270 struct kref kref;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200271 /* protected by radeon_fence.lock */
Jerome Glissebb635562012-05-09 15:34:46 +0200272 uint64_t seq;
Alex Deucher74652802011-08-25 13:39:48 -0400273 /* RB, DMA, etc. */
Jerome Glissebb635562012-05-09 15:34:46 +0200274 unsigned ring;
Christian König93504fc2012-01-05 22:11:06 -0500275 struct radeon_semaphore *semaphore;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200276};
277
Jerome Glisse30eb77f2011-11-20 20:45:34 +0000278int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
279int radeon_fence_driver_init(struct radeon_device *rdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200280void radeon_fence_driver_fini(struct radeon_device *rdev);
Alex Deucher74652802011-08-25 13:39:48 -0400281int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200282int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
Alex Deucher74652802011-08-25 13:39:48 -0400283void radeon_fence_process(struct radeon_device *rdev, int ring);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200284bool radeon_fence_signaled(struct radeon_fence *fence);
285int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
Christian König8a47cc92012-05-09 15:34:48 +0200286int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
287int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
Jerome Glisse0085c9502012-05-09 15:34:55 +0200288int radeon_fence_wait_any(struct radeon_device *rdev,
289 struct radeon_fence **fences,
290 bool intr);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200291struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
292void radeon_fence_unref(struct radeon_fence **fence);
Jerome Glisse3b7a2b22012-05-09 15:34:47 +0200293unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
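
/*
 * A minimal sketch of the intended fence life cycle, built only from the
 * declarations above (error handling of the emit path is left out):
 *
 *	struct radeon_fence *fence;
 *	int r;
 *
 *	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	radeon_fence_emit(rdev, fence);		// write the fence packet
 *	r = radeon_fence_wait(fence, false);	// block until it signals
 *	radeon_fence_unref(&fence);		// drop our reference
 */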
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200294
Dave Airliee024e112009-06-24 09:48:08 +1000295/*
296 * Tiling registers
297 */
298struct radeon_surface_reg {
Jerome Glisse4c788672009-11-20 14:29:23 +0100299 struct radeon_bo *bo;
Dave Airliee024e112009-06-24 09:48:08 +1000300};
301
302#define RADEON_GEM_MAX_SURFACES 8
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200303
304/*
Jerome Glisse4c788672009-11-20 14:29:23 +0100305 * TTM.
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200306 */
Jerome Glisse4c788672009-11-20 14:29:23 +0100307struct radeon_mman {
308 struct ttm_bo_global_ref bo_global_ref;
Dave Airlieba4420c2010-03-09 10:56:52 +1000309 struct drm_global_reference mem_global_ref;
Jerome Glisse4c788672009-11-20 14:29:23 +0100310 struct ttm_bo_device bdev;
Jerome Glisse0a0c7592009-12-11 20:36:19 +0100311 bool mem_global_referenced;
312 bool initialized;
Jerome Glisse4c788672009-11-20 14:29:23 +0100313};
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200314
Jerome Glisse721604a2012-01-05 22:11:05 -0500315/* bo virtual address in a specific vm */
316struct radeon_bo_va {
317 /* bo list is protected by bo being reserved */
318 struct list_head bo_list;
319 /* vm list is protected by vm mutex */
320 struct list_head vm_list;
321 /* constant after initialization */
322 struct radeon_vm *vm;
323 struct radeon_bo *bo;
324 uint64_t soffset;
325 uint64_t eoffset;
326 uint32_t flags;
327 bool valid;
328};
329
Jerome Glisse4c788672009-11-20 14:29:23 +0100330struct radeon_bo {
331 /* Protected by gem.mutex */
332 struct list_head list;
333 /* Protected by tbo.reserved */
Jerome Glisse312ea8d2009-12-07 15:52:58 +0100334 u32 placements[3];
335 struct ttm_placement placement;
Jerome Glisse4c788672009-11-20 14:29:23 +0100336 struct ttm_buffer_object tbo;
337 struct ttm_bo_kmap_obj kmap;
338 unsigned pin_count;
339 void *kptr;
340 u32 tiling_flags;
341 u32 pitch;
342 int surface_reg;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
346 struct list_head va;
Jerome Glisse4c788672009-11-20 14:29:23 +0100347 /* Constant after initialization */
348 struct radeon_device *rdev;
Daniel Vetter441921d2011-02-18 17:59:16 +0100349 struct drm_gem_object gem_base;
Jerome Glisse4c788672009-11-20 14:29:23 +0100350};
Daniel Vetter7e4d15d2011-02-18 17:59:17 +0100351#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
Jerome Glisse4c788672009-11-20 14:29:23 +0100352
353struct radeon_bo_list {
Thomas Hellstrom147666f2010-11-17 12:38:32 +0000354 struct ttm_validate_buffer tv;
Jerome Glisse4c788672009-11-20 14:29:23 +0100355 struct radeon_bo *bo;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200356 uint64_t gpu_offset;
357 unsigned rdomain;
358 unsigned wdomain;
Jerome Glisse4c788672009-11-20 14:29:23 +0100359 u32 tiling_flags;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200360};
361
/* sub-allocation manager, it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; that object then becomes the sub object we return.
 *
 * Alignment can't be bigger than the page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
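/*
 * A small worked example of the end-of-buffer check above, with made-up
 * numbers: for a 64 KiB manager whose last sub-allocation starts at 40 KiB
 * and is 16 KiB long, 65536 - (40960 + 16384) = 8192, so a new request of up
 * to 8 KiB fits at the end; anything larger has to wait for earlier
 * sub-allocations to be freed.
 */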
385struct radeon_sa_manager {
Christian Königa651c552012-05-09 15:34:50 +0200386 spinlock_t lock;
Jerome Glisseb15ba512011-11-15 11:48:34 -0500387 struct radeon_bo *bo;
Christian Königc3b7fe82012-05-09 15:34:56 +0200388 struct list_head *hole;
389 struct list_head flist[RADEON_NUM_RINGS];
390 struct list_head olist;
Jerome Glisseb15ba512011-11-15 11:48:34 -0500391 unsigned size;
392 uint64_t gpu_addr;
393 void *cpu_ptr;
394 uint32_t domain;
395};
396
397struct radeon_sa_bo;
398
399/* sub-allocation buffer */
400struct radeon_sa_bo {
Christian Königc3b7fe82012-05-09 15:34:56 +0200401 struct list_head olist;
402 struct list_head flist;
Jerome Glisseb15ba512011-11-15 11:48:34 -0500403 struct radeon_sa_manager *manager;
Christian Könige6661a92012-05-09 15:34:52 +0200404 unsigned soffset;
405 unsigned eoffset;
Christian König557017a2012-05-09 15:34:54 +0200406 struct radeon_fence *fence;
Jerome Glisseb15ba512011-11-15 11:48:34 -0500407};
408
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200409/*
410 * GEM objects.
411 */
412struct radeon_gem {
Jerome Glisse4c788672009-11-20 14:29:23 +0100413 struct mutex mutex;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200414 struct list_head objects;
415};
416
417int radeon_gem_init(struct radeon_device *rdev);
418void radeon_gem_fini(struct radeon_device *rdev);
419int radeon_gem_object_create(struct radeon_device *rdev, int size,
Jerome Glisse4c788672009-11-20 14:29:23 +0100420 int alignment, int initial_domain,
421 bool discardable, bool kernel,
422 struct drm_gem_object **obj);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200423
Dave Airlieff72145b2011-02-07 12:16:14 +1000424int radeon_mode_dumb_create(struct drm_file *file_priv,
425 struct drm_device *dev,
426 struct drm_mode_create_dumb *args);
427int radeon_mode_dumb_mmap(struct drm_file *filp,
428 struct drm_device *dev,
429 uint32_t handle, uint64_t *offset_p);
430int radeon_mode_dumb_destroy(struct drm_file *file_priv,
431 struct drm_device *dev,
432 uint32_t handle);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200433
434/*
Jerome Glissec1341e52011-12-21 12:13:47 -0500435 * Semaphores.
436 */
Jerome Glissec1341e52011-12-21 12:13:47 -0500437/* everything here is constant */
438struct radeon_semaphore {
Jerome Glissea8c05942012-05-09 15:34:57 +0200439 struct radeon_sa_bo *sa_bo;
440 signed waiters;
Jerome Glissec1341e52011-12-21 12:13:47 -0500441 uint64_t gpu_addr;
Jerome Glissec1341e52011-12-21 12:13:47 -0500442};
443
Jerome Glissec1341e52011-12-21 12:13:47 -0500444int radeon_semaphore_create(struct radeon_device *rdev,
445 struct radeon_semaphore **semaphore);
446void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
447 struct radeon_semaphore *semaphore);
448void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
449 struct radeon_semaphore *semaphore);
Christian König8f676c42012-05-02 15:11:18 +0200450int radeon_semaphore_sync_rings(struct radeon_device *rdev,
451 struct radeon_semaphore *semaphore,
452 bool sync_to[RADEON_NUM_RINGS],
453 int dst_ring);
Jerome Glissec1341e52011-12-21 12:13:47 -0500454void radeon_semaphore_free(struct radeon_device *rdev,
Jerome Glissea8c05942012-05-09 15:34:57 +0200455 struct radeon_semaphore *semaphore,
456 struct radeon_fence *fence);
Jerome Glissec1341e52011-12-21 12:13:47 -0500457
458/*
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200459 * GART structures, functions & helpers
460 */
461struct radeon_mc;
462
Matt Turnera77f1712009-10-14 00:34:41 -0400463#define RADEON_GPU_PAGE_SIZE 4096
Jerome Glissed594e462010-02-17 21:54:29 +0000464#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
Alex Deucher003cefe2011-09-16 12:04:08 -0400465#define RADEON_GPU_PAGE_SHIFT 12
Jerome Glisse721604a2012-01-05 22:11:05 -0500466#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
Matt Turnera77f1712009-10-14 00:34:41 -0400467
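/*
 * For example, with the 4 KiB GPU page size above,
 * RADEON_GPU_PAGE_ALIGN(4096) == 4096 and RADEON_GPU_PAGE_ALIGN(4097) == 8192:
 * the macro rounds a size up to the next GPU page boundary.
 */
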
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200468struct radeon_gart {
469 dma_addr_t table_addr;
Jerome Glissec9a1be92011-11-03 11:16:49 -0400470 struct radeon_bo *robj;
471 void *ptr;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200472 unsigned num_gpu_pages;
473 unsigned num_cpu_pages;
474 unsigned table_size;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200475 struct page **pages;
476 dma_addr_t *pages_addr;
477 bool ready;
478};
479
480int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
481void radeon_gart_table_ram_free(struct radeon_device *rdev);
482int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
483void radeon_gart_table_vram_free(struct radeon_device *rdev);
Jerome Glissec9a1be92011-11-03 11:16:49 -0400484int radeon_gart_table_vram_pin(struct radeon_device *rdev);
485void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200486int radeon_gart_init(struct radeon_device *rdev);
487void radeon_gart_fini(struct radeon_device *rdev);
488void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
489 int pages);
490int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
Konrad Rzeszutek Wilkc39d3512010-12-02 11:04:29 -0500491 int pages, struct page **pagelist,
492 dma_addr_t *dma_addr);
Jerome Glissec9a1be92011-11-03 11:16:49 -0400493void radeon_gart_restore(struct radeon_device *rdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200494
495
496/*
497 * GPU MC structures, functions & helpers
498 */
499struct radeon_mc {
500 resource_size_t aper_size;
501 resource_size_t aper_base;
502 resource_size_t agp_base;
Dave Airlie7a50f012009-07-21 20:39:30 +1000503 /* for some chips with <= 32MB we need to lie
504 * about vram size near mc fb location */
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000505 u64 mc_vram_size;
Jerome Glissed594e462010-02-17 21:54:29 +0000506 u64 visible_vram_size;
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000507 u64 gtt_size;
508 u64 gtt_start;
509 u64 gtt_end;
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000510 u64 vram_start;
511 u64 vram_end;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200512 unsigned vram_width;
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000513 u64 real_vram_size;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200514 int vram_mtrr;
515 bool vram_is_ddr;
Jerome Glissed594e462010-02-17 21:54:29 +0000516 bool igp_sideport_enabled;
Alex Deucher8d369bb2010-07-15 10:51:10 -0400517 u64 gtt_base_align;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200518};
519
Alex Deucher06b64762010-01-05 11:27:29 -0500520bool radeon_combios_sideport_present(struct radeon_device *rdev);
521bool radeon_atombios_sideport_present(struct radeon_device *rdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200522
523/*
524 * GPU scratch registers structures, functions & helpers
525 */
526struct radeon_scratch {
527 unsigned num_reg;
Alex Deucher724c80e2010-08-27 18:25:25 -0400528 uint32_t reg_base;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200529 bool free[32];
530 uint32_t reg[32];
531};
532
533int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
534void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
535
536
537/*
538 * IRQS.
539 */
Alex Deucher6f34be52010-11-21 10:59:01 -0500540
541struct radeon_unpin_work {
542 struct work_struct work;
543 struct radeon_device *rdev;
544 int crtc_id;
545 struct radeon_fence *fence;
546 struct drm_pending_vblank_event *event;
547 struct radeon_bo *old_rbo;
548 u64 new_crtc_base;
549};
550
551struct r500_irq_stat_regs {
552 u32 disp_int;
Alex Deucherf122c612012-03-30 08:59:57 -0400553 u32 hdmi0_status;
Alex Deucher6f34be52010-11-21 10:59:01 -0500554};
555
556struct r600_irq_stat_regs {
557 u32 disp_int;
558 u32 disp_int_cont;
559 u32 disp_int_cont2;
560 u32 d1grph_int;
561 u32 d2grph_int;
Alex Deucherf122c612012-03-30 08:59:57 -0400562 u32 hdmi0_status;
563 u32 hdmi1_status;
Alex Deucher6f34be52010-11-21 10:59:01 -0500564};
565
566struct evergreen_irq_stat_regs {
567 u32 disp_int;
568 u32 disp_int_cont;
569 u32 disp_int_cont2;
570 u32 disp_int_cont3;
571 u32 disp_int_cont4;
572 u32 disp_int_cont5;
573 u32 d1grph_int;
574 u32 d2grph_int;
575 u32 d3grph_int;
576 u32 d4grph_int;
577 u32 d5grph_int;
578 u32 d6grph_int;
Alex Deucherf122c612012-03-30 08:59:57 -0400579 u32 afmt_status1;
580 u32 afmt_status2;
581 u32 afmt_status3;
582 u32 afmt_status4;
583 u32 afmt_status5;
584 u32 afmt_status6;
Alex Deucher6f34be52010-11-21 10:59:01 -0500585};
586
587union radeon_irq_stat_regs {
588 struct r500_irq_stat_regs r500;
589 struct r600_irq_stat_regs r600;
590 struct evergreen_irq_stat_regs evergreen;
591};
592
Ilija Hadzic54bd52062011-10-26 15:43:58 -0400593#define RADEON_MAX_HPD_PINS 6
594#define RADEON_MAX_CRTCS 6
Alex Deucherf122c612012-03-30 08:59:57 -0400595#define RADEON_MAX_AFMT_BLOCKS 6
Ilija Hadzic54bd52062011-10-26 15:43:58 -0400596
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200597struct radeon_irq {
598 bool installed;
Alex Deucher1b370782011-11-17 20:13:28 -0500599 bool sw_int[RADEON_NUM_RINGS];
Ilija Hadzic54bd52062011-10-26 15:43:58 -0400600 bool crtc_vblank_int[RADEON_MAX_CRTCS];
601 bool pflip[RADEON_MAX_CRTCS];
Rafał Miłecki73a6d3f2010-01-08 00:22:47 +0100602 wait_queue_head_t vblank_queue;
Ilija Hadzic54bd52062011-10-26 15:43:58 -0400603 bool hpd[RADEON_MAX_HPD_PINS];
Alex Deucher2031f772010-04-22 12:52:11 -0400604 bool gui_idle;
605 bool gui_idle_acked;
606 wait_queue_head_t idle_queue;
Alex Deucherf122c612012-03-30 08:59:57 -0400607 bool afmt[RADEON_MAX_AFMT_BLOCKS];
Dave Airlie1614f8b2009-12-01 16:04:56 +1000608 spinlock_t sw_lock;
Alex Deucher1b370782011-11-17 20:13:28 -0500609 int sw_refcount[RADEON_NUM_RINGS];
Alex Deucher6f34be52010-11-21 10:59:01 -0500610 union radeon_irq_stat_regs stat_regs;
Ilija Hadzic54bd52062011-10-26 15:43:58 -0400611 spinlock_t pflip_lock[RADEON_MAX_CRTCS];
612 int pflip_refcount[RADEON_MAX_CRTCS];
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200613};
614
615int radeon_irq_kms_init(struct radeon_device *rdev);
616void radeon_irq_kms_fini(struct radeon_device *rdev);
Alex Deucher1b370782011-11-17 20:13:28 -0500617void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
618void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
Alex Deucher6f34be52010-11-21 10:59:01 -0500619void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
620void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200621
622/*
Christian Könige32eb502011-10-23 12:56:27 +0200623 * CP & rings.
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200624 */
Alex Deucher74652802011-08-25 13:39:48 -0400625
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200626struct radeon_ib {
Christian König2e0d9912012-05-09 15:34:53 +0200627 struct radeon_sa_bo *sa_bo;
Jerome Glissee8217672010-02-15 21:36:13 +0100628 unsigned idx;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200629 uint32_t length_dw;
Jerome Glisseb15ba512011-11-15 11:48:34 -0500630 uint64_t gpu_addr;
631 uint32_t *ptr;
632 struct radeon_fence *fence;
Jerome Glisse721604a2012-01-05 22:11:05 -0500633 unsigned vm_id;
Alex Deucherdfcf5f32012-03-20 17:18:14 -0400634 bool is_const_ib;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200635};
636
Dave Airlieecb114a2009-09-15 11:12:56 +1000637/*
638 * locking -
639 * mutex protects scheduled_ibs, ready, alloc_bm
640 */
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200641struct radeon_ib_pool {
Jerome Glisse9fc04b52012-01-23 11:52:15 -0500642 struct radeon_mutex mutex;
Jerome Glisseb15ba512011-11-15 11:48:34 -0500643 struct radeon_sa_manager sa_manager;
644 struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
645 bool ready;
646 unsigned head_id;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200647};
648
Christian Könige32eb502011-10-23 12:56:27 +0200649struct radeon_ring {
Jerome Glisse4c788672009-11-20 14:29:23 +0100650 struct radeon_bo *ring_obj;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200651 volatile uint32_t *ring;
652 unsigned rptr;
Christian König5596a9d2011-10-13 12:48:45 +0200653 unsigned rptr_offs;
654 unsigned rptr_reg;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200655 unsigned wptr;
656 unsigned wptr_old;
Christian König5596a9d2011-10-13 12:48:45 +0200657 unsigned wptr_reg;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200658 unsigned ring_size;
659 unsigned ring_free_dw;
660 int count_dw;
Christian König069211e2012-05-02 15:11:20 +0200661 unsigned long last_activity;
662 unsigned last_rptr;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200663 uint64_t gpu_addr;
664 uint32_t align_mask;
665 uint32_t ptr_mask;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200666 bool ready;
Alex Deucher78c55602011-11-17 14:25:56 -0500667 u32 ptr_reg_shift;
668 u32 ptr_reg_mask;
669 u32 nop;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200670};
671
Alex Deucherd8f60cf2009-12-01 13:43:46 -0500672/*
Jerome Glisse721604a2012-01-05 22:11:05 -0500673 * VM
674 */
675struct radeon_vm {
676 struct list_head list;
677 struct list_head va;
678 int id;
679 unsigned last_pfn;
680 u64 pt_gpu_addr;
681 u64 *pt;
Christian König2e0d9912012-05-09 15:34:53 +0200682 struct radeon_sa_bo *sa_bo;
Jerome Glisse721604a2012-01-05 22:11:05 -0500683 struct mutex mutex;
684 /* last fence for cs using this vm */
685 struct radeon_fence *fence;
686};
687
688struct radeon_vm_funcs {
689 int (*init)(struct radeon_device *rdev);
690 void (*fini)(struct radeon_device *rdev);
	/* cs mutex must be locked for schedule_ib */
692 int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
693 void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
694 void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
695 uint32_t (*page_flags)(struct radeon_device *rdev,
696 struct radeon_vm *vm,
697 uint32_t flags);
698 void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
699 unsigned pfn, uint64_t addr, uint32_t flags);
700};
701
702struct radeon_vm_manager {
703 struct list_head lru_vm;
704 uint32_t use_bitmap;
705 struct radeon_sa_manager sa_manager;
706 uint32_t max_pfn;
707 /* fields constant after init */
708 const struct radeon_vm_funcs *funcs;
709 /* number of VMIDs */
710 unsigned nvm;
711 /* vram base address for page table entry */
712 u64 vram_base_offset;
Alex Deucher67e915e2012-01-06 09:38:15 -0500713 /* is vm enabled? */
714 bool enabled;
Jerome Glisse721604a2012-01-05 22:11:05 -0500715};
716
717/*
718 * file private structure
719 */
720struct radeon_fpriv {
721 struct radeon_vm vm;
722};
723
724/*
Alex Deucherd8f60cf2009-12-01 13:43:46 -0500725 * R6xx+ IH ring
726 */
727struct r600_ih {
Jerome Glisse4c788672009-11-20 14:29:23 +0100728 struct radeon_bo *ring_obj;
Alex Deucherd8f60cf2009-12-01 13:43:46 -0500729 volatile uint32_t *ring;
730 unsigned rptr;
Christian Königbf852792011-10-13 13:19:22 +0200731 unsigned rptr_offs;
Alex Deucherd8f60cf2009-12-01 13:43:46 -0500732 unsigned wptr;
733 unsigned wptr_old;
734 unsigned ring_size;
735 uint64_t gpu_addr;
Alex Deucherd8f60cf2009-12-01 13:43:46 -0500736 uint32_t ptr_mask;
737 spinlock_t lock;
738 bool enabled;
739};
740
Ilija Hadzic8eec9d62011-10-12 23:29:40 -0400741struct r600_blit_cp_primitives {
742 void (*set_render_target)(struct radeon_device *rdev, int format,
743 int w, int h, u64 gpu_addr);
744 void (*cp_set_surface_sync)(struct radeon_device *rdev,
745 u32 sync_type, u32 size,
746 u64 mc_addr);
747 void (*set_shaders)(struct radeon_device *rdev);
748 void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
749 void (*set_tex_resource)(struct radeon_device *rdev,
750 int format, int w, int h, int pitch,
Alex Deucher9bb77032011-10-22 10:07:09 -0400751 u64 gpu_addr, u32 size);
Ilija Hadzic8eec9d62011-10-12 23:29:40 -0400752 void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
753 int x2, int y2);
754 void (*draw_auto)(struct radeon_device *rdev);
755 void (*set_default_state)(struct radeon_device *rdev);
756};
757
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000758struct r600_blit {
Jerome Glisseff82f052010-01-22 15:19:00 +0100759 struct mutex mutex;
Jerome Glisse4c788672009-11-20 14:29:23 +0100760 struct radeon_bo *shader_obj;
Ilija Hadzic8eec9d62011-10-12 23:29:40 -0400761 struct r600_blit_cp_primitives primitives;
762 int max_dim;
763 int ring_size_common;
764 int ring_size_per_loop;
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000765 u64 shader_gpu_addr;
766 u32 vs_offset, ps_offset;
767 u32 state_offset;
768 u32 state_len;
769 u32 vb_used, vb_total;
770 struct radeon_ib *vb_ib;
771};
772
Alex Deucher6ddddfe2011-10-14 10:51:22 -0400773void r600_blit_suspend(struct radeon_device *rdev);
774
Alex Deucher347e7592012-03-20 17:18:21 -0400775/*
776 * SI RLC stuff
777 */
778struct si_rlc {
779 /* for power gating */
780 struct radeon_bo *save_restore_obj;
781 uint64_t save_restore_gpu_addr;
782 /* for clear state */
783 struct radeon_bo *clear_state_obj;
784 uint64_t clear_state_gpu_addr;
785};
786
Jerome Glisse69e130a2011-12-21 12:13:46 -0500787int radeon_ib_get(struct radeon_device *rdev, int ring,
788 struct radeon_ib **ib, unsigned size);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200789void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
Jerome Glissec1341e52011-12-21 12:13:47 -0500790bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200791int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
792int radeon_ib_pool_init(struct radeon_device *rdev);
793void radeon_ib_pool_fini(struct radeon_device *rdev);
Jerome Glisseb15ba512011-11-15 11:48:34 -0500794int radeon_ib_pool_start(struct radeon_device *rdev);
795int radeon_ib_pool_suspend(struct radeon_device *rdev);
Christian König7bd560e2012-05-02 15:11:12 +0200796int radeon_ib_ring_tests(struct radeon_device *rdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200797/* Ring access between begin & end cannot sleep */
Christian Könige32eb502011-10-23 12:56:27 +0200798int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
799void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
800int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
801int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
802void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
803void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
Christian Königd6999bc2012-05-09 15:34:45 +0200804void radeon_ring_undo(struct radeon_ring *ring);
Christian Könige32eb502011-10-23 12:56:27 +0200805void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
806int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
Christian König7b9ef162012-05-02 15:11:23 +0200807void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
Christian König069211e2012-05-02 15:11:20 +0200808void radeon_ring_lockup_update(struct radeon_ring *ring);
809bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
Christian Könige32eb502011-10-23 12:56:27 +0200810int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
Alex Deucher78c55602011-11-17 14:25:56 -0500811 unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
812 u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
Christian Könige32eb502011-10-23 12:56:27 +0200813void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
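
/*
 * A minimal sketch of the ring lock/commit pairing declared above.
 * rdev->ring[] and radeon_ring_write() are assumed from elsewhere in the
 * driver, and "reg"/"val" are placeholders:
 *
 *	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 *
 *	r = radeon_ring_lock(rdev, ring, 2);	// reserve space for 2 dwords
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, val);
 *	radeon_ring_unlock_commit(rdev, ring);	// update wptr and unlock
 */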
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200814
815
816/*
817 * CS.
818 */
819struct radeon_cs_reloc {
820 struct drm_gem_object *gobj;
Jerome Glisse4c788672009-11-20 14:29:23 +0100821 struct radeon_bo *robj;
822 struct radeon_bo_list lobj;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200823 uint32_t handle;
824 uint32_t flags;
825};
826
827struct radeon_cs_chunk {
828 uint32_t chunk_id;
829 uint32_t length_dw;
Jerome Glisse721604a2012-01-05 22:11:05 -0500830 int kpage_idx[2];
831 uint32_t *kpage[2];
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200832 uint32_t *kdata;
Jerome Glisse721604a2012-01-05 22:11:05 -0500833 void __user *user_ptr;
834 int last_copied_page;
835 int last_page_index;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200836};
837
838struct radeon_cs_parser {
Jerome Glissec8c15ff2010-01-18 13:01:36 +0100839 struct device *dev;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200840 struct radeon_device *rdev;
841 struct drm_file *filp;
842 /* chunks */
843 unsigned nchunks;
844 struct radeon_cs_chunk *chunks;
845 uint64_t *chunks_array;
846 /* IB */
847 unsigned idx;
848 /* relocations */
849 unsigned nrelocs;
850 struct radeon_cs_reloc *relocs;
851 struct radeon_cs_reloc **relocs_ptr;
852 struct list_head validated;
853 /* indices of various chunks */
854 int chunk_ib_idx;
855 int chunk_relocs_idx;
Jerome Glisse721604a2012-01-05 22:11:05 -0500856 int chunk_flags_idx;
Alex Deucherdfcf5f32012-03-20 17:18:14 -0400857 int chunk_const_ib_idx;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200858 struct radeon_ib *ib;
Alex Deucherdfcf5f32012-03-20 17:18:14 -0400859 struct radeon_ib *const_ib;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200860 void *track;
Jerome Glisse3ce0a232009-09-08 10:10:24 +1000861 unsigned family;
Marek Olšáke70f2242011-10-25 01:38:45 +0200862 int parser_error;
Jerome Glisse721604a2012-01-05 22:11:05 -0500863 u32 cs_flags;
864 u32 ring;
865 s32 priority;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200866};
867
Dave Airlie513bcb42009-09-23 16:56:27 +1000868extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
869extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
Andi Kleence580fa2011-10-13 16:08:47 -0700870extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
Dave Airlie513bcb42009-09-23 16:56:27 +1000871
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200872struct radeon_cs_packet {
873 unsigned idx;
874 unsigned type;
875 unsigned reg;
876 unsigned opcode;
877 int count;
878 unsigned one_reg_wr;
879};
880
881typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
882 struct radeon_cs_packet *pkt,
883 unsigned idx, unsigned reg);
884typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
885 struct radeon_cs_packet *pkt);
886
887
888/*
889 * AGP
890 */
891int radeon_agp_init(struct radeon_device *rdev);
Dave Airlie0ebf1712009-11-05 15:39:10 +1000892void radeon_agp_resume(struct radeon_device *rdev);
Jerome Glisse10b06122010-05-21 18:48:54 +0200893void radeon_agp_suspend(struct radeon_device *rdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200894void radeon_agp_fini(struct radeon_device *rdev);
895
896
897/*
898 * Writeback
899 */
900struct radeon_wb {
Jerome Glisse4c788672009-11-20 14:29:23 +0100901 struct radeon_bo *wb_obj;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200902 volatile uint32_t *wb;
903 uint64_t gpu_addr;
Alex Deucher724c80e2010-08-27 18:25:25 -0400904 bool enabled;
Alex Deucherd0f8a852010-09-04 05:04:34 -0400905 bool use_event;
Jerome Glisse771fe6b2009-06-05 14:42:42 +0200906};
907
Alex Deucher724c80e2010-08-27 18:25:25 -0400908#define RADEON_WB_SCRATCH_OFFSET 0
909#define RADEON_WB_CP_RPTR_OFFSET 1024
Alex Deucher0c88a022011-03-02 20:07:31 -0500910#define RADEON_WB_CP1_RPTR_OFFSET 1280
911#define RADEON_WB_CP2_RPTR_OFFSET 1536
Alex Deucher724c80e2010-08-27 18:25:25 -0400912#define R600_WB_IH_WPTR_OFFSET 2048
Alex Deucherd0f8a852010-09-04 05:04:34 -0400913#define R600_WB_EVENT_OFFSET 3072
Alex Deucher724c80e2010-08-27 18:25:25 -0400914
/**
 * struct radeon_pm - power management data
 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk: sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk: system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk: ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk: GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth: current bandwidth needs
 *
 * It keeps track of various data needed to take power management decisions.
 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
 * The relation between gpu/memory clock and available bandwidth is hw dependent
 * (type of memory, bus size, efficiency, ...)
 */
Alex Deucherce8f5372010-05-07 15:10:16 -0400934
935enum radeon_pm_method {
936 PM_METHOD_PROFILE,
937 PM_METHOD_DYNPM,
Rafał Miłeckic913e232009-12-22 23:02:16 +0100938};
Alex Deucherce8f5372010-05-07 15:10:16 -0400939
940enum radeon_dynpm_state {
941 DYNPM_STATE_DISABLED,
942 DYNPM_STATE_MINIMUM,
943 DYNPM_STATE_PAUSED,
Rafael J. Wysocki3f53eb62010-06-17 23:02:27 +0000944 DYNPM_STATE_ACTIVE,
945 DYNPM_STATE_SUSPENDED,
Alex Deucherce8f5372010-05-07 15:10:16 -0400946};
947enum radeon_dynpm_action {
948 DYNPM_ACTION_NONE,
949 DYNPM_ACTION_MINIMUM,
950 DYNPM_ACTION_DOWNCLOCK,
951 DYNPM_ACTION_UPCLOCK,
952 DYNPM_ACTION_DEFAULT
Rafał Miłeckic913e232009-12-22 23:02:16 +0100953};
Alex Deucher56278a82009-12-28 13:58:44 -0500954
955enum radeon_voltage_type {
956 VOLTAGE_NONE = 0,
957 VOLTAGE_GPIO,
958 VOLTAGE_VDDC,
959 VOLTAGE_SW
960};
961
Alex Deucher0ec0e742009-12-23 13:21:58 -0500962enum radeon_pm_state_type {
963 POWER_STATE_TYPE_DEFAULT,
964 POWER_STATE_TYPE_POWERSAVE,
965 POWER_STATE_TYPE_BATTERY,
966 POWER_STATE_TYPE_BALANCED,
967 POWER_STATE_TYPE_PERFORMANCE,
968};
969
Alex Deucherce8f5372010-05-07 15:10:16 -0400970enum radeon_pm_profile_type {
971 PM_PROFILE_DEFAULT,
972 PM_PROFILE_AUTO,
973 PM_PROFILE_LOW,
Alex Deucherc9e75b22010-06-02 17:56:01 -0400974 PM_PROFILE_MID,
Alex Deucherce8f5372010-05-07 15:10:16 -0400975 PM_PROFILE_HIGH,
976};
977
978#define PM_PROFILE_DEFAULT_IDX 0
979#define PM_PROFILE_LOW_SH_IDX 1
Alex Deucherc9e75b22010-06-02 17:56:01 -0400980#define PM_PROFILE_MID_SH_IDX 2
981#define PM_PROFILE_HIGH_SH_IDX 3
982#define PM_PROFILE_LOW_MH_IDX 4
983#define PM_PROFILE_MID_MH_IDX 5
984#define PM_PROFILE_HIGH_MH_IDX 6
985#define PM_PROFILE_MAX 7
Alex Deucherce8f5372010-05-07 15:10:16 -0400986
987struct radeon_pm_profile {
988 int dpms_off_ps_idx;
989 int dpms_on_ps_idx;
990 int dpms_off_cm_idx;
991 int dpms_on_cm_idx;
Alex Deucher516d0e42009-12-23 14:28:05 -0500992};
993
Alex Deucher21a81222010-07-02 12:58:16 -0400994enum radeon_int_thermal_type {
995 THERMAL_TYPE_NONE,
996 THERMAL_TYPE_RV6XX,
997 THERMAL_TYPE_RV770,
998 THERMAL_TYPE_EVERGREEN,
Alex Deuchere33df252010-11-22 17:56:32 -0500999 THERMAL_TYPE_SUMO,
Alex Deucher4fddba12011-01-06 21:19:22 -05001000 THERMAL_TYPE_NI,
Alex Deucher14607d02012-03-20 17:18:09 -04001001 THERMAL_TYPE_SI,
Alex Deucher21a81222010-07-02 12:58:16 -04001002};
1003
Alex Deucher56278a82009-12-28 13:58:44 -05001004struct radeon_voltage {
1005 enum radeon_voltage_type type;
1006 /* gpio voltage */
1007 struct radeon_gpio_rec gpio;
1008 u32 delay; /* delay in usec from voltage drop to sclk change */
1009 bool active_high; /* voltage drop is active when bit is high */
1010 /* VDDC voltage */
1011 u8 vddc_id; /* index into vddc voltage table */
1012 u8 vddci_id; /* index into vddci voltage table */
1013 bool vddci_enabled;
1014 /* r6xx+ sw */
Alex Deucher2feea492011-04-12 14:49:24 -04001015 u16 voltage;
1016 /* evergreen+ vddci */
1017 u16 vddci;
Alex Deucher56278a82009-12-28 13:58:44 -05001018};
1019
Alex Deucherd7311172010-05-03 01:13:14 -04001020/* clock mode flags */
1021#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)
1022
Alex Deucher56278a82009-12-28 13:58:44 -05001023struct radeon_pm_clock_info {
1024 /* memory clock */
1025 u32 mclk;
1026 /* engine clock */
1027 u32 sclk;
1028 /* voltage info */
1029 struct radeon_voltage voltage;
Alex Deucherd7311172010-05-03 01:13:14 -04001030 /* standardized clock flags */
Alex Deucher56278a82009-12-28 13:58:44 -05001031 u32 flags;
1032};
1033
Alex Deuchera48b9b42010-04-22 14:03:55 -04001034/* state flags */
Alex Deucherd7311172010-05-03 01:13:14 -04001035#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)
Alex Deuchera48b9b42010-04-22 14:03:55 -04001036
Alex Deucher56278a82009-12-28 13:58:44 -05001037struct radeon_power_state {
Alex Deucher0ec0e742009-12-23 13:21:58 -05001038 enum radeon_pm_state_type type;
Alex Deucher8f3f1c92011-11-04 10:09:43 -04001039 struct radeon_pm_clock_info *clock_info;
Alex Deucher56278a82009-12-28 13:58:44 -05001040 /* number of valid clock modes in this power state */
1041 int num_clock_modes;
Alex Deucher56278a82009-12-28 13:58:44 -05001042 struct radeon_pm_clock_info *default_clock_mode;
Alex Deuchera48b9b42010-04-22 14:03:55 -04001043 /* standardized state flags */
1044 u32 flags;
Alex Deucher79daedc2010-04-22 14:25:19 -04001045 u32 misc; /* vbios specific flags */
1046 u32 misc2; /* vbios specific flags */
1047 int pcie_lanes; /* pcie lanes */
Alex Deucher56278a82009-12-28 13:58:44 -05001048};
1049
/*
 * Some modes are overclocked by a very low value; accept them
 */
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
1054
Jerome Glissec93bb852009-07-13 21:04:08 +02001055struct radeon_pm {
Rafał Miłeckic913e232009-12-22 23:02:16 +01001056 struct mutex mutex;
Alex Deuchera48b9b42010-04-22 14:03:55 -04001057 u32 active_crtcs;
1058 int active_crtc_count;
Rafał Miłeckic913e232009-12-22 23:02:16 +01001059 int req_vblank;
Rafał Miłecki839461d2010-03-02 22:06:51 +01001060 bool vblank_sync;
Alex Deucher2031f772010-04-22 12:52:11 -04001061 bool gui_idle;
Jerome Glissec93bb852009-07-13 21:04:08 +02001062 fixed20_12 max_bandwidth;
1063 fixed20_12 igp_sideport_mclk;
1064 fixed20_12 igp_system_mclk;
1065 fixed20_12 igp_ht_link_clk;
1066 fixed20_12 igp_ht_link_width;
1067 fixed20_12 k8_bandwidth;
1068 fixed20_12 sideport_bandwidth;
1069 fixed20_12 ht_bandwidth;
1070 fixed20_12 core_bandwidth;
1071 fixed20_12 sclk;
Alex Deucherf47299c2010-03-16 20:54:38 -04001072 fixed20_12 mclk;
Jerome Glissec93bb852009-07-13 21:04:08 +02001073 fixed20_12 needed_bandwidth;
Alex Deucher0975b162011-02-02 18:42:03 -05001074 struct radeon_power_state *power_state;
Alex Deucher56278a82009-12-28 13:58:44 -05001075 /* number of valid power states */
1076 int num_power_states;
Alex Deuchera48b9b42010-04-22 14:03:55 -04001077 int current_power_state_index;
1078 int current_clock_mode_index;
1079 int requested_power_state_index;
1080 int requested_clock_mode_index;
1081 int default_power_state_index;
1082 u32 current_sclk;
1083 u32 current_mclk;
Alex Deucher2feea492011-04-12 14:49:24 -04001084 u16 current_vddc;
1085 u16 current_vddci;
Alex Deucher9ace9f72011-01-06 21:19:26 -05001086 u32 default_sclk;
1087 u32 default_mclk;
Alex Deucher2feea492011-04-12 14:49:24 -04001088 u16 default_vddc;
1089 u16 default_vddci;
Alex Deucher29fb52c2010-03-11 10:01:17 -05001090 struct radeon_i2c_chan *i2c_bus;
Alex Deucherce8f5372010-05-07 15:10:16 -04001091 /* selected pm method */
1092 enum radeon_pm_method pm_method;
1093 /* dynpm power management */
1094 struct delayed_work dynpm_idle_work;
1095 enum radeon_dynpm_state dynpm_state;
1096 enum radeon_dynpm_action dynpm_planned_action;
1097 unsigned long dynpm_action_timeout;
1098 bool dynpm_can_upclock;
1099 bool dynpm_can_downclock;
1100 /* profile-based power management */
1101 enum radeon_pm_profile_type profile;
1102 int profile_index;
1103 struct radeon_pm_profile profiles[PM_PROFILE_MAX];
Alex Deucher21a81222010-07-02 12:58:16 -04001104 /* internal thermal controller on rv6xx+ */
1105 enum radeon_int_thermal_type int_thermal_type;
1106 struct device *int_hwmon_dev;
Jerome Glissec93bb852009-07-13 21:04:08 +02001107};
1108
Alex Deuchera4c9e2e2011-11-04 10:09:41 -04001109int radeon_pm_get_type_index(struct radeon_device *rdev,
1110 enum radeon_pm_state_type ps_type,
1111 int instance);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001112
Rafał Miłeckia92553a2012-04-28 23:35:20 +02001113struct r600_audio {
1114 bool enabled;
1115 int channels;
1116 int rate;
1117 int bits_per_sample;
1118 u8 status_bits;
1119 u8 category_code;
1120};
1121
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001122/*
1123 * Benchmarking
1124 */
Ilija Hadzic638dd7d2011-10-12 23:29:39 -04001125void radeon_benchmark(struct radeon_device *rdev, int test_number);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001126
1127
1128/*
Michel Dänzerecc0b322009-07-21 11:23:57 +02001129 * Testing
1130 */
1131void radeon_test_moves(struct radeon_device *rdev);
Christian König60a7e392011-09-27 12:31:00 +02001132void radeon_test_ring_sync(struct radeon_device *rdev,
Christian Könige32eb502011-10-23 12:56:27 +02001133 struct radeon_ring *cpA,
1134 struct radeon_ring *cpB);
Christian König60a7e392011-09-27 12:31:00 +02001135void radeon_test_syncing(struct radeon_device *rdev);
Michel Dänzerecc0b322009-07-21 11:23:57 +02001136
1137
1138/*
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001139 * Debugfs
1140 */
Christian König4d8bf9a2011-10-24 14:54:54 +02001141struct radeon_debugfs {
1142 struct drm_info_list *files;
1143 unsigned num_files;
1144};
1145
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001146int radeon_debugfs_add_files(struct radeon_device *rdev,
1147 struct drm_info_list *files,
1148 unsigned nfiles);
1149int radeon_debugfs_fence_init(struct radeon_device *rdev);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001150
1151
1152/*
1153 * ASIC specific functions.
1154 */
1155struct radeon_asic {
Jerome Glisse068a1172009-06-17 13:28:30 +02001156 int (*init)(struct radeon_device *rdev);
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001157 void (*fini)(struct radeon_device *rdev);
1158 int (*resume)(struct radeon_device *rdev);
1159 int (*suspend)(struct radeon_device *rdev);
Dave Airlie28d52042009-09-21 14:33:58 +10001160 void (*vga_set_state)(struct radeon_device *rdev, bool state);
Jerome Glissea2d07b72010-03-09 14:45:11 +00001161 int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform a special
	 * operation on specific ioctls. For instance on wait idle some hw
	 * might want to perform an HDP flush through MMIO as it seems that
	 * some R6XX/R7XX hw doesn't take HDP flushes into account if programmed
	 * through the ring.
	 */
1168 void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
1169 /* check if 3D engine is idle */
1170 bool (*gui_idle)(struct radeon_device *rdev);
1171 /* wait for mc_idle */
1172 int (*mc_wait_for_idle)(struct radeon_device *rdev);
1173 /* gart */
Alex Deucherc5b3b852012-02-23 17:53:46 -05001174 struct {
1175 void (*tlb_flush)(struct radeon_device *rdev);
1176 int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
1177 } gart;
Alex Deucher54e88e02012-02-23 18:10:29 -05001178 /* ring specific callbacks */
Christian König4c87bc22011-10-19 19:02:21 +02001179 struct {
1180 void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
Jerome Glisse721604a2012-01-05 22:11:05 -05001181 int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
Christian König4c87bc22011-10-19 19:02:21 +02001182 void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
Christian Könige32eb502011-10-23 12:56:27 +02001183 void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
Christian König4c87bc22011-10-19 19:02:21 +02001184 struct radeon_semaphore *semaphore, bool emit_wait);
Christian Königeb0c19c2012-02-23 15:18:44 +01001185 int (*cs_parse)(struct radeon_cs_parser *p);
Alex Deucherf7128122012-02-23 17:53:45 -05001186 void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
1187 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1188 int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
Christian König312c4a82012-05-02 15:11:09 +02001189 bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
Christian König4c87bc22011-10-19 19:02:21 +02001190 } ring[RADEON_NUM_RINGS];
Alex Deucher54e88e02012-02-23 18:10:29 -05001191 /* irqs */
Alex Deucherb35ea4a2012-02-23 17:53:43 -05001192 struct {
1193 int (*set)(struct radeon_device *rdev);
1194 int (*process)(struct radeon_device *rdev);
1195 } irq;
Alex Deucher54e88e02012-02-23 18:10:29 -05001196 /* displays */
Alex Deucherc79a49c2012-02-23 17:53:47 -05001197 struct {
1198 /* display watermarks */
1199 void (*bandwidth_update)(struct radeon_device *rdev);
1200 /* get frame count */
1201 u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
1202 /* wait for vblank */
1203 void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
1204 } display;
Alex Deucher54e88e02012-02-23 18:10:29 -05001205 /* copy functions for bo handling */
Alex Deucher27cd7762012-02-23 17:53:42 -05001206 struct {
1207 int (*blit)(struct radeon_device *rdev,
1208 uint64_t src_offset,
1209 uint64_t dst_offset,
1210 unsigned num_gpu_pages,
1211 struct radeon_fence *fence);
1212 u32 blit_ring_index;
1213 int (*dma)(struct radeon_device *rdev,
1214 uint64_t src_offset,
1215 uint64_t dst_offset,
1216 unsigned num_gpu_pages,
1217 struct radeon_fence *fence);
1218 u32 dma_ring_index;
1219 /* method used for bo copy */
1220 int (*copy)(struct radeon_device *rdev,
1221 uint64_t src_offset,
1222 uint64_t dst_offset,
1223 unsigned num_gpu_pages,
1224 struct radeon_fence *fence);
1225 /* ring used for bo copies */
1226 u32 copy_ring_index;
1227 } copy;
Alex Deucher54e88e02012-02-23 18:10:29 -05001228 /* surfaces */
Alex Deucher9e6f3d02012-02-23 17:53:49 -05001229 struct {
1230 int (*set_reg)(struct radeon_device *rdev, int reg,
1231 uint32_t tiling_flags, uint32_t pitch,
1232 uint32_t offset, uint32_t obj_size);
1233 void (*clear_reg)(struct radeon_device *rdev, int reg);
1234 } surface;
Alex Deucher54e88e02012-02-23 18:10:29 -05001235 /* hotplug detect */
Alex Deucher901ea572012-02-23 17:53:39 -05001236 struct {
1237 void (*init)(struct radeon_device *rdev);
1238 void (*fini)(struct radeon_device *rdev);
1239 bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1240 void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
1241 } hpd;
Alex Deucherce8f5372010-05-07 15:10:16 -04001242 /* power management */
Alex Deuchera02fa392012-02-23 17:53:41 -05001243 struct {
1244 void (*misc)(struct radeon_device *rdev);
1245 void (*prepare)(struct radeon_device *rdev);
1246 void (*finish)(struct radeon_device *rdev);
1247 void (*init_profile)(struct radeon_device *rdev);
1248 void (*get_dynpm_state)(struct radeon_device *rdev);
Alex Deucher798bcf72012-02-23 17:53:48 -05001249 uint32_t (*get_engine_clock)(struct radeon_device *rdev);
1250 void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
1251 uint32_t (*get_memory_clock)(struct radeon_device *rdev);
1252 void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
1253 int (*get_pcie_lanes)(struct radeon_device *rdev);
1254 void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
1255 void (*set_clock_gating)(struct radeon_device *rdev, int enable);
Alex Deuchera02fa392012-02-23 17:53:41 -05001256 } pm;
Alex Deucher6f34be52010-11-21 10:59:01 -05001257 /* pageflipping */
Alex Deucher0f9e0062012-02-23 17:53:40 -05001258 struct {
1259 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
1260 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
1261 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
1262 } pflip;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001263};
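/*
 * These callbacks are normally reached through the radeon_*() wrapper
 * macros defined further down in this header rather than dereferenced
 * directly.  As a rough sketch (error and fence handling elided, names
 * as used elsewhere in this file), a buffer copy via the copy callbacks
 * would look like:
 *
 *	ring = radeon_copy_ring_index(rdev);
 *	r = radeon_copy(rdev, src_offset, dst_offset, num_gpu_pages, fence);
 */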
1264
Jerome Glisse21f9a432009-09-11 15:55:33 +02001265/*
1266 * Asic structures
1267 */
Dave Airlie551ebd82009-09-01 15:25:57 +10001268struct r100_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001269 const unsigned *reg_safe_bm;
1270 unsigned reg_safe_bm_size;
1271 u32 hdp_cntl;
Dave Airlie551ebd82009-09-01 15:25:57 +10001272};
1273
Jerome Glisse21f9a432009-09-11 15:55:33 +02001274struct r300_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001275 const unsigned *reg_safe_bm;
1276 unsigned reg_safe_bm_size;
1277 u32 resync_scratch;
1278 u32 hdp_cntl;
Jerome Glisse21f9a432009-09-11 15:55:33 +02001279};
1280
1281struct r600_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001282 unsigned max_pipes;
1283 unsigned max_tile_pipes;
1284 unsigned max_simds;
1285 unsigned max_backends;
1286 unsigned max_gprs;
1287 unsigned max_threads;
1288 unsigned max_stack_entries;
1289 unsigned max_hw_contexts;
1290 unsigned max_gs_threads;
1291 unsigned sx_max_export_size;
1292 unsigned sx_max_export_pos_size;
1293 unsigned sx_max_export_smx_size;
1294 unsigned sq_num_cf_insts;
1295 unsigned tiling_nbanks;
1296 unsigned tiling_npipes;
1297 unsigned tiling_group_size;
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001298 unsigned tile_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001299 unsigned backend_map;
Jerome Glisse21f9a432009-09-11 15:55:33 +02001300};
1301
1302struct rv770_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001303 unsigned max_pipes;
1304 unsigned max_tile_pipes;
1305 unsigned max_simds;
1306 unsigned max_backends;
1307 unsigned max_gprs;
1308 unsigned max_threads;
1309 unsigned max_stack_entries;
1310 unsigned max_hw_contexts;
1311 unsigned max_gs_threads;
1312 unsigned sx_max_export_size;
1313 unsigned sx_max_export_pos_size;
1314 unsigned sx_max_export_smx_size;
1315 unsigned sq_num_cf_insts;
1316 unsigned sx_num_of_sets;
1317 unsigned sc_prim_fifo_size;
1318 unsigned sc_hiz_tile_fifo_size;
1319 unsigned sc_earlyz_tile_fifo_fize;
1320 unsigned tiling_nbanks;
1321 unsigned tiling_npipes;
1322 unsigned tiling_group_size;
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001323 unsigned tile_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001324 unsigned backend_map;
Jerome Glisse21f9a432009-09-11 15:55:33 +02001325};
1326
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001327struct evergreen_asic {
1328 unsigned num_ses;
1329 unsigned max_pipes;
1330 unsigned max_tile_pipes;
1331 unsigned max_simds;
1332 unsigned max_backends;
1333 unsigned max_gprs;
1334 unsigned max_threads;
1335 unsigned max_stack_entries;
1336 unsigned max_hw_contexts;
1337 unsigned max_gs_threads;
1338 unsigned sx_max_export_size;
1339 unsigned sx_max_export_pos_size;
1340 unsigned sx_max_export_smx_size;
1341 unsigned sq_num_cf_insts;
1342 unsigned sx_num_of_sets;
1343 unsigned sc_prim_fifo_size;
1344 unsigned sc_hiz_tile_fifo_size;
1345 unsigned sc_earlyz_tile_fifo_size;
1346 unsigned tiling_nbanks;
1347 unsigned tiling_npipes;
1348 unsigned tiling_group_size;
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001349 unsigned tile_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001350 unsigned backend_map;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001351};
1352
Alex Deucherfecf1d02011-03-02 20:07:29 -05001353struct cayman_asic {
1354 unsigned max_shader_engines;
1355 unsigned max_pipes_per_simd;
1356 unsigned max_tile_pipes;
1357 unsigned max_simds_per_se;
1358 unsigned max_backends_per_se;
1359 unsigned max_texture_channel_caches;
1360 unsigned max_gprs;
1361 unsigned max_threads;
1362 unsigned max_gs_threads;
1363 unsigned max_stack_entries;
1364 unsigned sx_num_of_sets;
1365 unsigned sx_max_export_size;
1366 unsigned sx_max_export_pos_size;
1367 unsigned sx_max_export_smx_size;
1368 unsigned max_hw_contexts;
1369 unsigned sq_num_cf_insts;
1370 unsigned sc_prim_fifo_size;
1371 unsigned sc_hiz_tile_fifo_size;
1372 unsigned sc_earlyz_tile_fifo_size;
1373
1374 unsigned num_shader_engines;
1375 unsigned num_shader_pipes_per_simd;
1376 unsigned num_tile_pipes;
1377 unsigned num_simds_per_se;
1378 unsigned num_backends_per_se;
1379 unsigned backend_disable_mask_per_asic;
1380 unsigned backend_map;
1381 unsigned num_texture_channel_caches;
1382 unsigned mem_max_burst_length_bytes;
1383 unsigned mem_row_size_in_kb;
1384 unsigned shader_engine_tile_size;
1385 unsigned num_gpus;
1386 unsigned multi_gpu_tile_size;
1387
1388 unsigned tile_config;
Alex Deucherfecf1d02011-03-02 20:07:29 -05001389};
1390
Alex Deucher0a96d722012-03-20 17:18:11 -04001391struct si_asic {
1392 unsigned max_shader_engines;
1393 unsigned max_pipes_per_simd;
1394 unsigned max_tile_pipes;
1395 unsigned max_simds_per_se;
1396 unsigned max_backends_per_se;
1397 unsigned max_texture_channel_caches;
1398 unsigned max_gprs;
1399 unsigned max_gs_threads;
1400 unsigned max_hw_contexts;
1401 unsigned sc_prim_fifo_size_frontend;
1402 unsigned sc_prim_fifo_size_backend;
1403 unsigned sc_hiz_tile_fifo_size;
1404 unsigned sc_earlyz_tile_fifo_size;
1405
1406 unsigned num_shader_engines;
1407 unsigned num_tile_pipes;
1408 unsigned num_backends_per_se;
1409 unsigned backend_disable_mask_per_asic;
1410 unsigned backend_map;
1411 unsigned num_texture_channel_caches;
1412 unsigned mem_max_burst_length_bytes;
1413 unsigned mem_row_size_in_kb;
1414 unsigned shader_engine_tile_size;
1415 unsigned num_gpus;
1416 unsigned multi_gpu_tile_size;
1417
1418 unsigned tile_config;
Alex Deucher0a96d722012-03-20 17:18:11 -04001419};
1420
Jerome Glisse068a1172009-06-17 13:28:30 +02001421union radeon_asic_config {
1422 struct r300_asic r300;
Dave Airlie551ebd82009-09-01 15:25:57 +10001423 struct r100_asic r100;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001424 struct r600_asic r600;
1425 struct rv770_asic rv770;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001426 struct evergreen_asic evergreen;
Alex Deucherfecf1d02011-03-02 20:07:29 -05001427 struct cayman_asic cayman;
Alex Deucher0a96d722012-03-20 17:18:11 -04001428 struct si_asic si;
Jerome Glisse068a1172009-06-17 13:28:30 +02001429};
1430
Daniel Vetter0a10c852010-03-11 21:19:14 +00001431/*
 1432 * asic initialization from radeon_asic.c
1433 */
1434void radeon_agp_disable(struct radeon_device *rdev);
1435int radeon_asic_init(struct radeon_device *rdev);
1436
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001437
1438/*
1439 * IOCTL.
1440 */
1441int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
1442 struct drm_file *filp);
1443int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
1444 struct drm_file *filp);
1445int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
1446 struct drm_file *file_priv);
1447int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
1448 struct drm_file *file_priv);
1449int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1450 struct drm_file *file_priv);
1451int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
1452 struct drm_file *file_priv);
1453int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1454 struct drm_file *filp);
1455int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
1456 struct drm_file *filp);
1457int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
1458 struct drm_file *filp);
1459int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1460 struct drm_file *filp);
Jerome Glisse721604a2012-01-05 22:11:05 -05001461int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
1462 struct drm_file *filp);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001463int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
Dave Airliee024e112009-06-24 09:48:08 +10001464int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1465 struct drm_file *filp);
1466int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1467 struct drm_file *filp);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001468
Alex Deucher16cdf042011-10-28 10:30:02 -04001469/* VRAM scratch page for HDP bug, default vram page */
1470struct r600_vram_scratch {
Alex Deucher87cbf8f2010-08-27 13:59:54 -04001471 struct radeon_bo *robj;
1472 volatile uint32_t *ptr;
Alex Deucher16cdf042011-10-28 10:30:02 -04001473 u64 gpu_addr;
Alex Deucher87cbf8f2010-08-27 13:59:54 -04001474};
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001475
Michel Dänzer7a1619b2011-11-10 18:57:26 +01001476
1477/*
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001478 * Core structure, functions and helpers.
1479 */
1480typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
1481typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
1482
1483struct radeon_device {
Jerome Glisse9f022dd2009-09-11 15:35:22 +02001484 struct device *dev;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001485 struct drm_device *ddev;
1486 struct pci_dev *pdev;
1487 /* ASIC */
Jerome Glisse068a1172009-06-17 13:28:30 +02001488 union radeon_asic_config config;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001489 enum radeon_family family;
1490 unsigned long flags;
1491 int usec_timeout;
1492 enum radeon_pll_errata pll_errata;
1493 int num_gb_pipes;
Alex Deucherf779b3e2009-08-19 19:11:39 -04001494 int num_z_pipes;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001495 int disp_priority;
1496 /* BIOS */
1497 uint8_t *bios;
1498 bool is_atom_bios;
1499 uint16_t bios_header_start;
Jerome Glisse4c788672009-11-20 14:29:23 +01001500 struct radeon_bo *stollen_vga_memory;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001501 /* Register mmio */
Dave Airlie4c9bc752009-06-29 18:29:12 +10001502 resource_size_t rmmio_base;
1503 resource_size_t rmmio_size;
Benjamin Herrenschmidta0533fb2011-07-13 06:28:12 +00001504 void __iomem *rmmio;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001505 radeon_rreg_t mc_rreg;
1506 radeon_wreg_t mc_wreg;
1507 radeon_rreg_t pll_rreg;
1508 radeon_wreg_t pll_wreg;
Dave Airliede1b2892009-08-12 18:43:14 +10001509 uint32_t pcie_reg_mask;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001510 radeon_rreg_t pciep_rreg;
1511 radeon_wreg_t pciep_wreg;
Alex Deucher351a52a2010-06-30 11:52:50 -04001512 /* io port */
1513 void __iomem *rio_mem;
1514 resource_size_t rio_mem_size;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001515 struct radeon_clock clock;
1516 struct radeon_mc mc;
1517 struct radeon_gart gart;
1518 struct radeon_mode_info mode_info;
1519 struct radeon_scratch scratch;
1520 struct radeon_mman mman;
Alex Deucher74652802011-08-25 13:39:48 -04001521 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
Jerome Glisse0085c9502012-05-09 15:34:55 +02001522 wait_queue_head_t fence_queue;
Christian Königd6999bc2012-05-09 15:34:45 +02001523 struct mutex ring_lock;
Christian Könige32eb502011-10-23 12:56:27 +02001524 struct radeon_ring ring[RADEON_NUM_RINGS];
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001525 struct radeon_ib_pool ib_pool;
1526 struct radeon_irq irq;
1527 struct radeon_asic *asic;
1528 struct radeon_gem gem;
Jerome Glissec93bb852009-07-13 21:04:08 +02001529 struct radeon_pm pm;
Yang Zhaof657c2a2009-09-15 12:21:01 +10001530 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
Michel Dänzer7a1619b2011-11-10 18:57:26 +01001531 struct radeon_mutex cs_mutex;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001532 struct radeon_wb wb;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001533 struct radeon_dummy_page dummy_page;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001534 bool shutdown;
1535 bool suspend;
Dave Airliead49f502009-07-10 22:36:26 +10001536 bool need_dma32;
Jerome Glisse733289c2009-09-16 15:24:21 +02001537 bool accel_working;
Dave Airliee024e112009-06-24 09:48:08 +10001538 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001539 const struct firmware *me_fw; /* all family ME firmware */
1540 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001541 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
Alex Deucher0af62b02011-01-06 21:19:31 -05001542 const struct firmware *mc_fw; /* NI MC firmware */
Alex Deucher0f0de062012-03-20 17:18:17 -04001543 const struct firmware *ce_fw; /* SI CE firmware */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001544 struct r600_blit r600_blit;
Alex Deucher16cdf042011-10-28 10:30:02 -04001545 struct r600_vram_scratch vram_scratch;
Alex Deucher3e5cb982009-10-16 12:21:24 -04001546 int msi_enabled; /* msi enabled */
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001547 struct r600_ih ih; /* r6/700 interrupt ring */
Alex Deucher347e7592012-03-20 17:18:21 -04001548 struct si_rlc rlc;
Alex Deucherd4877cf2009-12-04 16:56:37 -05001549 struct work_struct hotplug_work;
Alex Deucherf122c612012-03-30 08:59:57 -04001550 struct work_struct audio_work;
Alex Deucher18917b62010-02-01 16:02:25 -05001551 int num_crtc; /* number of crtcs */
Alex Deucher40bacf12009-12-23 03:23:21 -05001552 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
Matthew Garrett5876dd22010-04-26 15:52:20 -04001553 struct mutex vram_mutex;
Rafał Miłeckia92553a2012-04-28 23:35:20 +02001554 struct r600_audio audio; /* audio stuff */
Alex Deucherce8f5372010-05-07 15:10:16 -04001555 struct notifier_block acpi_nb;
Marek Olšák9eba4a92011-01-05 05:46:48 +01001556	/* only one userspace client can use Hyperz features or CMASK at a time */
Dave Airlieab9e1f52010-07-13 11:11:11 +10001557 struct drm_file *hyperz_filp;
Marek Olšák9eba4a92011-01-05 05:46:48 +01001558 struct drm_file *cmask_filp;
Alex Deucherf376b942010-08-05 21:21:16 -04001559 /* i2c buses */
1560 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
Christian König4d8bf9a2011-10-24 14:54:54 +02001561 /* debugfs */
1562 struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
1563 unsigned debugfs_count;
Jerome Glisse721604a2012-01-05 22:11:05 -05001564 /* virtual memory */
1565 struct radeon_vm_manager vm_manager;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001566};
1567
1568int radeon_device_init(struct radeon_device *rdev,
1569 struct drm_device *ddev,
1570 struct pci_dev *pdev,
1571 uint32_t flags);
1572void radeon_device_fini(struct radeon_device *rdev);
1573int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
1574
Andi Kleen6fcbef72011-10-13 16:08:42 -07001575uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
1576void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
1577u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1578void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
Alex Deucher351a52a2010-06-30 11:52:50 -04001579
Jerome Glisse4c788672009-11-20 14:29:23 +01001580/*
1581 * Cast helper
1582 */
1583#define to_radeon_fence(p) ((struct radeon_fence *)(p))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001584
1585/*
1586 * Registers read & write functions.
1587 */
Benjamin Herrenschmidta0533fb2011-07-13 06:28:12 +00001588#define RREG8(reg) readb((rdev->rmmio) + (reg))
1589#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
1590#define RREG16(reg) readw((rdev->rmmio) + (reg))
1591#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
Dave Airliede1b2892009-08-12 18:43:14 +10001592#define RREG32(reg) r100_mm_rreg(rdev, (reg))
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001593#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
Dave Airliede1b2892009-08-12 18:43:14 +10001594#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001595#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 1596#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
1597#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
1598#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
1599#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
1600#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
Dave Airliede1b2892009-08-12 18:43:14 +10001601#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
1602#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
Rafał Miłeckiaa5120d2010-02-18 20:24:28 +00001603#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
1604#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001605#define WREG32_P(reg, val, mask) \
1606 do { \
1607 uint32_t tmp_ = RREG32(reg); \
1608 tmp_ &= (mask); \
1609 tmp_ |= ((val) & ~(mask)); \
1610 WREG32(reg, tmp_); \
1611 } while (0)
1612#define WREG32_PLL_P(reg, val, mask) \
1613 do { \
1614 uint32_t tmp_ = RREG32_PLL(reg); \
1615 tmp_ &= (mask); \
1616 tmp_ |= ((val) & ~(mask)); \
1617 WREG32_PLL(reg, tmp_); \
1618 } while (0)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001619#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
Alex Deucher351a52a2010-06-30 11:52:50 -04001620#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1621#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
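/*
 * WREG32_P() and WREG32_PLL_P() are read-modify-write helpers: bits set
 * in the mask argument are preserved from the current register contents,
 * the remaining bits are taken from val.  Updating a single field thus
 * passes the inverted field mask, e.g. (register/field names purely
 * illustrative, not defines from this driver):
 *
 *	WREG32_P(EXAMPLE_REG, value << EXAMPLE_SHIFT, ~EXAMPLE_MASK);
 */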
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001622
Dave Airliede1b2892009-08-12 18:43:14 +10001623/*
 1624 * Indirect register accessors
1625 */
1626static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
1627{
1628 uint32_t r;
1629
1630 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
1631 r = RREG32(RADEON_PCIE_DATA);
1632 return r;
1633}
1634
1635static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1636{
1637 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
1638 WREG32(RADEON_PCIE_DATA, (v));
1639}
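/*
 * Both accessors use the usual index/data pair: the register offset is
 * written to RADEON_PCIE_INDEX and the payload then moves through
 * RADEON_PCIE_DATA.  Callers normally go through the RREG32_PCIE() /
 * WREG32_PCIE() macros above, e.g. (register name assumed to come from
 * radeon_reg.h):
 *
 *	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 */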
1640
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001641void r100_pll_errata_after_index(struct radeon_device *rdev);
1642
1643
1644/*
1645 * ASICs helpers.
1646 */
Dave Airlieb995e432009-07-14 02:02:32 +10001647#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
1648 (rdev->pdev->device == 0x5969))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001649#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
1650 (rdev->family == CHIP_RV200) || \
1651 (rdev->family == CHIP_RS100) || \
1652 (rdev->family == CHIP_RS200) || \
1653 (rdev->family == CHIP_RV250) || \
1654 (rdev->family == CHIP_RV280) || \
1655 (rdev->family == CHIP_RS300))
1656#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
1657 (rdev->family == CHIP_RV350) || \
1658 (rdev->family == CHIP_R350) || \
1659 (rdev->family == CHIP_RV380) || \
1660 (rdev->family == CHIP_R420) || \
1661 (rdev->family == CHIP_R423) || \
1662 (rdev->family == CHIP_RV410) || \
1663 (rdev->family == CHIP_RS400) || \
1664 (rdev->family == CHIP_RS480))
Alex Deucher3313e3d2011-01-06 18:49:34 -05001665#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
1666 (rdev->ddev->pdev->device == 0x9443) || \
1667 (rdev->ddev->pdev->device == 0x944B) || \
1668 (rdev->ddev->pdev->device == 0x9506) || \
1669 (rdev->ddev->pdev->device == 0x9509) || \
1670 (rdev->ddev->pdev->device == 0x950F) || \
1671 (rdev->ddev->pdev->device == 0x689C) || \
1672 (rdev->ddev->pdev->device == 0x689D))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001673#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
Alex Deucher99999aa2010-11-16 12:09:41 -05001674#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
1675 (rdev->family == CHIP_RS690) || \
1676 (rdev->family == CHIP_RS740) || \
1677 (rdev->family >= CHIP_R600))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001678#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
1679#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001680#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
Alex Deucher633b9162011-01-06 21:19:11 -05001681#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
1682 (rdev->flags & RADEON_IS_IGP))
Alex Deucher1fe18302011-01-06 21:19:12 -05001683#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
Alex Deucher8848f752012-03-20 17:18:28 -04001684#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
1685#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
1686 (rdev->flags & RADEON_IS_IGP))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001687
1688/*
1689 * BIOS helpers.
1690 */
1691#define RBIOS8(i) (rdev->bios[i])
1692#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1693#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
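/*
 * These helpers read little-endian values straight out of the copied
 * BIOS image.  The BIOS parsing code records the ROM header offset with
 * something along the lines of (0x48 assumed to be where the ATI ROM
 * header pointer lives):
 *
 *	rdev->bios_header_start = RBIOS16(0x48);
 */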
1694
1695int radeon_combios_init(struct radeon_device *rdev);
1696void radeon_combios_fini(struct radeon_device *rdev);
1697int radeon_atombios_init(struct radeon_device *rdev);
1698void radeon_atombios_fini(struct radeon_device *rdev);
1699
1700
1701/*
1702 * RING helpers.
1703 */
Andi Kleence580fa2011-10-13 16:08:47 -07001704#if DRM_DEBUG_CODE == 0
Christian Könige32eb502011-10-23 12:56:27 +02001705static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001706{
Christian Könige32eb502011-10-23 12:56:27 +02001707 ring->ring[ring->wptr++] = v;
1708 ring->wptr &= ring->ptr_mask;
1709 ring->count_dw--;
1710 ring->ring_free_dw--;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001711}
Andi Kleence580fa2011-10-13 16:08:47 -07001712#else
1713/* With debugging this is just too big to inline */
Christian Könige32eb502011-10-23 12:56:27 +02001714void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
Andi Kleence580fa2011-10-13 16:08:47 -07001715#endif
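/*
 * A burst of radeon_ring_write() calls is normally bracketed by a
 * lock/commit pair, roughly (helpers declared elsewhere in this header,
 * packet contents illustrative only):
 *
 *	r = radeon_ring_lock(rdev, ring, ndw);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(reg, 0));
 *	radeon_ring_write(ring, value);
 *	radeon_ring_unlock_commit(rdev, ring);
 */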
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001716
1717/*
 1718 * ASIC macros.
1719 */
Jerome Glisse068a1172009-06-17 13:28:30 +02001720#define radeon_init(rdev) (rdev)->asic->init((rdev))
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001721#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
1722#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
1723#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
Christian Königeb0c19c2012-02-23 15:18:44 +01001724#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
Dave Airlie28d52042009-09-21 14:33:58 +10001725#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
Jerome Glissea2d07b72010-03-09 14:45:11 +00001726#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
Alex Deucherc5b3b852012-02-23 17:53:46 -05001727#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
1728#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
Alex Deucherf7128122012-02-23 17:53:45 -05001729#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
1730#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
1731#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
Christian König4c87bc22011-10-19 19:02:21 +02001732#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
Jerome Glisse721604a2012-01-05 22:11:05 -05001733#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
Christian König312c4a82012-05-02 15:11:09 +02001734#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
Alex Deucherb35ea4a2012-02-23 17:53:43 -05001735#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
1736#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
Alex Deucherc79a49c2012-02-23 17:53:47 -05001737#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
Christian König4c87bc22011-10-19 19:02:21 +02001738#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
1739#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
Alex Deucher27cd7762012-02-23 17:53:42 -05001740#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
1741#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
1742#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
1743#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
1744#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
1745#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
Alex Deucher798bcf72012-02-23 17:53:48 -05001746#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
1747#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
1748#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
1749#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
1750#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
1751#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
1752#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
Alex Deucher9e6f3d02012-02-23 17:53:49 -05001753#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
1754#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
Alex Deucherc79a49c2012-02-23 17:53:47 -05001755#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
Alex Deucher901ea572012-02-23 17:53:39 -05001756#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
1757#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
1758#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
1759#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
Alex Deucherdef9ba92010-04-22 12:39:58 -04001760#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
Alex Deuchera02fa392012-02-23 17:53:41 -05001761#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
1762#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
1763#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
1764#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
1765#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
Alex Deucher0f9e0062012-02-23 17:53:40 -05001766#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
1767#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
1768#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
Alex Deucherc79a49c2012-02-23 17:53:47 -05001769#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
Alex Deucher89e51812012-02-23 17:53:38 -05001770#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001771
Jerome Glisse6cf8a3f2009-09-10 21:46:48 +02001772/* Common functions */
Jerome Glisse700a0cc2010-01-13 15:16:38 +01001773/* AGP */
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001774extern int radeon_gpu_reset(struct radeon_device *rdev);
Jerome Glisse700a0cc2010-01-13 15:16:38 +01001775extern void radeon_agp_disable(struct radeon_device *rdev);
Jerome Glisse21f9a432009-09-11 15:55:33 +02001776extern int radeon_modeset_init(struct radeon_device *rdev);
1777extern void radeon_modeset_fini(struct radeon_device *rdev);
Jerome Glisse9f022dd2009-09-11 15:35:22 +02001778extern bool radeon_card_posted(struct radeon_device *rdev);
Alex Deucherf47299c2010-03-16 20:54:38 -04001779extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
Alex Deucherf46c0122010-03-31 00:33:27 -04001780extern void radeon_update_display_priority(struct radeon_device *rdev);
Dave Airlie72542d72009-12-01 14:06:31 +10001781extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
Jerome Glisse21f9a432009-09-11 15:55:33 +02001782extern void radeon_scratch_init(struct radeon_device *rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04001783extern void radeon_wb_fini(struct radeon_device *rdev);
1784extern int radeon_wb_init(struct radeon_device *rdev);
1785extern void radeon_wb_disable(struct radeon_device *rdev);
Jerome Glisse21f9a432009-09-11 15:55:33 +02001786extern void radeon_surface_init(struct radeon_device *rdev);
1787extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
Jerome Glisseca6ffc62009-10-01 10:20:52 +02001788extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
Jerome Glissed39c3b82009-09-28 18:34:43 +02001789extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
Jerome Glisse312ea8d2009-12-07 15:52:58 +01001790extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
Jerome Glissed03d8582009-12-14 21:02:09 +01001791extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
Jerome Glissed594e462010-02-17 21:54:29 +00001792extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
1793extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
Dave Airlie6a9ee8a2010-02-01 15:38:10 +10001794extern int radeon_resume_kms(struct drm_device *dev);
1795extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
Dave Airlie53595332011-03-14 09:47:24 +10001796extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
Jerome Glisse6cf8a3f2009-09-10 21:46:48 +02001797
Daniel Vetter3574dda2011-02-18 17:59:19 +01001798/*
Jerome Glisse721604a2012-01-05 22:11:05 -05001799 * vm
1800 */
1801int radeon_vm_manager_init(struct radeon_device *rdev);
1802void radeon_vm_manager_fini(struct radeon_device *rdev);
1803int radeon_vm_manager_start(struct radeon_device *rdev);
1804int radeon_vm_manager_suspend(struct radeon_device *rdev);
1805int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
1806void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
1807int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
1808void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
1809int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1810 struct radeon_vm *vm,
1811 struct radeon_bo *bo,
1812 struct ttm_mem_reg *mem);
1813void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1814 struct radeon_bo *bo);
1815int radeon_vm_bo_add(struct radeon_device *rdev,
1816 struct radeon_vm *vm,
1817 struct radeon_bo *bo,
1818 uint64_t offset,
1819 uint32_t flags);
1820int radeon_vm_bo_rmv(struct radeon_device *rdev,
1821 struct radeon_vm *vm,
1822 struct radeon_bo *bo);
1823
Alex Deucherf122c612012-03-30 08:59:57 -04001824/* audio */
1825void r600_audio_update_hdmi(struct work_struct *work);
Jerome Glisse721604a2012-01-05 22:11:05 -05001826
1827/*
Alex Deucher16cdf042011-10-28 10:30:02 -04001828 * R600 vram scratch functions
1829 */
1830int r600_vram_scratch_init(struct radeon_device *rdev);
1831void r600_vram_scratch_fini(struct radeon_device *rdev);
1832
1833/*
Jerome Glisse285484e2011-12-16 17:03:42 -05001834 * r600 cs checking helpers
1835 */
1836unsigned r600_mip_minify(unsigned size, unsigned level);
1837bool r600_fmt_is_valid_color(u32 format);
1838bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
1839int r600_fmt_get_blocksize(u32 format);
1840int r600_fmt_get_nblocksx(u32 format, u32 w);
1841int r600_fmt_get_nblocksy(u32 format, u32 h);
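/*
 * The r600 CS checker combines these helpers to size a surface level:
 * width and height are converted to block counts and multiplied by the
 * per-block byte size, roughly:
 *
 *	size = r600_fmt_get_nblocksx(format, w) *
 *	       r600_fmt_get_nblocksy(format, h) *
 *	       r600_fmt_get_blocksize(format);
 */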
1842
1843/*
Daniel Vetter3574dda2011-02-18 17:59:19 +01001844 * r600 functions used by radeon_encoder.c
1845 */
Rafał Miłecki2cd6218c2010-03-08 22:14:01 +00001846extern void r600_hdmi_enable(struct drm_encoder *encoder);
1847extern void r600_hdmi_disable(struct drm_encoder *encoder);
Christian Koenigdafc3bd2009-10-11 23:49:13 +02001848extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
Alex Deucherfe251e22010-03-24 13:36:43 -04001849
Alex Deucher0af62b02011-01-06 21:19:31 -05001850extern int ni_init_microcode(struct radeon_device *rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001851extern int ni_mc_load_microcode(struct radeon_device *rdev);
Alex Deucher0af62b02011-01-06 21:19:31 -05001852
Alberto Miloned7a29522010-07-06 11:40:24 -04001853/* radeon_acpi.c */
1854#if defined(CONFIG_ACPI)
1855extern int radeon_acpi_init(struct radeon_device *rdev);
1856#else
1857static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
1858#endif
1859
Jerome Glisse4c788672009-11-20 14:29:23 +01001860#include "radeon_object.h"
1861
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001862#endif