/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

/* TODO: Here are things that need to be done :
 * - surface allocator & initializer : (bit like scratch reg) should
 *   initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 *   related to surface
 * - WB : write back stuff (do it bit like scratch reg things)
 * - Vblank : look at Jesse's rework and what we should do
 * - r600/r700: gart & cp
 * - cs : clean cs ioctl use bitmap & things like that.
 * - power management stuff
 * - Barrier in gart code
 * - Unmappable vram ?
 * - TESTING, TESTING, TESTING
 */

/* Initialization path:
 * We expect that acceleration initialization might fail for various
 * reasons even though we work hard to make it work on most
 * configurations. In order to still have a working userspace in such
 * a situation the init path must succeed up to the memory controller
 * initialization point. Failures before this point are considered
 * fatal errors. Here is the init call chain :
 * radeon_device_init	performs common structure and mutex
 *			initialization
 * asic_init		sets up the GPU memory layout and performs all
 *			one-time initialization (failures in this
 *			function are considered fatal)
 * asic_startup		sets up GPU acceleration; to follow the
 *			guideline, the first thing this function should
 *			do is set up the GPU memory controller (only MC
 *			setup failures are considered fatal)
 */
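
/*
 * A minimal sketch of an asic_startup() following the guideline above
 * (illustrative only; example_mc_program() and example_accel_startup()
 * are hypothetical helpers, not part of the driver API):
 *
 *	static int example_asic_startup(struct radeon_device *rdev)
 *	{
 *		int r;
 *
 *		r = example_mc_program(rdev);
 *		if (r)
 *			return r;	// only MC setup failure is fatal
 *		r = example_accel_startup(rdev);
 *		if (r)
 *			dev_warn(rdev->dev, "acceleration disabled\n");
 *		return 0;		// userspace keeps modesetting
 *	}
 */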

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"

/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;

/*
 * Copy from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8

/* max number of rings */
#define RADEON_NUM_RINGS 3

/* fence seq is set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL

/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0

/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2

/* hardcode these limits for now */
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
	CHIP_ERRATA_PLL_DELAY = 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
#define ATRM_BIOS_PAGE 4096

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
	return false;
}

static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	return -EINVAL;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev);

/*
 * Dummy page
 */
struct radeon_dummy_page {
	struct page *page;
	dma_addr_t addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);


/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll dcpll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Power management
 */
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
extern int si_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
				    unsigned *bankh, unsigned *mtaspect,
				    unsigned *tile_split);

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t scratch_reg;
	uint64_t gpu_addr;
	volatile uint32_t *cpu_addr;
	/* sync_seq is protected by ring emission lock */
	uint64_t sync_seq[RADEON_NUM_RINGS];
	atomic64_t last_seq;
	unsigned long last_activity;
	bool initialized;
};

struct radeon_fence {
	struct radeon_device *rdev;
	struct kref kref;
	/* protected by radeon_fence.lock */
	uint64_t seq;
	/* RB, DMA, etc. */
	unsigned ring;
};

int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
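/* Pick whichever of two fences on the same ring signals later, i.e. the
 * one with the higher sequence number; either argument may be NULL.
 */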
static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
						      struct radeon_fence *b)
{
	if (!a) {
		return b;
	}

	if (!b) {
		return a;
	}

	BUG_ON(a->ring != b->ring);

	if (a->seq > b->seq) {
		return a;
	} else {
		return b;
	}
}

/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_bo *bo;
};

#define RADEON_GEM_MAX_SURFACES 8

/*
 * TTM.
 */
struct radeon_mman {
	struct ttm_bo_global_ref bo_global_ref;
	struct drm_global_reference mem_global_ref;
	struct ttm_bo_device bdev;
	bool mem_global_referenced;
	bool initialized;
};

/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* bo list is protected by bo being reserved */
	struct list_head bo_list;
	/* vm list is protected by vm mutex */
	struct list_head vm_list;
	/* constant after initialization */
	struct radeon_vm *vm;
	struct radeon_bo *bo;
	uint64_t soffset;
	uint64_t eoffset;
	uint32_t flags;
	bool valid;
};

struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head list;
	/* Protected by tbo.reserved */
	u32 placements[3];
	struct ttm_placement placement;
	struct ttm_buffer_object tbo;
	struct ttm_bo_kmap_obj kmap;
	unsigned pin_count;
	void *kptr;
	u32 tiling_flags;
	u32 pitch;
	int surface_reg;
	/* list of all virtual addresses to which this bo is associated */
	struct list_head va;
	/* Constant after initialization */
	struct radeon_device *rdev;
	struct drm_gem_object gem_base;

	struct ttm_bo_kmap_obj dma_buf_vmap;
	int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

struct radeon_bo_list {
	struct ttm_validate_buffer tv;
	struct radeon_bo *bo;
	uint64_t gpu_offset;
	unsigned rdomain;
	unsigned wdomain;
	u32 tiling_flags;
};

/* sub-allocation manager, it has to be protected by another lock.
 * By design this is a helper for other parts of the driver, like the
 * indirect buffer or semaphore, which both have their own locking.
 *
 * The principle is simple: we keep a list of sub allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end: total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects on the
 * same alignment).
 */
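
/*
 * A minimal sketch of the end-of-buffer test described above (purely
 * illustrative, not part of the driver API):
 *
 *	static bool example_sa_fits_at_end(unsigned total_size,
 *					   unsigned last_offset,
 *					   unsigned last_size,
 *					   unsigned alloc_size)
 *	{
 *		return total_size - (last_offset + last_size) >= alloc_size;
 *	}
 */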
struct radeon_sa_manager {
	spinlock_t lock;
	struct radeon_bo *bo;
	struct list_head *hole;
	struct list_head flist[RADEON_NUM_RINGS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
};

struct radeon_sa_bo;

/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct radeon_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct radeon_fence *fence;
};

/*
 * GEM objects.
 */
struct radeon_gem {
	struct mutex mutex;
	struct list_head objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj);

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle);

/*
 * Semaphores.
 */
/* everything here is constant */
struct radeon_semaphore {
	struct radeon_sa_bo *sa_bo;
	signed waiters;
	uint64_t gpu_addr;
};

int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
				struct radeon_semaphore *semaphore,
				int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore **semaphore,
			   struct radeon_fence *fence);

/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
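/* e.g. RADEON_GPU_PAGE_ALIGN(0x1001) == 0x2000,
 *      RADEON_GPU_PAGE_ALIGN(0x1000) == 0x1000
 */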

struct radeon_gart {
	dma_addr_t table_addr;
	struct radeon_bo *robj;
	void *ptr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);


/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64 mc_vram_size;
	u64 visible_vram_size;
	u64 gtt_size;
	u64 gtt_start;
	u64 gtt_end;
	u64 vram_start;
	u64 vram_end;
	unsigned vram_width;
	u64 real_vram_size;
	int vram_mtrr;
	bool vram_is_ddr;
	bool igp_sideport_enabled;
	u64 gtt_base_align;
};

bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);

/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	bool free[32];
	uint32_t reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);


/*
 * IRQS.
 */

struct radeon_unpin_work {
	struct work_struct work;
	struct radeon_device *rdev;
	int crtc_id;
	struct radeon_fence *fence;
	struct drm_pending_vblank_event *event;
	struct radeon_bo *old_rbo;
	u64 new_crtc_base;
};

struct r500_irq_stat_regs {
	u32 disp_int;
	u32 hdmi0_status;
};

struct r600_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 hdmi0_status;
	u32 hdmi1_status;
};

struct evergreen_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 disp_int_cont3;
	u32 disp_int_cont4;
	u32 disp_int_cont5;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 d3grph_int;
	u32 d4grph_int;
	u32 d5grph_int;
	u32 d6grph_int;
	u32 afmt_status1;
	u32 afmt_status2;
	u32 afmt_status3;
	u32 afmt_status4;
	u32 afmt_status5;
	u32 afmt_status6;
};

union radeon_irq_stat_regs {
	struct r500_irq_stat_regs r500;
	struct r600_irq_stat_regs r600;
	struct evergreen_irq_stat_regs evergreen;
};

#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 6

struct radeon_irq {
	bool installed;
	spinlock_t lock;
	atomic_t ring_int[RADEON_NUM_RINGS];
	bool crtc_vblank_int[RADEON_MAX_CRTCS];
	atomic_t pflip[RADEON_MAX_CRTCS];
	wait_queue_head_t vblank_queue;
	bool hpd[RADEON_MAX_HPD_PINS];
	bool gui_idle;
	bool gui_idle_acked;
	wait_queue_head_t idle_queue;
	bool afmt[RADEON_MAX_AFMT_BLOCKS];
	union radeon_irq_stat_regs stat_regs;
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
int radeon_irq_kms_wait_gui_idle(struct radeon_device *rdev);

/*
 * CP & rings.
 */

struct radeon_ib {
	struct radeon_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	int ring;
	struct radeon_fence *fence;
	unsigned vm_id;
	bool is_const_ib;
	struct radeon_fence *sync_to[RADEON_NUM_RINGS];
	struct radeon_semaphore *semaphore;
};

struct radeon_ring {
	struct radeon_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr;
	unsigned rptr_offs;
	unsigned rptr_reg;
	unsigned rptr_save_reg;
	unsigned wptr;
	unsigned wptr_old;
	unsigned wptr_reg;
	unsigned ring_size;
	unsigned ring_free_dw;
	int count_dw;
	unsigned long last_activity;
	unsigned last_rptr;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	bool ready;
	u32 ptr_reg_shift;
	u32 ptr_reg_mask;
	u32 nop;
};

/*
 * VM
 */
struct radeon_vm {
	struct list_head list;
	struct list_head va;
	int id;
	unsigned last_pfn;
	u64 pt_gpu_addr;
	u64 *pt;
	struct radeon_sa_bo *sa_bo;
	struct mutex mutex;
	/* last fence for cs using this vm */
	struct radeon_fence *fence;
};

struct radeon_vm_funcs {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	/* cs mutex must be locked for schedule_ib */
	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
	uint32_t (*page_flags)(struct radeon_device *rdev,
			       struct radeon_vm *vm,
			       uint32_t flags);
	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
			 unsigned pfn, uint64_t addr, uint32_t flags);
};

struct radeon_vm_manager {
	struct mutex lock;
	struct list_head lru_vm;
	uint32_t use_bitmap;
	struct radeon_sa_manager sa_manager;
	uint32_t max_pfn;
	/* fields constant after init */
	const struct radeon_vm_funcs *funcs;
	/* number of VMIDs */
	unsigned nvm;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
};

/*
 * file private structure
 */
struct radeon_fpriv {
	struct radeon_vm vm;
};

/*
 * R6xx+ IH ring
 */
struct r600_ih {
	struct radeon_bo *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr;
	unsigned ring_size;
	uint64_t gpu_addr;
	uint32_t ptr_mask;
	atomic_t lock;
	bool enabled;
};

struct r600_blit_cp_primitives {
	void (*set_render_target)(struct radeon_device *rdev, int format,
				  int w, int h, u64 gpu_addr);
	void (*cp_set_surface_sync)(struct radeon_device *rdev,
				    u32 sync_type, u32 size,
				    u64 mc_addr);
	void (*set_shaders)(struct radeon_device *rdev);
	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
	void (*set_tex_resource)(struct radeon_device *rdev,
				 int format, int w, int h, int pitch,
				 u64 gpu_addr, u32 size);
	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
			     int x2, int y2);
	void (*draw_auto)(struct radeon_device *rdev);
	void (*set_default_state)(struct radeon_device *rdev);
};

struct r600_blit {
	struct radeon_bo *shader_obj;
	struct r600_blit_cp_primitives primitives;
	int max_dim;
	int ring_size_common;
	int ring_size_per_loop;
	u64 shader_gpu_addr;
	u32 vs_offset, ps_offset;
	u32 state_offset;
	u32 state_len;
};

/*
 * SI RLC stuff
 */
struct si_rlc {
	/* for power gating */
	struct radeon_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	/* for clear state */
	struct radeon_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
};

int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib *ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
			    uint32_t **data);
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
			unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);


/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	struct radeon_bo_list lobj;
	uint32_t handle;
	uint32_t flags;
};

struct radeon_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	int kpage_idx[2];
	uint32_t *kpage[2];
	uint32_t *kdata;
	void __user *user_ptr;
	int last_copied_page;
	int last_page_index;
};

struct radeon_cs_parser {
	struct device *dev;
	struct radeon_device *rdev;
	struct drm_file *filp;
	/* chunks */
	unsigned nchunks;
	struct radeon_cs_chunk *chunks;
	uint64_t *chunks_array;
	/* IB */
	unsigned idx;
	/* relocations */
	unsigned nrelocs;
	struct radeon_cs_reloc *relocs;
	struct radeon_cs_reloc **relocs_ptr;
	struct list_head validated;
	/* indices of various chunks */
	int chunk_ib_idx;
	int chunk_relocs_idx;
	int chunk_flags_idx;
	int chunk_const_ib_idx;
	struct radeon_ib ib;
	struct radeon_ib const_ib;
	void *track;
	unsigned family;
	int parser_error;
	u32 cs_flags;
	u32 ring;
	s32 priority;
};

extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);

struct radeon_cs_packet {
	unsigned idx;
	unsigned type;
	unsigned reg;
	unsigned opcode;
	int count;
	unsigned one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);


/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	bool enabled;
	bool use_event;
};

#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET 2048
#define R600_WB_EVENT_OFFSET 3072

/**
 * struct radeon_pm - power management data
 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk: sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk: system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk: ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk: GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth: current bandwidth needs
 *
 * It keeps track of various data needed to take power management
 * decisions. Bandwidth need is used to determine the minimum clocks of
 * the GPU and memory. The equation between gpu/memory clock and available
 * bandwidth is hw dependent (type of memory, bus size, efficiency, ...)
 */
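
/*
 * A minimal sketch (not the driver's actual bandwidth code) of comparing
 * two of the fixed20_12 bandwidth figures documented above; it assumes
 * the dfixed_* helpers from drm_fixed.h are in scope (pulled in via
 * radeon_mode.h):
 */
static inline bool radeon_example_bandwidth_ok(fixed20_12 available,
					       fixed20_12 needed)
{
	/* fixed20_12 keeps the value shifted left by 12 bits in .full,
	 * e.g. needed.full = dfixed_const(1600) for 1600 MByte/s, so
	 * comparing .full directly compares the real values */
	return available.full >= needed.full;
}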

enum radeon_pm_method {
	PM_METHOD_PROFILE,
	PM_METHOD_DYNPM,
};

enum radeon_dynpm_state {
	DYNPM_STATE_DISABLED,
	DYNPM_STATE_MINIMUM,
	DYNPM_STATE_PAUSED,
	DYNPM_STATE_ACTIVE,
	DYNPM_STATE_SUSPENDED,
};
enum radeon_dynpm_action {
	DYNPM_ACTION_NONE,
	DYNPM_ACTION_MINIMUM,
	DYNPM_ACTION_DOWNCLOCK,
	DYNPM_ACTION_UPCLOCK,
	DYNPM_ACTION_DEFAULT
};

enum radeon_voltage_type {
	VOLTAGE_NONE = 0,
	VOLTAGE_GPIO,
	VOLTAGE_VDDC,
	VOLTAGE_SW
};

enum radeon_pm_state_type {
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
};

enum radeon_pm_profile_type {
	PM_PROFILE_DEFAULT,
	PM_PROFILE_AUTO,
	PM_PROFILE_LOW,
	PM_PROFILE_MID,
	PM_PROFILE_HIGH,
};

#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX 1
#define PM_PROFILE_MID_SH_IDX 2
#define PM_PROFILE_HIGH_SH_IDX 3
#define PM_PROFILE_LOW_MH_IDX 4
#define PM_PROFILE_MID_MH_IDX 5
#define PM_PROFILE_HIGH_MH_IDX 6
#define PM_PROFILE_MAX 7

struct radeon_pm_profile {
	int dpms_off_ps_idx;
	int dpms_on_ps_idx;
	int dpms_off_cm_idx;
	int dpms_on_cm_idx;
};

enum radeon_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
	THERMAL_TYPE_SI,
};

struct radeon_voltage {
	enum radeon_voltage_type type;
	/* gpio voltage */
	struct radeon_gpio_rec gpio;
	u32 delay; /* delay in usec from voltage drop to sclk change */
	bool active_high; /* voltage drop is active when bit is high */
	/* VDDC voltage */
	u8 vddc_id; /* index into vddc voltage table */
	u8 vddci_id; /* index into vddci voltage table */
	bool vddci_enabled;
	/* r6xx+ sw */
	u16 voltage;
	/* evergreen+ vddci */
	u16 vddci;
};

/* clock mode flags */
#define RADEON_PM_MODE_NO_DISPLAY (1 << 0)

struct radeon_pm_clock_info {
	/* memory clock */
	u32 mclk;
	/* engine clock */
	u32 sclk;
	/* voltage info */
	struct radeon_voltage voltage;
	/* standardized clock flags */
	u32 flags;
};

/* state flags */
#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)

struct radeon_power_state {
	enum radeon_pm_state_type type;
	struct radeon_pm_clock_info *clock_info;
	/* number of valid clock modes in this power state */
	int num_clock_modes;
	struct radeon_pm_clock_info *default_clock_mode;
	/* standardized state flags */
	u32 flags;
	u32 misc; /* vbios specific flags */
	u32 misc2; /* vbios specific flags */
	int pcie_lanes; /* pcie lanes */
};

/*
 * Some modes are overclocked by a very low value; accept them
 */
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */

struct radeon_pm {
	struct mutex mutex;
	/* write locked while reprogramming mclk */
	struct rw_semaphore mclk_lock;
	u32 active_crtcs;
	int active_crtc_count;
	int req_vblank;
	bool vblank_sync;
	fixed20_12 max_bandwidth;
	fixed20_12 igp_sideport_mclk;
	fixed20_12 igp_system_mclk;
	fixed20_12 igp_ht_link_clk;
	fixed20_12 igp_ht_link_width;
	fixed20_12 k8_bandwidth;
	fixed20_12 sideport_bandwidth;
	fixed20_12 ht_bandwidth;
	fixed20_12 core_bandwidth;
	fixed20_12 sclk;
	fixed20_12 mclk;
	fixed20_12 needed_bandwidth;
	struct radeon_power_state *power_state;
	/* number of valid power states */
	int num_power_states;
	int current_power_state_index;
	int current_clock_mode_index;
	int requested_power_state_index;
	int requested_clock_mode_index;
	int default_power_state_index;
	u32 current_sclk;
	u32 current_mclk;
	u16 current_vddc;
	u16 current_vddci;
	u32 default_sclk;
	u32 default_mclk;
	u16 default_vddc;
	u16 default_vddci;
	struct radeon_i2c_chan *i2c_bus;
	/* selected pm method */
	enum radeon_pm_method pm_method;
	/* dynpm power management */
	struct delayed_work dynpm_idle_work;
	enum radeon_dynpm_state dynpm_state;
	enum radeon_dynpm_action dynpm_planned_action;
	unsigned long dynpm_action_timeout;
	bool dynpm_can_upclock;
	bool dynpm_can_downclock;
	/* profile-based power management */
	enum radeon_pm_profile_type profile;
	int profile_index;
	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
	/* internal thermal controller on rv6xx+ */
	enum radeon_int_thermal_type int_thermal_type;
	struct device *int_hwmon_dev;
};

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance);

struct r600_audio {
	int channels;
	int rate;
	int bits_per_sample;
	u8 status_bits;
	u8 category_code;
};

/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev, int test_number);


/*
 * Testing
 */
void radeon_test_moves(struct radeon_device *rdev);
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *cpA,
			   struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list *files;
	unsigned num_files;
};

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);


/*
 * ASIC specific functions.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform a
	 * special operation on specific ioctls. For instance on wait idle
	 * some hw might want to perform an HDP flush through MMIO, as it
	 * seems that some R6XX/R7XX hw doesn't take HDP flushes into
	 * account if programmed through the ring.
	 */
	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
	/* check if 3D engine is idle */
	bool (*gui_idle)(struct radeon_device *rdev);
	/* wait for mc_idle */
	int (*mc_wait_for_idle)(struct radeon_device *rdev);
	/* gart */
	struct {
		void (*tlb_flush)(struct radeon_device *rdev);
		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	} gart;
	/* ring specific callbacks */
	struct {
		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
				       struct radeon_semaphore *semaphore, bool emit_wait);
		int (*cs_parse)(struct radeon_cs_parser *p);
		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
	} ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
		int (*process)(struct radeon_device *rdev);
	} irq;
	/* displays */
	struct {
		/* display watermarks */
		void (*bandwidth_update)(struct radeon_device *rdev);
		/* get frame count */
		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
		/* wait for vblank */
		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
	} display;
	/* copy functions for bo handling */
	struct {
		int (*blit)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		u32 blit_ring_index;
		int (*dma)(struct radeon_device *rdev,
			   uint64_t src_offset,
			   uint64_t dst_offset,
			   unsigned num_gpu_pages,
			   struct radeon_fence **fence);
		u32 dma_ring_index;
		/* method used for bo copy */
		int (*copy)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence **fence);
		/* ring used for bo copies */
		u32 copy_ring_index;
	} copy;
	/* surfaces */
	struct {
		int (*set_reg)(struct radeon_device *rdev, int reg,
			       uint32_t tiling_flags, uint32_t pitch,
			       uint32_t offset, uint32_t obj_size);
		void (*clear_reg)(struct radeon_device *rdev, int reg);
	} surface;
	/* hotplug detect */
	struct {
		void (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
	} hpd;
	/* power management */
	struct {
		void (*misc)(struct radeon_device *rdev);
		void (*prepare)(struct radeon_device *rdev);
		void (*finish)(struct radeon_device *rdev);
		void (*init_profile)(struct radeon_device *rdev);
		void (*get_dynpm_state)(struct radeon_device *rdev);
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
		int (*get_pcie_lanes)(struct radeon_device *rdev);
		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	} pm;
	/* pageflipping */
	struct {
		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
	} pflip;
};
1228
Jerome Glisse21f9a432009-09-11 15:55:33 +02001229/*
1230 * Asic structures
1231 */
Dave Airlie551ebd82009-09-01 15:25:57 +10001232struct r100_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001233 const unsigned *reg_safe_bm;
1234 unsigned reg_safe_bm_size;
1235 u32 hdp_cntl;
Dave Airlie551ebd82009-09-01 15:25:57 +10001236};
1237
Jerome Glisse21f9a432009-09-11 15:55:33 +02001238struct r300_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001239 const unsigned *reg_safe_bm;
1240 unsigned reg_safe_bm_size;
1241 u32 resync_scratch;
1242 u32 hdp_cntl;
Jerome Glisse21f9a432009-09-11 15:55:33 +02001243};
1244
1245struct r600_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001246 unsigned max_pipes;
1247 unsigned max_tile_pipes;
1248 unsigned max_simds;
1249 unsigned max_backends;
1250 unsigned max_gprs;
1251 unsigned max_threads;
1252 unsigned max_stack_entries;
1253 unsigned max_hw_contexts;
1254 unsigned max_gs_threads;
1255 unsigned sx_max_export_size;
1256 unsigned sx_max_export_pos_size;
1257 unsigned sx_max_export_smx_size;
1258 unsigned sq_num_cf_insts;
1259 unsigned tiling_nbanks;
1260 unsigned tiling_npipes;
1261 unsigned tiling_group_size;
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001262 unsigned tile_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001263 unsigned backend_map;
Jerome Glisse21f9a432009-09-11 15:55:33 +02001264};
1265
1266struct rv770_asic {
Jerome Glisse225758d2010-03-09 14:45:10 +00001267 unsigned max_pipes;
1268 unsigned max_tile_pipes;
1269 unsigned max_simds;
1270 unsigned max_backends;
1271 unsigned max_gprs;
1272 unsigned max_threads;
1273 unsigned max_stack_entries;
1274 unsigned max_hw_contexts;
1275 unsigned max_gs_threads;
1276 unsigned sx_max_export_size;
1277 unsigned sx_max_export_pos_size;
1278 unsigned sx_max_export_smx_size;
1279 unsigned sq_num_cf_insts;
1280 unsigned sx_num_of_sets;
1281 unsigned sc_prim_fifo_size;
1282 unsigned sc_hiz_tile_fifo_size;
 1283	unsigned sc_earlyz_tile_fifo_size;
1284 unsigned tiling_nbanks;
1285 unsigned tiling_npipes;
1286 unsigned tiling_group_size;
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001287 unsigned tile_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001288 unsigned backend_map;
Jerome Glisse21f9a432009-09-11 15:55:33 +02001289};
1290
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001291struct evergreen_asic {
1292 unsigned num_ses;
1293 unsigned max_pipes;
1294 unsigned max_tile_pipes;
1295 unsigned max_simds;
1296 unsigned max_backends;
1297 unsigned max_gprs;
1298 unsigned max_threads;
1299 unsigned max_stack_entries;
1300 unsigned max_hw_contexts;
1301 unsigned max_gs_threads;
1302 unsigned sx_max_export_size;
1303 unsigned sx_max_export_pos_size;
1304 unsigned sx_max_export_smx_size;
1305 unsigned sq_num_cf_insts;
1306 unsigned sx_num_of_sets;
1307 unsigned sc_prim_fifo_size;
1308 unsigned sc_hiz_tile_fifo_size;
1309 unsigned sc_earlyz_tile_fifo_size;
1310 unsigned tiling_nbanks;
1311 unsigned tiling_npipes;
1312 unsigned tiling_group_size;
Alex Deuchere7aeeba2010-06-04 13:10:12 -04001313 unsigned tile_config;
Alex Deuchere55b9422011-07-15 19:53:52 +00001314 unsigned backend_map;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001315};
1316
Alex Deucherfecf1d02011-03-02 20:07:29 -05001317struct cayman_asic {
1318 unsigned max_shader_engines;
1319 unsigned max_pipes_per_simd;
1320 unsigned max_tile_pipes;
1321 unsigned max_simds_per_se;
1322 unsigned max_backends_per_se;
1323 unsigned max_texture_channel_caches;
1324 unsigned max_gprs;
1325 unsigned max_threads;
1326 unsigned max_gs_threads;
1327 unsigned max_stack_entries;
1328 unsigned sx_num_of_sets;
1329 unsigned sx_max_export_size;
1330 unsigned sx_max_export_pos_size;
1331 unsigned sx_max_export_smx_size;
1332 unsigned max_hw_contexts;
1333 unsigned sq_num_cf_insts;
1334 unsigned sc_prim_fifo_size;
1335 unsigned sc_hiz_tile_fifo_size;
1336 unsigned sc_earlyz_tile_fifo_size;
1337
1338 unsigned num_shader_engines;
1339 unsigned num_shader_pipes_per_simd;
1340 unsigned num_tile_pipes;
1341 unsigned num_simds_per_se;
1342 unsigned num_backends_per_se;
1343 unsigned backend_disable_mask_per_asic;
1344 unsigned backend_map;
1345 unsigned num_texture_channel_caches;
1346 unsigned mem_max_burst_length_bytes;
1347 unsigned mem_row_size_in_kb;
1348 unsigned shader_engine_tile_size;
1349 unsigned num_gpus;
1350 unsigned multi_gpu_tile_size;
1351
1352 unsigned tile_config;
Alex Deucherfecf1d02011-03-02 20:07:29 -05001353};
1354
Alex Deucher0a96d722012-03-20 17:18:11 -04001355struct si_asic {
1356 unsigned max_shader_engines;
Alex Deucher0a96d722012-03-20 17:18:11 -04001357 unsigned max_tile_pipes;
Alex Deucher1a8ca752012-06-01 18:58:22 -04001358 unsigned max_cu_per_sh;
1359 unsigned max_sh_per_se;
Alex Deucher0a96d722012-03-20 17:18:11 -04001360 unsigned max_backends_per_se;
1361 unsigned max_texture_channel_caches;
1362 unsigned max_gprs;
1363 unsigned max_gs_threads;
1364 unsigned max_hw_contexts;
1365 unsigned sc_prim_fifo_size_frontend;
1366 unsigned sc_prim_fifo_size_backend;
1367 unsigned sc_hiz_tile_fifo_size;
1368 unsigned sc_earlyz_tile_fifo_size;
1369
Alex Deucher0a96d722012-03-20 17:18:11 -04001370 unsigned num_tile_pipes;
1371 unsigned num_backends_per_se;
1372 unsigned backend_disable_mask_per_asic;
1373 unsigned backend_map;
1374 unsigned num_texture_channel_caches;
1375 unsigned mem_max_burst_length_bytes;
1376 unsigned mem_row_size_in_kb;
1377 unsigned shader_engine_tile_size;
1378 unsigned num_gpus;
1379 unsigned multi_gpu_tile_size;
1380
1381 unsigned tile_config;
Alex Deucher0a96d722012-03-20 17:18:11 -04001382};
1383
Jerome Glisse068a1172009-06-17 13:28:30 +02001384union radeon_asic_config {
1385 struct r300_asic r300;
Dave Airlie551ebd82009-09-01 15:25:57 +10001386 struct r100_asic r100;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001387 struct r600_asic r600;
1388 struct rv770_asic rv770;
Alex Deucher32fcdbf2010-03-24 13:33:47 -04001389 struct evergreen_asic evergreen;
Alex Deucherfecf1d02011-03-02 20:07:29 -05001390 struct cayman_asic cayman;
Alex Deucher0a96d722012-03-20 17:18:11 -04001391 struct si_asic si;
Jerome Glisse068a1172009-06-17 13:28:30 +02001392};
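
/*
 * Hedged example of how per-family data might be read back out of the
 * union above: the union is only as valid as the family that filled it
 * in, so callers must branch on the family first. This helper is an
 * illustration only (cayman/si members omitted for brevity), not part
 * of the driver.
 */
static inline unsigned radeon_config_max_tile_pipes_example(enum radeon_family family,
							    const union radeon_asic_config *config)
{
	if (family >= CHIP_CEDAR)
		return config->evergreen.max_tile_pipes;
	if (family >= CHIP_RV770)
		return config->rv770.max_tile_pipes;
	if (family >= CHIP_R600)
		return config->r600.max_tile_pipes;
	return 0; /* pre-r600 parts track pipe config elsewhere */
}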
1393
Daniel Vetter0a10c852010-03-11 21:19:14 +00001394/*
 1395 * asic initialization from radeon_asic.c
1396 */
1397void radeon_agp_disable(struct radeon_device *rdev);
1398int radeon_asic_init(struct radeon_device *rdev);
1399
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001400
1401/*
1402 * IOCTL.
1403 */
1404int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
1405 struct drm_file *filp);
1406int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
1407 struct drm_file *filp);
1408int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
1409 struct drm_file *file_priv);
1410int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
1411 struct drm_file *file_priv);
1412int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1413 struct drm_file *file_priv);
1414int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
1415 struct drm_file *file_priv);
1416int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1417 struct drm_file *filp);
1418int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
1419 struct drm_file *filp);
1420int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
1421 struct drm_file *filp);
1422int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
1423 struct drm_file *filp);
Jerome Glisse721604a2012-01-05 22:11:05 -05001424int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
1425 struct drm_file *filp);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001426int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
Dave Airliee024e112009-06-24 09:48:08 +10001427int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
1428 struct drm_file *filp);
1429int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
1430 struct drm_file *filp);
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001431
Alex Deucher16cdf042011-10-28 10:30:02 -04001432/* VRAM scratch page used to work around the HDP flush bug; also the default VRAM page */
1433struct r600_vram_scratch {
Alex Deucher87cbf8f2010-08-27 13:59:54 -04001434 struct radeon_bo *robj;
1435 volatile uint32_t *ptr;
Alex Deucher16cdf042011-10-28 10:30:02 -04001436 u64 gpu_addr;
Alex Deucher87cbf8f2010-08-27 13:59:54 -04001437};
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001438
Michel Dänzer7a1619b2011-11-10 18:57:26 +01001439
1440/*
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001441 * Core structure, functions and helpers.
1442 */
1443typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
1444typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);
1445
1446struct radeon_device {
Jerome Glisse9f022dd2009-09-11 15:35:22 +02001447 struct device *dev;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001448 struct drm_device *ddev;
1449 struct pci_dev *pdev;
Jerome Glissedee53e72012-07-02 12:45:19 -04001450 struct rw_semaphore exclusive_lock;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001451 /* ASIC */
Jerome Glisse068a1172009-06-17 13:28:30 +02001452 union radeon_asic_config config;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001453 enum radeon_family family;
1454 unsigned long flags;
1455 int usec_timeout;
1456 enum radeon_pll_errata pll_errata;
1457 int num_gb_pipes;
Alex Deucherf779b3e2009-08-19 19:11:39 -04001458 int num_z_pipes;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001459 int disp_priority;
1460 /* BIOS */
1461 uint8_t *bios;
1462 bool is_atom_bios;
1463 uint16_t bios_header_start;
Jerome Glisse4c788672009-11-20 14:29:23 +01001464 struct radeon_bo *stollen_vga_memory;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001465 /* Register mmio */
Dave Airlie4c9bc752009-06-29 18:29:12 +10001466 resource_size_t rmmio_base;
1467 resource_size_t rmmio_size;
Benjamin Herrenschmidta0533fb2011-07-13 06:28:12 +00001468 void __iomem *rmmio;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001469 radeon_rreg_t mc_rreg;
1470 radeon_wreg_t mc_wreg;
1471 radeon_rreg_t pll_rreg;
1472 radeon_wreg_t pll_wreg;
Dave Airliede1b2892009-08-12 18:43:14 +10001473 uint32_t pcie_reg_mask;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001474 radeon_rreg_t pciep_rreg;
1475 radeon_wreg_t pciep_wreg;
Alex Deucher351a52a2010-06-30 11:52:50 -04001476 /* io port */
1477 void __iomem *rio_mem;
1478 resource_size_t rio_mem_size;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001479 struct radeon_clock clock;
1480 struct radeon_mc mc;
1481 struct radeon_gart gart;
1482 struct radeon_mode_info mode_info;
1483 struct radeon_scratch scratch;
1484 struct radeon_mman mman;
Alex Deucher74652802011-08-25 13:39:48 -04001485 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
Jerome Glisse0085c9502012-05-09 15:34:55 +02001486 wait_queue_head_t fence_queue;
Christian Königd6999bc2012-05-09 15:34:45 +02001487 struct mutex ring_lock;
Christian Könige32eb502011-10-23 12:56:27 +02001488 struct radeon_ring ring[RADEON_NUM_RINGS];
Jerome Glissec507f7e2012-05-09 15:34:58 +02001489 bool ib_pool_ready;
1490 struct radeon_sa_manager ring_tmp_bo;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001491 struct radeon_irq irq;
1492 struct radeon_asic *asic;
1493 struct radeon_gem gem;
Jerome Glissec93bb852009-07-13 21:04:08 +02001494 struct radeon_pm pm;
Yang Zhaof657c2a2009-09-15 12:21:01 +10001495 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001496 struct radeon_wb wb;
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001497 struct radeon_dummy_page dummy_page;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001498 bool shutdown;
1499 bool suspend;
Dave Airliead49f502009-07-10 22:36:26 +10001500 bool need_dma32;
Jerome Glisse733289c2009-09-16 15:24:21 +02001501 bool accel_working;
Dave Airliee024e112009-06-24 09:48:08 +10001502 struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001503 const struct firmware *me_fw; /* all family ME firmware */
1504 const struct firmware *pfp_fw; /* r6/700 PFP firmware */
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001505 const struct firmware *rlc_fw; /* r6/700 RLC firmware */
Alex Deucher0af62b02011-01-06 21:19:31 -05001506 const struct firmware *mc_fw; /* NI MC firmware */
Alex Deucher0f0de062012-03-20 17:18:17 -04001507 const struct firmware *ce_fw; /* SI CE firmware */
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001508 struct r600_blit r600_blit;
Alex Deucher16cdf042011-10-28 10:30:02 -04001509 struct r600_vram_scratch vram_scratch;
Alex Deucher3e5cb982009-10-16 12:21:24 -04001510 int msi_enabled; /* msi enabled */
Alex Deucherd8f60cf2009-12-01 13:43:46 -05001511 struct r600_ih ih; /* r6/700 interrupt ring */
Alex Deucher347e7592012-03-20 17:18:21 -04001512 struct si_rlc rlc;
Alex Deucherd4877cf2009-12-04 16:56:37 -05001513 struct work_struct hotplug_work;
Alex Deucherf122c612012-03-30 08:59:57 -04001514 struct work_struct audio_work;
Alex Deucher18917b62010-02-01 16:02:25 -05001515 int num_crtc; /* number of crtcs */
Alex Deucher40bacf12009-12-23 03:23:21 -05001516 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
Rafał Miłecki3299de92012-05-14 21:25:57 +02001517 bool audio_enabled;
 1518	struct r600_audio audio_status; /* current audio state */
Alex Deucherce8f5372010-05-07 15:10:16 -04001519 struct notifier_block acpi_nb;
Marek Olšák9eba4a92011-01-05 05:46:48 +01001520	/* only one userspace client can use HyperZ features or CMASK at a time */
Dave Airlieab9e1f52010-07-13 11:11:11 +10001521 struct drm_file *hyperz_filp;
Marek Olšák9eba4a92011-01-05 05:46:48 +01001522 struct drm_file *cmask_filp;
Alex Deucherf376b942010-08-05 21:21:16 -04001523 /* i2c buses */
1524 struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
Christian König4d8bf9a2011-10-24 14:54:54 +02001525 /* debugfs */
1526 struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
1527 unsigned debugfs_count;
Jerome Glisse721604a2012-01-05 22:11:05 -05001528 /* virtual memory */
1529 struct radeon_vm_manager vm_manager;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001530};
1531
1532int radeon_device_init(struct radeon_device *rdev,
1533 struct drm_device *ddev,
1534 struct pci_dev *pdev,
1535 uint32_t flags);
1536void radeon_device_fini(struct radeon_device *rdev);
1537int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
1538
Andi Kleen6fcbef72011-10-13 16:08:42 -07001539uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
1540void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
1541u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
1542void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
Alex Deucher351a52a2010-06-30 11:52:50 -04001543
Jerome Glisse4c788672009-11-20 14:29:23 +01001544/*
1545 * Cast helper
1546 */
1547#define to_radeon_fence(p) ((struct radeon_fence *)(p))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001548
1549/*
1550 * Registers read & write functions.
1551 */
Benjamin Herrenschmidta0533fb2011-07-13 06:28:12 +00001552#define RREG8(reg) readb((rdev->rmmio) + (reg))
1553#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
1554#define RREG16(reg) readw((rdev->rmmio) + (reg))
1555#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
Dave Airliede1b2892009-08-12 18:43:14 +10001556#define RREG32(reg) r100_mm_rreg(rdev, (reg))
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001557#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
Dave Airliede1b2892009-08-12 18:43:14 +10001558#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001559#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 1560#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
1561#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
1562#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
1563#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
1564#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
Dave Airliede1b2892009-08-12 18:43:14 +10001565#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
1566#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
Rafał Miłeckiaa5120d2010-02-18 20:24:28 +00001567#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
1568#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001569#define WREG32_P(reg, val, mask) \
1570 do { \
1571 uint32_t tmp_ = RREG32(reg); \
1572 tmp_ &= (mask); \
1573 tmp_ |= ((val) & ~(mask)); \
1574 WREG32(reg, tmp_); \
1575 } while (0)
1576#define WREG32_PLL_P(reg, val, mask) \
1577 do { \
1578 uint32_t tmp_ = RREG32_PLL(reg); \
1579 tmp_ &= (mask); \
1580 tmp_ |= ((val) & ~(mask)); \
1581 WREG32_PLL(reg, tmp_); \
1582 } while (0)
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001583#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
Alex Deucher351a52a2010-06-30 11:52:50 -04001584#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
1585#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
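
/*
 * Usage sketch for WREG32_P above. Note the mask convention: bits set
 * in 'mask' are PRESERVED from the current register value, and 'val'
 * supplies the remaining bits. The register offset below is made up
 * purely for illustration.
 */
static inline void radeon_rmw_example(struct radeon_device *rdev)
{
	/* set bits 0-7 of a hypothetical register at 0x1234 to 0x42,
	 * leaving bits 8-31 untouched */
	WREG32_P(0x1234, 0x42, ~0xff);
}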
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001586
Dave Airliede1b2892009-08-12 18:43:14 +10001587/*
 1588 * Indirect register accessors
1589 */
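/*
 * These helpers follow the classic index/data pattern: the target
 * register number is written to RADEON_PCIE_INDEX, then the payload is
 * moved through RADEON_PCIE_DATA. Note (editorial observation, not a
 * documented guarantee): nothing here serializes the two MMIO accesses,
 * so callers are expected to provide any locking they need.
 */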
1590static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
1591{
1592 uint32_t r;
1593
1594 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
1595 r = RREG32(RADEON_PCIE_DATA);
1596 return r;
1597}
1598
1599static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
1600{
1601 WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
1602 WREG32(RADEON_PCIE_DATA, (v));
1603}
1604
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001605void r100_pll_errata_after_index(struct radeon_device *rdev);
1606
1607
1608/*
 1609 * ASIC helpers.
1610 */
Dave Airlieb995e432009-07-14 02:02:32 +10001611#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
1612 (rdev->pdev->device == 0x5969))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001613#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
1614 (rdev->family == CHIP_RV200) || \
1615 (rdev->family == CHIP_RS100) || \
1616 (rdev->family == CHIP_RS200) || \
1617 (rdev->family == CHIP_RV250) || \
1618 (rdev->family == CHIP_RV280) || \
1619 (rdev->family == CHIP_RS300))
1620#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
1621 (rdev->family == CHIP_RV350) || \
1622 (rdev->family == CHIP_R350) || \
1623 (rdev->family == CHIP_RV380) || \
1624 (rdev->family == CHIP_R420) || \
1625 (rdev->family == CHIP_R423) || \
1626 (rdev->family == CHIP_RV410) || \
1627 (rdev->family == CHIP_RS400) || \
1628 (rdev->family == CHIP_RS480))
Alex Deucher3313e3d2011-01-06 18:49:34 -05001629#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
1630 (rdev->ddev->pdev->device == 0x9443) || \
1631 (rdev->ddev->pdev->device == 0x944B) || \
1632 (rdev->ddev->pdev->device == 0x9506) || \
1633 (rdev->ddev->pdev->device == 0x9509) || \
1634 (rdev->ddev->pdev->device == 0x950F) || \
1635 (rdev->ddev->pdev->device == 0x689C) || \
1636 (rdev->ddev->pdev->device == 0x689D))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001637#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
Alex Deucher99999aa2010-11-16 12:09:41 -05001638#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \
1639 (rdev->family == CHIP_RS690) || \
1640 (rdev->family == CHIP_RS740) || \
1641 (rdev->family >= CHIP_R600))
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001642#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
1643#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
Alex Deucherbcc1c2a2010-01-12 17:54:34 -05001644#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
Alex Deucher633b9162011-01-06 21:19:11 -05001645#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
1646 (rdev->flags & RADEON_IS_IGP))
Alex Deucher1fe18302011-01-06 21:19:12 -05001647#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
Alex Deucher8848f752012-03-20 17:18:28 -04001648#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
1649#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
1650 (rdev->flags & RADEON_IS_IGP))
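
/*
 * Illustrative sketch: code is expected to branch on display-engine
 * generation via the feature tests above rather than on raw PCI ids.
 * This helper exists only for illustration; DCE5+ parts also satisfy
 * the DCE4 test, hence the newest-first ordering.
 */
static inline const char *radeon_dce_level_example(struct radeon_device *rdev)
{
	if (ASIC_IS_DCE6(rdev))
		return "DCE6";
	if (ASIC_IS_DCE5(rdev))
		return "DCE5";
	if (ASIC_IS_DCE4(rdev))
		return "DCE4";
	if (ASIC_IS_DCE3(rdev))
		return "DCE3";
	return "pre-DCE3";
}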
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001651
1652/*
1653 * BIOS helpers.
1654 */
1655#define RBIOS8(i) (rdev->bios[i])
1656#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
1657#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
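
/*
 * Hedged example for the RBIOS* accessors above: a legacy VGA ROM image
 * begins with the little-endian signature 0xAA55, so a sanity check can
 * be written directly against the helpers. Illustration only; the real
 * validation lives in the combios/atombios parsers.
 */
static inline bool radeon_bios_signature_ok_example(struct radeon_device *rdev)
{
	return rdev->bios && RBIOS16(0) == 0xaa55;
}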
1658
1659int radeon_combios_init(struct radeon_device *rdev);
1660void radeon_combios_fini(struct radeon_device *rdev);
1661int radeon_atombios_init(struct radeon_device *rdev);
1662void radeon_atombios_fini(struct radeon_device *rdev);
1663
1664
1665/*
1666 * RING helpers.
1667 */
Andi Kleence580fa2011-10-13 16:08:47 -07001668#if DRM_DEBUG_CODE == 0
Christian Könige32eb502011-10-23 12:56:27 +02001669static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001670{
Christian Könige32eb502011-10-23 12:56:27 +02001671 ring->ring[ring->wptr++] = v;
1672 ring->wptr &= ring->ptr_mask;
1673 ring->count_dw--;
1674 ring->ring_free_dw--;
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001675}
Andi Kleence580fa2011-10-13 16:08:47 -07001676#else
1677/* With debugging this is just too big to inline */
Christian Könige32eb502011-10-23 12:56:27 +02001678void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
Andi Kleence580fa2011-10-13 16:08:47 -07001679#endif
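
/*
 * Sketch of typical ring emission (illustration, not driver code): the
 * caller reserves space first, writes its dwords, then commits
 * elsewhere. 0x80000000 is the CP type-2 filler packet header on these
 * parts; the reservation/commit helpers are assumed from the rest of
 * the driver.
 */
static inline void radeon_ring_emit_filler_example(struct radeon_ring *ring)
{
	/* assumes space for 2 dwords was reserved, e.g. via radeon_ring_lock() */
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_write(ring, 0x80000000);
}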
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001680
1681/*
 1682 * ASIC macros.
1683 */
Jerome Glisse068a1172009-06-17 13:28:30 +02001684#define radeon_init(rdev) (rdev)->asic->init((rdev))
Jerome Glisse3ce0a232009-09-08 10:10:24 +10001685#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
1686#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
1687#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
Christian Königeb0c19c2012-02-23 15:18:44 +01001688#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
Dave Airlie28d52042009-09-21 14:33:58 +10001689#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
Jerome Glissea2d07b72010-03-09 14:45:11 +00001690#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
Alex Deucherc5b3b852012-02-23 17:53:46 -05001691#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
1692#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
Alex Deucherf7128122012-02-23 17:53:45 -05001693#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
1694#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
1695#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
Christian König4c87bc22011-10-19 19:02:21 +02001696#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
Jerome Glisse721604a2012-01-05 22:11:05 -05001697#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
Christian König312c4a82012-05-02 15:11:09 +02001698#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
Alex Deucherb35ea4a2012-02-23 17:53:43 -05001699#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
1700#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
Alex Deucherc79a49c2012-02-23 17:53:47 -05001701#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
Christian König4c87bc22011-10-19 19:02:21 +02001702#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
1703#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
Alex Deucher27cd7762012-02-23 17:53:42 -05001704#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
1705#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
1706#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
1707#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
1708#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
1709#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
Alex Deucher798bcf72012-02-23 17:53:48 -05001710#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
1711#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
1712#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
1713#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
1714#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
1715#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
1716#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
Alex Deucher9e6f3d02012-02-23 17:53:49 -05001717#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
1718#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
Alex Deucherc79a49c2012-02-23 17:53:47 -05001719#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
Alex Deucher901ea572012-02-23 17:53:39 -05001720#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
1721#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
1722#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
1723#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
Alex Deucherdef9ba92010-04-22 12:39:58 -04001724#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
Alex Deuchera02fa392012-02-23 17:53:41 -05001725#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
1726#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
1727#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
1728#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
1729#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
Alex Deucher0f9e0062012-02-23 17:53:40 -05001730#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
 1731#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
 1732#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
Alex Deucherc79a49c2012-02-23 17:53:47 -05001733#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
Alex Deucher89e51812012-02-23 17:53:38 -05001734#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
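
/*
 * Hedged example of the wrappers above in use: a hotplug handler might
 * fix up pin polarity before sampling the pin. This mirrors the usual
 * pattern but is a sketch, not the driver's actual hotplug path.
 */
static inline bool radeon_hpd_probe_example(struct radeon_device *rdev,
					    enum radeon_hpd_id hpd)
{
	radeon_hpd_set_polarity(rdev, hpd);
	return radeon_hpd_sense(rdev, hpd);
}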
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001735
Jerome Glisse6cf8a3f2009-09-10 21:46:48 +02001736/* Common functions */
Jerome Glisse90aca4d2010-03-09 14:45:12 +00001737extern int radeon_gpu_reset(struct radeon_device *rdev);
Jerome Glisse700a0cc2010-01-13 15:16:38 +01001738/* AGP */
Jerome Glisse700a0cc2010-01-13 15:16:38 +01001739extern void radeon_agp_disable(struct radeon_device *rdev);
Jerome Glisse21f9a432009-09-11 15:55:33 +02001740extern int radeon_modeset_init(struct radeon_device *rdev);
1741extern void radeon_modeset_fini(struct radeon_device *rdev);
Jerome Glisse9f022dd2009-09-11 15:35:22 +02001742extern bool radeon_card_posted(struct radeon_device *rdev);
Alex Deucherf47299c2010-03-16 20:54:38 -04001743extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
Alex Deucherf46c0122010-03-31 00:33:27 -04001744extern void radeon_update_display_priority(struct radeon_device *rdev);
Dave Airlie72542d72009-12-01 14:06:31 +10001745extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
Jerome Glisse21f9a432009-09-11 15:55:33 +02001746extern void radeon_scratch_init(struct radeon_device *rdev);
Alex Deucher724c80e2010-08-27 18:25:25 -04001747extern void radeon_wb_fini(struct radeon_device *rdev);
1748extern int radeon_wb_init(struct radeon_device *rdev);
1749extern void radeon_wb_disable(struct radeon_device *rdev);
Jerome Glisse21f9a432009-09-11 15:55:33 +02001750extern void radeon_surface_init(struct radeon_device *rdev);
1751extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
Jerome Glisseca6ffc62009-10-01 10:20:52 +02001752extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
Jerome Glissed39c3b82009-09-28 18:34:43 +02001753extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
Jerome Glisse312ea8d2009-12-07 15:52:58 +01001754extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
Jerome Glissed03d8582009-12-14 21:02:09 +01001755extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
Jerome Glissed594e462010-02-17 21:54:29 +00001756extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
1757extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
Dave Airlie6a9ee8a2010-02-01 15:38:10 +10001758extern int radeon_resume_kms(struct drm_device *dev);
1759extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
Dave Airlie53595332011-03-14 09:47:24 +10001760extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
Jerome Glisse6cf8a3f2009-09-10 21:46:48 +02001761
Daniel Vetter3574dda2011-02-18 17:59:19 +01001762/*
Jerome Glisse721604a2012-01-05 22:11:05 -05001763 * vm
1764 */
1765int radeon_vm_manager_init(struct radeon_device *rdev);
1766void radeon_vm_manager_fini(struct radeon_device *rdev);
Jerome Glisse721604a2012-01-05 22:11:05 -05001767int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
1768void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
1769int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
1770void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
1771int radeon_vm_bo_update_pte(struct radeon_device *rdev,
1772 struct radeon_vm *vm,
1773 struct radeon_bo *bo,
1774 struct ttm_mem_reg *mem);
1775void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1776 struct radeon_bo *bo);
1777int radeon_vm_bo_add(struct radeon_device *rdev,
1778 struct radeon_vm *vm,
1779 struct radeon_bo *bo,
1780 uint64_t offset,
1781 uint32_t flags);
1782int radeon_vm_bo_rmv(struct radeon_device *rdev,
1783 struct radeon_vm *vm,
1784 struct radeon_bo *bo);
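
/*
 * Hedged sketch of one plausible VM setup flow inferred from the
 * declarations above (not a documented contract): create the address
 * space, then register a BO in it. The offset and flags below are
 * hypothetical placeholders.
 */
static inline int radeon_vm_setup_example(struct radeon_device *rdev,
					  struct radeon_vm *vm,
					  struct radeon_bo *bo)
{
	int r;

	r = radeon_vm_init(rdev, vm);
	if (r)
		return r;
	r = radeon_vm_bo_add(rdev, vm, bo, 0 /* offset */, 0 /* flags */);
	if (r)
		radeon_vm_fini(rdev, vm);
	return r;
}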
1785
Alex Deucherf122c612012-03-30 08:59:57 -04001786/* audio */
1787void r600_audio_update_hdmi(struct work_struct *work);
Jerome Glisse721604a2012-01-05 22:11:05 -05001788
1789/*
Alex Deucher16cdf042011-10-28 10:30:02 -04001790 * R600 vram scratch functions
1791 */
1792int r600_vram_scratch_init(struct radeon_device *rdev);
1793void r600_vram_scratch_fini(struct radeon_device *rdev);
1794
1795/*
Jerome Glisse285484e2011-12-16 17:03:42 -05001796 * r600 CS checking helpers
1797 */
1798unsigned r600_mip_minify(unsigned size, unsigned level);
1799bool r600_fmt_is_valid_color(u32 format);
1800bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
1801int r600_fmt_get_blocksize(u32 format);
1802int r600_fmt_get_nblocksx(u32 format, u32 w);
1803int r600_fmt_get_nblocksy(u32 format, u32 h);
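
/*
 * Sketch combining the helpers above to size one mip level in bytes,
 * roughly how a CS checker would bound-check a texture; illustration
 * only, not the actual checking logic.
 */
static inline int r600_fmt_level_size_example(u32 format, u32 w, u32 h,
					      unsigned level)
{
	unsigned lw = r600_mip_minify(w, level);
	unsigned lh = r600_mip_minify(h, level);

	return r600_fmt_get_nblocksx(format, lw) *
	       r600_fmt_get_nblocksy(format, lh) *
	       r600_fmt_get_blocksize(format);
}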
1804
1805/*
Daniel Vetter3574dda2011-02-18 17:59:19 +01001806 * r600 functions used by radeon_encoder.c
1807 */
Rafał Miłecki1b688d082012-04-30 15:44:54 +02001808struct radeon_hdmi_acr {
1809 u32 clock;
1810
1811 int n_32khz;
1812 int cts_32khz;
1813
1814 int n_44_1khz;
1815 int cts_44_1khz;
1816
1817 int n_48khz;
1818 int cts_48khz;
1819
1820};
1821
Rafał Miłeckie55d3e62012-05-06 17:29:44 +02001822extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
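
/*
 * Background for the N/CTS pairs above (HDMI audio clock regeneration,
 * stated as a sketch of the spec relationship rather than of driver
 * behaviour): the sink reconstructs the audio clock as
 *
 *   audio_rate = (tmds_clock * N) / (128 * CTS)
 *
 * so r600_hdmi_acr() presumably returns the N/CTS pairs matching the
 * video clock for each supported rate (32/44.1/48 kHz).
 */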
1823
Rafał Miłecki2cd6218c2010-03-08 22:14:01 +00001824extern void r600_hdmi_enable(struct drm_encoder *encoder);
1825extern void r600_hdmi_disable(struct drm_encoder *encoder);
Christian Koenigdafc3bd2009-10-11 23:49:13 +02001826extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
Alex Deucher416a2bd2012-05-31 19:00:25 -04001827extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1828 u32 tiling_pipe_num,
1829 u32 max_rb_num,
1830 u32 total_max_rb_num,
1831 u32 enabled_rb_mask);
Alex Deucherfe251e22010-03-24 13:36:43 -04001832
Rafał Miłeckie55d3e62012-05-06 17:29:44 +02001833/*
1834 * evergreen functions used by radeon_encoder.c
1835 */
1836
1837extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1838
Alex Deucher0af62b02011-01-06 21:19:31 -05001839extern int ni_init_microcode(struct radeon_device *rdev);
Alex Deucher755d8192011-03-02 20:07:34 -05001840extern int ni_mc_load_microcode(struct radeon_device *rdev);
Alex Deucher0af62b02011-01-06 21:19:31 -05001841
Alberto Miloned7a29522010-07-06 11:40:24 -04001842/* radeon_acpi.c */
1843#if defined(CONFIG_ACPI)
1844extern int radeon_acpi_init(struct radeon_device *rdev);
1845#else
1846static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
1847#endif
1848
Jerome Glisse4c788672009-11-20 14:29:23 +01001849#include "radeon_object.h"
1850
Jerome Glisse771fe6b2009-06-05 14:42:42 +02001851#endif