/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_ASIC_H__
#define __RADEON_ASIC_H__

/*
 * common functions
 */
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);

void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);

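/*
 * Note: the radeon_legacy_* helpers program clocks by writing the PLL
 * registers directly, as needed on older COMBIOS boards, while the
 * radeon_atom_* helpers execute the equivalent AtomBIOS command tables;
 * each ASIC table below picks whichever variant matches the board's
 * BIOS type.
 */
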
/*
 * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
 */
int r100_init(struct radeon_device *rdev);
int r200_init(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_errata(struct radeon_device *rdev);
void r100_vram_info(struct radeon_device *rdev);
int r100_gpu_reset(struct radeon_device *rdev);
int r100_mc_init(struct radeon_device *rdev);
void r100_mc_fini(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
int r100_wb_init(struct radeon_device *rdev);
void r100_wb_fini(struct radeon_device *rdev);
int r100_pci_gart_init(struct radeon_device *rdev);
void r100_pci_gart_fini(struct radeon_device *rdev);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
void r100_cp_fini(struct radeon_device *rdev);
void r100_cp_disable(struct radeon_device *rdev);
void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
int r100_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset,
                   uint64_t dst_offset,
                   unsigned num_pages,
                   struct radeon_fence *fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ib_test(struct radeon_device *rdev);
int r100_ring_test(struct radeon_device *rdev);

static struct radeon_asic r100_asic = {
        .init = &r100_init,
        .errata = &r100_errata,
        .vram_info = &r100_vram_info,
        .gpu_reset = &r100_gpu_reset,
        .mc_init = &r100_mc_init,
        .mc_fini = &r100_mc_fini,
        .wb_init = &r100_wb_init,
        .wb_fini = &r100_wb_fini,
        .gart_init = &r100_pci_gart_init,
        .gart_fini = &r100_pci_gart_fini,
        .gart_enable = &r100_pci_gart_enable,
        .gart_disable = &r100_pci_gart_disable,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
        .cp_init = &r100_cp_init,
        .cp_fini = &r100_cp_fini,
        .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r100_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = &r100_ib_test,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
        .fence_ring_emit = &r100_fence_ring_emit,
        .cs_parse = &r100_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = NULL,
        .copy = &r100_copy_blit,
        .set_engine_clock = &radeon_legacy_set_engine_clock,
        .set_memory_clock = NULL,
        .set_pcie_lanes = NULL,
        .set_clock_gating = &radeon_legacy_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
        .bandwidth_update = &r100_bandwidth_update,
};

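/*
 * Illustrative sketch (not part of the original header): drivers reach
 * these hooks through thin wrappers around rdev->asic, in the style of
 * the radeon_*() macros defined in radeon.h. Entries left NULL (for
 * r100: copy_dma, set_memory_clock, set_pcie_lanes) must be checked
 * before dispatch. The wrapper below is a hypothetical example, not an
 * existing function.
 */
#if 0
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))

/* Guarded dispatch for an optional hook such as .copy_dma. */
static int radeon_copy_dma_checked(struct radeon_device *rdev,
                                   uint64_t src_offset, uint64_t dst_offset,
                                   unsigned num_pages,
                                   struct radeon_fence *fence)
{
        if (rdev->asic->copy_dma == NULL)
                return -EINVAL; /* no DMA copy hook on this ASIC */
        return rdev->asic->copy_dma(rdev, src_offset, dst_offset,
                                    num_pages, fence);
}
#endif
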
/*
 * r300,r350,rv350,rv380
 */
int r300_init(struct radeon_device *rdev);
void r300_errata(struct radeon_device *rdev);
void r300_vram_info(struct radeon_device *rdev);
int r300_gpu_reset(struct radeon_device *rdev);
int r300_mc_init(struct radeon_device *rdev);
void r300_mc_fini(struct radeon_device *rdev);
void r300_ring_start(struct radeon_device *rdev);
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
int r300_cs_parse(struct radeon_cs_parser *p);
int rv370_pcie_gart_init(struct radeon_device *rdev);
void rv370_pcie_gart_fini(struct radeon_device *rdev);
int rv370_pcie_gart_enable(struct radeon_device *rdev);
void rv370_pcie_gart_disable(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
int r300_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
                  unsigned num_pages,
                  struct radeon_fence *fence);

static struct radeon_asic r300_asic = {
        .init = &r300_init,
        .errata = &r300_errata,
        .vram_info = &r300_vram_info,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &r300_mc_init,
        .mc_fini = &r300_mc_fini,
        .wb_init = &r100_wb_init,
        .wb_fini = &r100_wb_fini,
        .gart_init = &r100_pci_gart_init,
        .gart_fini = &r100_pci_gart_fini,
        .gart_enable = &r100_pci_gart_enable,
        .gart_disable = &r100_pci_gart_disable,
        .gart_tlb_flush = &r100_pci_gart_tlb_flush,
        .gart_set_page = &r100_pci_gart_set_page,
        .cp_init = &r100_cp_init,
        .cp_fini = &r100_cp_fini,
        .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = &r100_ib_test,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
        .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r300_copy_dma,
        .copy = &r100_copy_blit,
        .set_engine_clock = &radeon_legacy_set_engine_clock,
        .set_memory_clock = NULL,
        .set_pcie_lanes = &rv370_set_pcie_lanes,
        .set_clock_gating = &radeon_legacy_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
        .bandwidth_update = &r100_bandwidth_update,
};

/*
 * r420,r423,rv410
 */
extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
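/*
 * r420 has been converted to the newer init path: the NULL hooks below
 * (errata, vram_info, mc_*, wb_*, cp_* setup, ib_test) appear to be
 * stages that r420_init()/r420_resume() drive internally rather than
 * having the core call them through the table.
 */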
static struct radeon_asic r420_asic = {
        .init = &r420_init,
        .fini = &r420_fini,
        .suspend = &r420_suspend,
        .resume = &r420_resume,
        .errata = NULL,
        .vram_info = NULL,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = NULL,
        .mc_fini = NULL,
        .wb_init = NULL,
        .wb_fini = NULL,
        .gart_enable = NULL,
        .gart_disable = NULL,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
        .cp_init = NULL,
        .cp_fini = NULL,
        .cp_disable = NULL,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = NULL,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
        .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r300_copy_dma,
        .copy = &r100_copy_blit,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
        .set_pcie_lanes = &rv370_set_pcie_lanes,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
        .bandwidth_update = &r100_bandwidth_update,
};

/*
 * rs400,rs480
 */
void rs400_errata(struct radeon_device *rdev);
void rs400_vram_info(struct radeon_device *rdev);
int rs400_mc_init(struct radeon_device *rdev);
void rs400_mc_fini(struct radeon_device *rdev);
int rs400_gart_init(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
int rs400_gart_enable(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
static struct radeon_asic rs400_asic = {
        .init = &r300_init,
        .errata = &rs400_errata,
        .vram_info = &rs400_vram_info,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &rs400_mc_init,
        .mc_fini = &rs400_mc_fini,
        .wb_init = &r100_wb_init,
        .wb_fini = &r100_wb_fini,
        .gart_init = &rs400_gart_init,
        .gart_fini = &rs400_gart_fini,
        .gart_enable = &rs400_gart_enable,
        .gart_disable = &rs400_gart_disable,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
        .cp_init = &r100_cp_init,
        .cp_fini = &r100_cp_fini,
        .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = &r100_ib_test,
        .irq_set = &r100_irq_set,
        .irq_process = &r100_irq_process,
        .get_vblank_counter = &r100_get_vblank_counter,
        .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r300_copy_dma,
        .copy = &r100_copy_blit,
        .set_engine_clock = &radeon_legacy_set_engine_clock,
        .set_memory_clock = NULL,
        .set_pcie_lanes = NULL,
        .set_clock_gating = &radeon_legacy_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
        .bandwidth_update = &r100_bandwidth_update,
};

/*
 * rs600.
 */
int rs600_init(struct radeon_device *rdev);
void rs600_errata(struct radeon_device *rdev);
void rs600_vram_info(struct radeon_device *rdev);
int rs600_mc_init(struct radeon_device *rdev);
void rs600_mc_fini(struct radeon_device *rdev);
int rs600_irq_set(struct radeon_device *rdev);
int rs600_irq_process(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
int rs600_gart_init(struct radeon_device *rdev);
void rs600_gart_fini(struct radeon_device *rdev);
int rs600_gart_enable(struct radeon_device *rdev);
void rs600_gart_disable(struct radeon_device *rdev);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs600_asic = {
        .init = &rs600_init,
        .errata = &rs600_errata,
        .vram_info = &rs600_vram_info,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &rs600_mc_init,
        .mc_fini = &rs600_mc_fini,
        .wb_init = &r100_wb_init,
        .wb_fini = &r100_wb_fini,
        .gart_init = &rs600_gart_init,
        .gart_fini = &rs600_gart_fini,
        .gart_enable = &rs600_gart_enable,
        .gart_disable = &rs600_gart_disable,
        .gart_tlb_flush = &rs600_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .cp_init = &r100_cp_init,
        .cp_fini = &r100_cp_fini,
        .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = &r100_ib_test,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
        .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r300_copy_dma,
        .copy = &r100_copy_blit,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
        .set_pcie_lanes = NULL,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .bandwidth_update = &rs600_bandwidth_update,
};

/*
 * rs690,rs740
 */
void rs690_errata(struct radeon_device *rdev);
void rs690_vram_info(struct radeon_device *rdev);
int rs690_mc_init(struct radeon_device *rdev);
void rs690_mc_fini(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic rs690_asic = {
        .init = &rs600_init,
        .errata = &rs690_errata,
        .vram_info = &rs690_vram_info,
        .gpu_reset = &r300_gpu_reset,
        .mc_init = &rs690_mc_init,
        .mc_fini = &rs690_mc_fini,
        .wb_init = &r100_wb_init,
        .wb_fini = &r100_wb_fini,
        .gart_init = &rs400_gart_init,
        .gart_fini = &rs400_gart_fini,
        .gart_enable = &rs400_gart_enable,
        .gart_disable = &rs400_gart_disable,
        .gart_tlb_flush = &rs400_gart_tlb_flush,
        .gart_set_page = &rs400_gart_set_page,
        .cp_init = &r100_cp_init,
        .cp_fini = &r100_cp_fini,
        .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &r300_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = &r100_ib_test,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
        .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r300_copy_dma,
        .copy = &r300_copy_dma,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
        .set_pcie_lanes = NULL,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
        .bandwidth_update = &rs690_bandwidth_update,
};

/*
 * rv515
 */
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
int rv515_gpu_reset(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
static struct radeon_asic rv515_asic = {
        .init = &rv515_init,
        .fini = &rv515_fini,
        .suspend = &rv515_suspend,
        .resume = &rv515_resume,
        .errata = NULL,
        .vram_info = NULL,
        .gpu_reset = &rv515_gpu_reset,
        .mc_init = NULL,
        .mc_fini = NULL,
        .wb_init = NULL,
        .wb_fini = NULL,
        .gart_init = &rv370_pcie_gart_init,
        .gart_fini = &rv370_pcie_gart_fini,
        .gart_enable = NULL,
        .gart_disable = NULL,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
        .cp_init = NULL,
        .cp_fini = NULL,
        .cp_disable = NULL,
        .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = NULL,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
        .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r300_copy_dma,
        .copy = &r100_copy_blit,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
        .set_pcie_lanes = &rv370_set_pcie_lanes,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
        .bandwidth_update = &rv515_bandwidth_update,
};

/*
 * r520,rv530,rv560,rv570,r580
 */
int r520_init(struct radeon_device *rdev);
void r520_errata(struct radeon_device *rdev);
void r520_vram_info(struct radeon_device *rdev);
int r520_mc_init(struct radeon_device *rdev);
void r520_mc_fini(struct radeon_device *rdev);
void r520_bandwidth_update(struct radeon_device *rdev);
static struct radeon_asic r520_asic = {
        .init = &r520_init,
        .errata = &r520_errata,
        .vram_info = &r520_vram_info,
        .gpu_reset = &rv515_gpu_reset,
        .mc_init = &r520_mc_init,
        .mc_fini = &r520_mc_fini,
        .wb_init = &r100_wb_init,
        .wb_fini = &r100_wb_fini,
        .gart_init = &rv370_pcie_gart_init,
        .gart_fini = &rv370_pcie_gart_fini,
        .gart_enable = &rv370_pcie_gart_enable,
        .gart_disable = &rv370_pcie_gart_disable,
        .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
        .gart_set_page = &rv370_pcie_gart_set_page,
        .cp_init = &r100_cp_init,
        .cp_fini = &r100_cp_fini,
        .cp_disable = &r100_cp_disable,
        .cp_commit = &r100_cp_commit,
        .ring_start = &rv515_ring_start,
        .ring_test = &r100_ring_test,
        .ring_ib_execute = &r100_ring_ib_execute,
        .ib_test = &r100_ib_test,
        .irq_set = &rs600_irq_set,
        .irq_process = &rs600_irq_process,
        .get_vblank_counter = &rs600_get_vblank_counter,
        .fence_ring_emit = &r300_fence_ring_emit,
        .cs_parse = &r300_cs_parse,
        .copy_blit = &r100_copy_blit,
        .copy_dma = &r300_copy_dma,
        .copy = &r100_copy_blit,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
        .set_pcie_lanes = &rv370_set_pcie_lanes,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r100_set_surface_reg,
        .clear_surface_reg = r100_clear_surface_reg,
        .bandwidth_update = &r520_bandwidth_update,
};

/*
 * r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
 */
int r600_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
int r600_suspend(struct radeon_device *rdev);
int r600_resume(struct radeon_device *rdev);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence);
int r600_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
                  unsigned num_pages,
                  struct radeon_fence *fence);
int r600_irq_process(struct radeon_device *rdev);
int r600_irq_set(struct radeon_device *rdev);
int r600_gpu_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
                         uint32_t tiling_flags, uint32_t pitch,
                         uint32_t offset, uint32_t obj_size);
int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ib_test(struct radeon_device *rdev);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
                   uint64_t src_offset, uint64_t dst_offset,
                   unsigned num_pages, struct radeon_fence *fence);

static struct radeon_asic r600_asic = {
        .errata = NULL,
        .init = &r600_init,
        .fini = &r600_fini,
        .suspend = &r600_suspend,
        .resume = &r600_resume,
        .cp_commit = &r600_cp_commit,
        .vram_info = NULL,
        .gpu_reset = &r600_gpu_reset,
        .mc_init = NULL,
        .mc_fini = NULL,
        .wb_init = &r600_wb_init,
        .wb_fini = &r600_wb_fini,
        .gart_enable = NULL,
        .gart_disable = NULL,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .cp_init = NULL,
        .cp_fini = NULL,
        .cp_disable = NULL,
        .ring_start = NULL,
        .ring_test = &r600_ring_test,
        .ring_ib_execute = &r600_ring_ib_execute,
        .ib_test = &r600_ib_test,
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = &r600_copy_blit,
        .copy = &r600_copy_blit,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
        .set_pcie_lanes = NULL,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r600_set_surface_reg,
        .clear_surface_reg = r600_clear_surface_reg,
        .bandwidth_update = &r520_bandwidth_update,
};

/*
 * rv770,rv730,rv710,rv740
 */
int rv770_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
int rv770_gpu_reset(struct radeon_device *rdev);

static struct radeon_asic rv770_asic = {
        .errata = NULL,
        .init = &rv770_init,
        .fini = &rv770_fini,
        .suspend = &rv770_suspend,
        .resume = &rv770_resume,
        .cp_commit = &r600_cp_commit,
        .vram_info = NULL,
        .gpu_reset = &rv770_gpu_reset,
        .mc_init = NULL,
        .mc_fini = NULL,
        .wb_init = &r600_wb_init,
        .wb_fini = &r600_wb_fini,
        .gart_enable = NULL,
        .gart_disable = NULL,
        .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
        .gart_set_page = &rs600_gart_set_page,
        .cp_init = NULL,
        .cp_fini = NULL,
        .cp_disable = NULL,
        .ring_start = NULL,
        .ring_test = &r600_ring_test,
        .ring_ib_execute = &r600_ring_ib_execute,
        .ib_test = &r600_ib_test,
        .irq_set = &r600_irq_set,
        .irq_process = &r600_irq_process,
        .fence_ring_emit = &r600_fence_ring_emit,
        .cs_parse = &r600_cs_parse,
        .copy_blit = &r600_copy_blit,
        .copy_dma = &r600_copy_blit,
        .copy = &r600_copy_blit,
        .set_engine_clock = &radeon_atom_set_engine_clock,
        .set_memory_clock = &radeon_atom_set_memory_clock,
        .set_pcie_lanes = NULL,
        .set_clock_gating = &radeon_atom_set_clock_gating,
        .set_surface_reg = r600_set_surface_reg,
        .clear_surface_reg = r600_clear_surface_reg,
        .bandwidth_update = &r520_bandwidth_update,
};
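
/*
 * Illustrative sketch (assumed, simplified): radeon_asic_init() in
 * radeon_device.c binds one of the tables above to the device by
 * switching on the detected chip family.  The function name and case
 * list below are for illustration only.
 */
#if 0
static int radeon_asic_init_example(struct radeon_device *rdev)
{
        switch (rdev->family) {
        case CHIP_R100:
                rdev->asic = &r100_asic;
                break;
        case CHIP_R300:
                rdev->asic = &r300_asic;
                break;
        case CHIP_RV770:
                rdev->asic = &rv770_asic;
                break;
        /* ... one case per supported family ... */
        default:
                return -EINVAL;
        }
        return 0;
}
#endif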

#endif