/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_share.h"
#include "r100_track.h"
#include "r300d.h"

#include "r300_reg_safe.h"

/* r300,r350,rv350,rv370,rv380 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
                         unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          const unsigned *auth, unsigned n,
                          radeon_packet0_check_t check);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_object *robj);

/* This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);


/*
 * rv370,rv380 PCIE GART
 */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        int i;

        /* Workaround for a HW bug: do the flush twice */
        for (i = 0; i < 2; i++) {
                tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        }
        mb();
}

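/*
 * Descriptive note on the enable sequence below (a summary of the code,
 * not an additional contract): the common GART structure is initialized,
 * the page table is allocated in VRAM, the GTT aperture start/end and the
 * table base are programmed, out-of-range requests are set to be
 * discarded, and only then is the GART switched on and the TLB flushed.
 */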
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
        uint32_t table_addr;
        uint32_t tmp;
        int r;

        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r) {
                return r;
        }
        r = rv370_debugfs_pcie_gart_info_init(rdev);
        if (r) {
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        }
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        r = radeon_gart_table_vram_alloc(rdev);
        if (r) {
                return r;
        }
        /* discard memory request outside of configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        table_addr = rdev->gart.table_addr;
        WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
        /* FIXME: setup default page */
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        /* Clear error */
        WREG32_PCIE(0x18, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        rv370_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
        rdev->gart.ready = true;
        return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
        if (rdev->gart.table.vram.robj) {
                radeon_object_kunmap(rdev->gart.table.vram.robj);
                radeon_object_unpin(rdev->gart.table.vram.robj);
        }
}

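/*
 * Each GART entry is a single 32-bit dword. A sketch of the packing done
 * by rv370_pcie_gart_set_page() below, for a 40-bit bus address (the
 * meaning of the low flag nibble 0xc is an assumption; it appears to mark
 * the entry valid/usable):
 *
 *   entry = (addr[31:8] -> bits 23:0) | (addr[39:32] -> bits 31:24) | 0xc;
 *
 * e.g. a page-aligned addr of 0x12345000 yields entry 0x0012345C.
 */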
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

        if (i < 0 || i > rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = (lower_32_bits(addr) >> 8) |
               ((upper_32_bits(addr) & 0xff) << 24) |
               0xc;
        /* On x86 we want this to be CPU endian; on powerpc without HW
         * swappers it'll get swapped on the way into VRAM, so there is
         * no need for cpu_to_le32 on VRAM tables. */
        writel(addr, ((void __iomem *)ptr) + (i * 4));
        return 0;
}

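/*
 * Descriptive note: r300_gart_enable() appears to pick the GART backend at
 * runtime. On AGP boards it only makes sure the on-chip GARTs are off; on
 * PCIE boards it points the asic vtable at the rv370 callbacks before
 * enabling, and everything else falls back to the r100 PCI GART.
 */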
int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP) {
                if (rdev->family > CHIP_RV350) {
                        rv370_pcie_gart_disable(rdev);
                } else {
                        r100_pci_gart_disable(rdev);
                }
                return 0;
        }
#endif
        if (rdev->flags & RADEON_IS_PCIE) {
                rdev->asic->gart_disable = &rv370_pcie_gart_disable;
                rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
                rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
                return rv370_pcie_gart_enable(rdev);
        }
        return r100_pci_gart_enable(rdev);
}


/*
 * MC
 */
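/*
 * Rough outline of r300_mc_init() below (a description of the code, not a
 * contract): reset the GPU state, disable any live GART, then size the GPU
 * address space. VRAM and GTT locations start out unset (0xFFFFFFFF) and
 * are resolved by radeon_mc_setup(); with working AGP the GTT is simply
 * placed at the AGP aperture base.
 */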
int r300_mc_init(struct radeon_device *rdev)
{
        int r;

        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }

        r300_gpu_init(rdev);
        r100_pci_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE) {
                rv370_pcie_gart_disable(rdev);
        }

        /* Setup GPU memory space */
        rdev->mc.vram_location = 0xFFFFFFFFUL;
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        printk(KERN_WARNING "[drm] Disabling AGP\n");
                        rdev->flags &= ~RADEON_IS_AGP;
                        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
                } else {
                        rdev->mc.gtt_location = rdev->mc.agp_base;
                }
        }
        r = radeon_mc_setup(rdev);
        if (r) {
                return r;
        }

        /* Program GPU memory space */
        r100_mc_disable_clients(rdev);
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        r100_mc_setup(rdev);
        return 0;
}

void r300_mc_fini(struct radeon_device *rdev)
{
        if (rdev->flags & RADEON_IS_PCIE) {
                rv370_pcie_gart_disable(rdev);
                radeon_gart_table_vram_free(rdev);
        } else {
                r100_pci_gart_disable(rdev);
                radeon_gart_table_ram_free(rdev);
        }
        radeon_gart_fini(rdev);
}


/*
 * Fence emission
 */
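/*
 * How the fence works (a summary of the emission below): after forcing
 * the SC/US blocks idle and flushing the 3D caches, the fence sequence
 * number is written to a scratch register and a software interrupt is
 * fired so the host can notice completion.
 */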
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today the callers are ib schedule and buffer move) */
        /* Write SC register so SC & US assert idle */
        radeon_ring_write(rdev, PACKET0(0x43E0, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(0x43E4, 0));
        radeon_ring_write(rdev, 0);
        /* Flush 3D cache */
        radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
        radeon_ring_write(rdev, (2 << 0));
        radeon_ring_write(rdev, PACKET0(0x4F18, 0));
        radeon_ring_write(rdev, (1 << 0));
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(0x1720, 0));
        radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}


/*
 * Global GPU functions
 */
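/*
 * r300_copy_dma() below moves num_pages pages with the GPU's DMA engine.
 * The engine handles at most 0x1FFFFF bytes per request, so the copy is
 * split into that many loops and the ring is locked for num_loops * 4 + 64
 * dwords up front. The meaning of bits 30/31 in the size dword is an
 * assumption: they look like transfer-control flags carried over from the
 * old DRM code.
 */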
int r300_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
                  unsigned num_pages,
                  struct radeon_fence *fence)
{
        uint32_t size;
        uint32_t cur_size;
        int i, num_loops;
        int r = 0;

        /* radeon pitch is /64 */
        size = num_pages << PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }
        /* Must wait for 2D idle & clean before DMA or hangs might happen */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, (1 << 16));
        for (i = 0; i < num_loops; i++) {
                cur_size = size;
                if (cur_size > 0x1FFFFF) {
                        cur_size = 0x1FFFFF;
                }
                size -= cur_size;
                radeon_ring_write(rdev, PACKET0(0x720, 2));
                radeon_ring_write(rdev, src_offset);
                radeon_ring_write(rdev, dst_offset);
                radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
                src_offset += cur_size;
                dst_offset += cur_size;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
        radeon_ring_unlock_commit(rdev);
        return r;
}

void r300_ring_start(struct radeon_device *rdev)
{
        unsigned gb_tile_config;
        int r;

        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }

        r = radeon_ring_lock(rdev, 64);
        if (r) {
                return;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
        radeon_ring_write(rdev, gb_tile_config);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(0x170C, 0));
        radeon_ring_write(rdev, 1 << 31);
        radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
                           (6 << R300_MS_Y1_SHIFT) |
                           (6 << R300_MS_X2_SHIFT) |
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
                           (6 << R300_MS_Y4_SHIFT) |
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
        radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
        radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
        radeon_ring_write(rdev,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
        radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
        radeon_ring_write(rdev,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
        radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_R300 &&
            (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
                rdev->pll_errata |= CHIP_ERRATA_R300_CG;
        }
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(0x0150);
                if (tmp & (1 << 4)) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
        uint32_t gb_tile_config, tmp;

        r100_hdp_reset(rdev);
        /* FIXME: does rv380 have one pipe? */
        if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
                /* rv350,rv370,rv380 */
                rdev->num_gb_pipes = 1;
        }
        rdev->num_z_pipes = 1;
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        default:
        case 1:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }
        WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(0x170C);
        WREG32(0x170C, tmp | (1 << 31));

        WREG32(R300_RB2D_DSTCACHE_MODE,
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_ga_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        bool reinit_cp;
        int i;

        reinit_cp = rdev->cp.ready;
        rdev->cp.ready = false;
        for (i = 0; i < rdev->usec_timeout; i++) {
                WREG32(RADEON_CP_CSQ_MODE, 0);
                WREG32(RADEON_CP_CSQ_CNTL, 0);
                WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
                (void)RREG32(RADEON_RBBM_SOFT_RESET);
                udelay(200);
                WREG32(RADEON_RBBM_SOFT_RESET, 0);
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (tmp & ((1 << 20) | (1 << 26))) {
                        DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
                        /* GA still busy, soft reset it */
                        WREG32(0x429C, 0x200);
                        WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
                        WREG32(0x43E0, 0);
                        WREG32(0x43E4, 0);
                        WREG32(0x24AC, 0);
                }
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        break;
                }
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        if (reinit_cp) {
                                return r100_cp_init(rdev, rdev->cp.ring_size);
                        }
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RADEON_RBBM_STATUS);
        DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
        return -1;
}

int r300_gpu_reset(struct radeon_device *rdev)
{
        uint32_t status;

        /* reset order likely matters */
        status = RREG32(RADEON_RBBM_STATUS);
        /* reset HDP */
        r100_hdp_reset(rdev);
        /* reset rb2d */
        if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
                r100_rb2d_reset(rdev);
        }
        /* reset GA */
        if (status & ((1 << 20) | (1 << 26))) {
                r300_ga_reset(rdev);
        }
        /* reset CP */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 16)) {
                r100_cp_reset(rdev);
        }
        /* Check if GPU is idle */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 31)) {
                DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                return -1;
        }
        DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
        return 0;
}


/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RADEON_MEM_CNTL);
        if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
                rdev->mc.vram_width = 128;
        } else {
                rdev->mc.vram_width = 64;
        }

        r100_vram_init_sizes(rdev);
}


/*
 * PCIE Lanes
 */

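/*
 * Sketch of the lane reconfiguration below: the requested lane count is
 * translated into a LINK_WIDTH_X* mask, written to PCIE_LC_LINK_WIDTH_CNTL
 * together with RECONFIG_NOW, and the register is then polled until it
 * stops reading back as 0xffffffff. Interpreting that all-ones value as
 * "register inaccessible while the link retrains" is an assumption.
 */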
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
        uint32_t link_width_cntl, mask;

        if (rdev->flags & RADEON_IS_IGP)
                return;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return;

        /* FIXME wait for idle */

        switch (lanes) {
        case 0:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
                break;
        case 1:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
                break;
        case 2:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
                break;
        case 4:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
                break;
        case 8:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                break;
        case 12:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                break;
        case 16:
        default:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
                break;
        }

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
            (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
                return;

        link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                             RADEON_PCIE_LC_RECONFIG_NOW |
                             RADEON_PCIE_LC_RECONFIG_LATER |
                             RADEON_PCIE_LC_SHORT_RECONFIG_EN);
        link_width_cntl |= mask;
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                     RADEON_PCIE_LC_RECONFIG_NOW));

        /* wait for lane set to complete */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        while (link_width_cntl == 0xffffffff)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}


/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
        seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
        seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
        seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
        seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
        seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
        seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
        {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
        return 0;
#endif
}


/*
 * CS functions
 */
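/*
 * Overview (descriptive): the CS checker walks the command stream one
 * packet at a time. r300_packet0_check() validates each PACKET0 register
 * write, rewriting buffer offsets by the relocation's GPU offset and
 * recording state (color buffers, z buffer, textures, scissor) in the
 * r100_cs_track so that draw packets can later be sanity checked.
 */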
static int r300_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
        unsigned i;
        int r;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r100_cs_track *)p->track;
        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                break;
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_reloc_pitch_offset(p, pkt, idx, reg);
                if (r)
                        return r;
                break;
        case R300_RB3D_COLOROFFSET0:
        case R300_RB3D_COLOROFFSET1:
        case R300_RB3D_COLOROFFSET2:
        case R300_RB3D_COLOROFFSET3:
                i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
        case R300_TX_OFFSET_0+8:
        case R300_TX_OFFSET_0+12:
        case R300_TX_OFFSET_0+16:
        case R300_TX_OFFSET_0+20:
        case R300_TX_OFFSET_0+24:
        case R300_TX_OFFSET_0+28:
        case R300_TX_OFFSET_0+32:
        case R300_TX_OFFSET_0+36:
        case R300_TX_OFFSET_0+40:
        case R300_TX_OFFSET_0+44:
        case R300_TX_OFFSET_0+48:
        case R300_TX_OFFSET_0+52:
        case R300_TX_OFFSET_0+56:
        case R300_TX_OFFSET_0+60:
                i = (reg - R300_TX_OFFSET_0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
                break;
        /* Tracked registers */
        case 0x2084:
                /* VAP_VF_CNTL */
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                break;
        case 0x20B4:
                /* VAP_VTX_SIZE */
                track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
                break;
        case 0x2134:
                /* VAP_VF_MAX_VTX_INDX */
                track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
                break;
        case 0x43E4:
                /* SC_SCISSOR1 */
                track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
                break;
        case 0x4E00:
                /* RB3D_CCTL */
                track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
                break;
        case 0x4E38:
        case 0x4E3C:
        case 0x4E40:
        case 0x4E44:
                /* RB3D_COLORPITCH0 */
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_COLOR_TILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_COLOR_MICROTILE_ENABLE;

                tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
                switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
                case 9:
                case 11:
                case 12:
                        track->cb[i].cpp = 1;
                        break;
                case 3:
                case 4:
                case 13:
                case 15:
                        track->cb[i].cpp = 2;
                        break;
                case 6:
                        track->cb[i].cpp = 4;
                        break;
                case 10:
                        track->cb[i].cpp = 8;
                        break;
                case 7:
                        track->cb[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
                                  ((ib_chunk->kdata[idx] >> 21) & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F00:
                /* ZB_CNTL */
                if (ib_chunk->kdata[idx] & 2) {
                        track->z_enabled = true;
                } else {
                        track->z_enabled = false;
                }
                break;
        case 0x4F10:
                /* ZB_FORMAT */
                switch ((ib_chunk->kdata[idx] & 0xF)) {
                case 0:
                case 1:
                        track->zb.cpp = 2;
                        break;
                case 2:
                        track->zb.cpp = 4;
                        break;
                default:
                        DRM_ERROR("Invalid z buffer format (%d) !\n",
                                  (ib_chunk->kdata[idx] & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_DEPTHMACROTILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_DEPTHMICROTILE_TILED;

                tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
                break;
        case 0x4104:
                for (i = 0; i < 16; i++) {
                        bool enabled;

                        enabled = !!(ib_chunk->kdata[idx] & (1 << i));
                        track->textures[i].enabled = enabled;
                }
                break;
        case 0x44C0:
        case 0x44C4:
        case 0x44C8:
        case 0x44CC:
        case 0x44D0:
        case 0x44D4:
        case 0x44D8:
        case 0x44DC:
        case 0x44E0:
        case 0x44E4:
        case 0x44E8:
        case 0x44EC:
        case 0x44F0:
        case 0x44F4:
        case 0x44F8:
        case 0x44FC:
                /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
                tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
                track->textures[i].tex_coord_type = tmp;
                switch ((ib_chunk->kdata[idx] & 0x1F)) {
                case R300_TX_FORMAT_X8:
                case R300_TX_FORMAT_Y4X4:
                case R300_TX_FORMAT_Z3Y3X2:
                        track->textures[i].cpp = 1;
                        break;
                case R300_TX_FORMAT_X16:
                case R300_TX_FORMAT_Y8X8:
                case R300_TX_FORMAT_Z5Y6X5:
                case R300_TX_FORMAT_Z6Y5X5:
                case R300_TX_FORMAT_W4Z4Y4X4:
                case R300_TX_FORMAT_W1Z5Y5X5:
                case R300_TX_FORMAT_DXT1:
                case R300_TX_FORMAT_D3DMFT_CxV8U8:
                case R300_TX_FORMAT_B8G8_B8G8:
                case R300_TX_FORMAT_G8R8_G8B8:
                        track->textures[i].cpp = 2;
                        break;
                case R300_TX_FORMAT_Y16X16:
                case R300_TX_FORMAT_Z11Y11X10:
                case R300_TX_FORMAT_Z10Y11X11:
                case R300_TX_FORMAT_W8Z8Y8X8:
                case R300_TX_FORMAT_W2Z10Y10X10:
                case 0x17:
                case R300_TX_FORMAT_FL_I32:
                case 0x1e:
                case R300_TX_FORMAT_DXT3:
                case R300_TX_FORMAT_DXT5:
                        track->textures[i].cpp = 4;
                        break;
                case R300_TX_FORMAT_W16Z16Y16X16:
                case R300_TX_FORMAT_FL_R16G16B16A16:
                case R300_TX_FORMAT_FL_I32A32:
                        track->textures[i].cpp = 8;
                        break;
                case R300_TX_FORMAT_FL_R32G32B32A32:
                        track->textures[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid texture format %u\n",
                                  (ib_chunk->kdata[idx] & 0x1F));
                        return -EINVAL;
                }
                break;
        case 0x4400:
        case 0x4404:
        case 0x4408:
        case 0x440C:
        case 0x4410:
        case 0x4414:
        case 0x4418:
        case 0x441C:
        case 0x4420:
        case 0x4424:
        case 0x4428:
        case 0x442C:
        case 0x4430:
        case 0x4434:
        case 0x4438:
        case 0x443C:
                /* TX_FILTER0_[0-15] */
                i = (reg - 0x4400) >> 2;
                tmp = ib_chunk->kdata[idx] & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_w = false;
                }
                tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                }
                break;
        case 0x4500:
        case 0x4504:
        case 0x4508:
        case 0x450C:
        case 0x4510:
        case 0x4514:
        case 0x4518:
        case 0x451C:
        case 0x4520:
        case 0x4524:
        case 0x4528:
        case 0x452C:
        case 0x4530:
        case 0x4534:
        case 0x4538:
        case 0x453C:
                /* TX_FORMAT2_[0-15] */
                i = (reg - 0x4500) >> 2;
                tmp = ib_chunk->kdata[idx] & 0x3FFF;
                track->textures[i].pitch = tmp + 1;
                if (p->rdev->family >= CHIP_RV515) {
                        tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
                        track->textures[i].width_11 = tmp;
                        tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
                        track->textures[i].height_11 = tmp;
                }
                break;
        case 0x4480:
        case 0x4484:
        case 0x4488:
        case 0x448C:
        case 0x4490:
        case 0x4494:
        case 0x4498:
        case 0x449C:
        case 0x44A0:
        case 0x44A4:
        case 0x44A8:
        case 0x44AC:
        case 0x44B0:
        case 0x44B4:
        case 0x44B8:
        case 0x44BC:
                /* TX_FORMAT0_[0-15] */
                i = (reg - 0x4480) >> 2;
                tmp = ib_chunk->kdata[idx] & 0x7FF;
                track->textures[i].width = tmp + 1;
                tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
                track->textures[i].height = tmp + 1;
                tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
                track->textures[i].num_levels = tmp;
                tmp = ib_chunk->kdata[idx] & (1 << 31);
                track->textures[i].use_pitch = !!tmp;
                tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
                break;
        case R300_ZB_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case 0x4be8:
                /* valid register only on RV530 */
                if (p->rdev->family == CHIP_RV530)
                        break;
                /* fallthrough, do not move */
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
                       reg, idx);
                return -EINVAL;
        }
        return 0;
}

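/*
 * r300_packet3_check() below handles PACKET3 commands: vertex array
 * pointers and index buffers get their relocations applied, and every
 * draw packet records VAP_VF_CNTL and then runs r100_cs_track_check(),
 * so a draw that touches unvalidated buffers is rejected.
 */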
static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
        unsigned i, c;
        int r;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                c = ib_chunk->kdata[idx++] & 0x1F;
                track->num_arrays = c;
                for (i = 0; i < (c - 1); i += 2, idx += 3) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 0].robj = reloc->robj;
                        track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
                        track->arrays[i + 0].esize &= 0x7F;
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 1].robj = reloc->robj;
                        track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
                        track->arrays[i + 1].esize &= 0x7F;
                }
                if (c & 1) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 0].robj = reloc->robj;
                        track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
                        track->arrays[i + 0].esize &= 0x7F;
                }
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
                }
                break;
        /* Draw packet */
        case PACKET3_3D_DRAW_IMMD:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the cmd stream */
                if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = ib_chunk->kdata[idx+1];
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_IMMD_2:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the cmd stream */
                if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}

int r300_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r100_cs_track track;
        int r;

        r100_cs_track_clear(p->rdev, &track);
        p->track = &track;
        do {
                r = r100_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        r = r100_cs_parse_packet0(p, &pkt,
                                                  p->rdev->config.r300.reg_safe_bm,
                                                  p->rdev->config.r300.reg_safe_bm_size,
                                                  &r300_packet0_check);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r300_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
                if (r) {
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}

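/*
 * r300_init() only has to install the PACKET0 register bitmap generated in
 * r300_reg_safe.h: r100_cs_parse_packet0() consults it for every register
 * write and routes the registers flagged there through r300_packet0_check()
 * above (a description of the mechanism, not of the bitmap's exact
 * encoding).
 */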
int r300_init(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
        return 0;
}