/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
 *
 * Copyright (C) The Weather Channel, Inc.  2002.
 * Copyright (C) 2004 Nicolai Haehnle.
 * All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Nicolai Haehnle <prefect_@gmx.net>
 */

#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"

#include <asm/unaligned.h>

#define R300_SIMULTANEOUS_CLIPRECTS		4

/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
 */
static const int r300_cliprect_cntl[4] = {
	0xAAAA,
	0xEEEE,
	0xFEFE,
	0xFFFE
};

/**
 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
 * buffer, starting with index n.
 */
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
			       drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
	struct drm_clip_rect box;
	int nr;
	int i;
	RING_LOCALS;

	nr = cmdbuf->nbox - n;
	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
		nr = R300_SIMULTANEOUS_CLIPRECTS;

	DRM_DEBUG("%i cliprects\n", nr);

	if (nr) {
		BEGIN_RING(6 + nr * 2);
		OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));

		for (i = 0; i < nr; ++i) {
			if (DRM_COPY_FROM_USER_UNCHECKED
			    (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
				DRM_ERROR("copy cliprect faulted\n");
				return -EFAULT;
			}

			box.x2--;	/* Hardware expects inclusive bottom-right corner */
			box.y2--;

			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
				box.x1 = (box.x1) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2) &
					R300_CLIPRECT_MASK;
			} else {
				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
					R300_CLIPRECT_MASK;
			}

			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
				 (box.y1 << R300_CLIPRECT_Y_SHIFT));
			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
				 (box.y2 << R300_CLIPRECT_Y_SHIFT));

		}

		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);

		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
		 * client might be able to trample over memory.
		 * The impact should be very limited, but I'd rather be safe than
		 * sorry.
		 */
		OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
		OUT_RING(0);
		OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
		ADVANCE_RING();
	} else {
		/* Why we allow zero cliprect rendering:
		 * There are some commands in a command buffer that must be submitted
		 * even when there are no cliprects, e.g. DMA buffer discard
		 * or state setting (though state setting could be avoided by
		 * simulating a loss of context).
		 *
		 * Now since the cmdbuf interface is so chaotic right now (and is
		 * bound to remain that way for a bit until things settle down),
		 * it is basically impossible to filter out the commands that are
		 * necessary and those that aren't.
		 *
		 * So I choose the safe way and don't do any filtering at all;
		 * instead, I simply set up the engine so that all rendering
		 * can't produce any fragments.
		 */
		BEGIN_RING(2);
		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
		ADVANCE_RING();
	}

	/* flush cache and wait idle clean after cliprect change */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	ADVANCE_RING();
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	return 0;
}

static u8 r300_reg_flags[0x10000 >> 2];

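/**
 * Build the r300_reg_flags table used to validate packet0 writes: each
 * register dword in the 0x0000-0xFFFF range is marked MARK_SAFE (may be
 * emitted directly) or MARK_CHECK_OFFSET (the written value is an offset that
 * must pass radeon_check_offset()).  Registers never added to a range keep
 * flag 0 and fail validation later.  RV515 and newer chips get the R500
 * ranges instead of the R300 fragment shader ranges.
 */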
void r300_init_reg_flags(struct drm_device *dev)
{
	int i;
	drm_radeon_private_t *dev_priv = dev->dev_private;

	memset(r300_reg_flags, 0, 0x10000 >> 2);
#define ADD_RANGE_MARK(reg, count, mark) \
		for (i = ((reg) >> 2); i < ((reg) >> 2) + (count); i++)\
			r300_reg_flags[i] |= (mark);

#define MARK_SAFE		1
#define MARK_CHECK_OFFSET	2

#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)

	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
	ADD_RANGE(R300_VAP_CNTL, 1);
	ADD_RANGE(R300_SE_VTE_CNTL, 2);
	ADD_RANGE(0x2134, 2);
	ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
	ADD_RANGE(0x21DC, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
	ADD_RANGE(R300_VAP_CLIP_X_0, 4);
	ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
	ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
	ADD_RANGE(R300_GB_ENABLE, 1);
	ADD_RANGE(R300_GB_MSPOS0, 5);
	ADD_RANGE(R300_TX_INVALTAGS, 1);
	ADD_RANGE(R300_TX_ENABLE, 1);
	ADD_RANGE(0x4200, 4);
	ADD_RANGE(0x4214, 1);
	ADD_RANGE(R300_RE_POINTSIZE, 1);
	ADD_RANGE(0x4230, 3);
	ADD_RANGE(R300_RE_LINE_CNT, 1);
	ADD_RANGE(R300_RE_UNK4238, 1);
	ADD_RANGE(0x4260, 3);
	ADD_RANGE(R300_RE_SHADE, 4);
	ADD_RANGE(R300_RE_POLYGON_MODE, 5);
	ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
	ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
	ADD_RANGE(R300_RE_CULL_CNTL, 1);
	ADD_RANGE(0x42C0, 2);
	ADD_RANGE(R300_RS_CNTL_0, 2);

	ADD_RANGE(R300_SC_HYPERZ, 2);
	ADD_RANGE(0x43E8, 1);

	ADD_RANGE(0x46A4, 5);

	ADD_RANGE(R300_RE_FOG_STATE, 1);
	ADD_RANGE(R300_FOG_COLOR_R, 3);
	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
	ADD_RANGE(0x4BD8, 1);
	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
	ADD_RANGE(0x4E00, 1);
	ADD_RANGE(R300_RB3D_CBLEND, 2);
	ADD_RANGE(R300_RB3D_COLORMASK, 1);
	ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
	ADD_RANGE(0x4E50, 9);
	ADD_RANGE(0x4E88, 1);
	ADD_RANGE(0x4EA0, 2);
	ADD_RANGE(R300_ZB_CNTL, 3);
	ADD_RANGE(R300_ZB_FORMAT, 4);
	ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET);	/* check offset */
	ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
	ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
	ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);

	ADD_RANGE(R300_TX_FILTER_0, 16);
	ADD_RANGE(R300_TX_FILTER1_0, 16);
	ADD_RANGE(R300_TX_SIZE_0, 16);
	ADD_RANGE(R300_TX_FORMAT_0, 16);
	ADD_RANGE(R300_TX_PITCH_0, 16);
	/* Texture offset is dangerous and needs more checking */
	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
	ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);

	/* Sporadic registers used as primitives are emitted */
	ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);

	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
		ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
		ADD_RANGE(R500_US_CONFIG, 2);
		ADD_RANGE(R500_US_CODE_ADDR, 3);
		ADD_RANGE(R500_US_FC_CTRL, 1);
		ADD_RANGE(R500_RS_IP_0, 16);
		ADD_RANGE(R500_RS_INST_0, 16);
		ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
		ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
		ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
	} else {
		ADD_RANGE(R300_PFS_CNTL_0, 3);
		ADD_RANGE(R300_PFS_NODE_0, 4);
		ADD_RANGE(R300_PFS_TEXI_0, 64);
		ADD_RANGE(R300_PFS_INSTR0_0, 64);
		ADD_RANGE(R300_PFS_INSTR1_0, 64);
		ADD_RANGE(R300_PFS_INSTR2_0, 64);
		ADD_RANGE(R300_PFS_INSTR3_0, 64);
		ADD_RANGE(R300_RS_INTERP_0, 8);
		ADD_RANGE(R300_RS_ROUTE_0, 8);

	}
}

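/**
 * Check whether the register range [reg, reg + count*4) consists only of
 * registers marked MARK_SAFE by r300_init_reg_flags().  Returns 0 if every
 * register is safe, 1 if at least one register needs further checking, and
 * -1 if the start register lies outside the 64 KiB register space.
 */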
static __inline__ int r300_check_range(unsigned reg, int count)
{
	int i;
	if (reg & ~0xffff)
		return -1;
	for (i = (reg >> 2); i < (reg >> 2) + count; i++)
		if (r300_reg_flags[i] != MARK_SAFE)
			return 1;
	return 0;
}

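/**
 * Emit a packet0 whose register range did not pass r300_check_range().
 * Every value is validated individually: values written to registers marked
 * MARK_CHECK_OFFSET must pass radeon_check_offset(), and any register with an
 * unknown flag causes the command buffer to be rejected.
 */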
static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
							   dev_priv,
							   drm_radeon_kcmd_buffer_t
							   *cmdbuf,
							   drm_r300_cmd_header_t
							   header)
{
	int reg;
	int sz;
	int i;
	int values[64];
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	if ((sz > 64) || (sz < 0)) {
		DRM_ERROR
		    ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
		     reg, sz);
		return -EINVAL;
	}
	for (i = 0; i < sz; i++) {
		values[i] = ((int *)cmdbuf->buf)[i];
		switch (r300_reg_flags[(reg >> 2) + i]) {
		case MARK_SAFE:
			break;
		case MARK_CHECK_OFFSET:
			if (!radeon_check_offset(dev_priv, (u32) values[i])) {
				DRM_ERROR
				    ("Offset failed range check (reg=%04x sz=%d)\n",
				     reg, sz);
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("Register %04x failed check as flag=%02x\n",
				  reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
			return -EINVAL;
		}
	}

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_TABLE(values, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * 4;
	cmdbuf->bufsz -= sz * 4;

	return 0;
}

/**
 * Emits a packet0 setting arbitrary registers.
 * Called by r300_do_cp_cmdbuf.
 *
 * Note that checks are performed on contents and addresses of the registers
 */
static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int reg;
	int sz;
	RING_LOCALS;

	sz = header.packet0.count;
	reg = (header.packet0.reghi << 8) | header.packet0.reglo;

	if (!sz)
		return 0;

	if (sz * 4 > cmdbuf->bufsz)
		return -EINVAL;

	if (reg + sz * 4 >= 0x10000) {
		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
			  sz);
		return -EINVAL;
	}

	if (r300_check_range(reg, sz)) {
		/* go and check everything */
		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
							   header);
	}
	/* the rest of the data is safe to emit, whatever the values the user passed */

	BEGIN_RING(1 + sz);
	OUT_RING(CP_PACKET0(reg, sz - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz);
	ADVANCE_RING();

	cmdbuf->buf += sz * 4;
	cmdbuf->bufsz -= sz * 4;

	return 0;
}

/**
 * Uploads user-supplied vertex program instructions or parameters onto
 * the graphics card.
 * Called by r300_do_cp_cmdbuf.
 */
static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
				    drm_radeon_kcmd_buffer_t *cmdbuf,
				    drm_r300_cmd_header_t header)
{
	int sz;
	int addr;
	RING_LOCALS;

	sz = header.vpu.count;
	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;

	if (!sz)
		return 0;
	if (sz * 16 > cmdbuf->bufsz)
		return -EINVAL;

	/* VAP is very sensitive so we purge cache before we program it
	 * and we also flush its state before & after */
	BEGIN_RING(6);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
	OUT_RING(0);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	BEGIN_RING(3 + sz * 4);
	OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
	OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
	ADVANCE_RING();

	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
	OUT_RING(0);
	ADVANCE_RING();

	cmdbuf->buf += sz * 16;
	cmdbuf->bufsz -= sz * 16;

	return 0;
}

/**
 * Emit a clear packet from userspace.
 * Called by r300_emit_packet3.
 */
static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
				      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	RING_LOCALS;

	if (8 * 4 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(10);
	OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
	OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
		 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
	OUT_RING_TABLE((int *)cmdbuf->buf, 8);
	ADVANCE_RING();

	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(R300_RB3D_DC_FLUSH);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* set flush flag */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED;

	cmdbuf->buf += 8 * 4;
	cmdbuf->bufsz -= 8 * 4;

	return 0;
}

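/**
 * Validate and emit a 3D_LOAD_VBPNTR packet3 (vertex array pointer setup).
 * The payload is copied out of the command buffer so that every vertex array
 * offset can be checked with radeon_check_offset() before it is emitted.
 */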
static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
					       drm_radeon_kcmd_buffer_t *cmdbuf,
					       u32 header)
{
	int count, i, k;
#define MAX_ARRAY_PACKET  64
	u32 payload[MAX_ARRAY_PACKET];
	u32 narrays;
	RING_LOCALS;

	count = (header >> 16) & 0x3fff;

	if ((count + 1) > MAX_ARRAY_PACKET) {
		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
			  count);
		return -EINVAL;
	}
	memset(payload, 0, MAX_ARRAY_PACKET * 4);
	memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);

	/* carefully check packet contents */

	narrays = payload[0];
	k = 0;
	i = 1;
	while ((k < narrays) && (i < (count + 1))) {
		i++;		/* skip attribute field */
		if (!radeon_check_offset(dev_priv, payload[i])) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
		if (k == narrays)
			break;
		/* have one more to process, they come in pairs */
		if (!radeon_check_offset(dev_priv, payload[i])) {
			DRM_ERROR
			    ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
			     k, i);
			return -EINVAL;
		}
		k++;
		i++;
	}
	/* do the counts match what we expect ? */
	if ((k != narrays) || (i != (count + 1))) {
		DRM_ERROR
		    ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
		     k, i, narrays, count + 1);
		return -EINVAL;
	}

	/* all clear, output packet */

	BEGIN_RING(count + 2);
	OUT_RING(header);
	OUT_RING_TABLE(payload, count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	return 0;
}

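/**
 * Validate and emit a CNTL_BITBLT_MULTI packet3.  When the packet carries
 * source and/or destination pitch/offset control words, the offsets are
 * verified with radeon_check_offset() before the blit is passed to the ring.
 */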
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
					     drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd = (u32 *) cmdbuf->buf;
	int count, ret;
	RING_LOCALS;

	count = (cmd[0] >> 16) & 0x3fff;

	if (cmd[0] & 0x8000) {
		u32 offset;

		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[2] << 10;
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
				return -EINVAL;
			}
		}

		if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
		    (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
			offset = cmd[3] << 10;
			ret = !radeon_check_offset(dev_priv, offset);
			if (ret) {
				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
				return -EINVAL;
			}

		}
	}

	BEGIN_RING(count + 2);
	OUT_RING(cmd[0]);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	return 0;
}

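/**
 * Validate and emit a 3D_DRAW_INDX_2 packet3.  The vertex count announced in
 * the packet must match the amount of inline index data; an indexed draw with
 * no inline indices must be followed immediately by an INDX_BUFFER packet,
 * whose register address, buffer offset and size are checked here as well.
 */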
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 *cmd;
	int count;
	int expected_count;
	RING_LOCALS;

	cmd = (u32 *) cmdbuf->buf;
	count = (cmd[0] >> 16) & 0x3fff;
	expected_count = cmd[1] >> 16;
	if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
		expected_count = (expected_count + 1) / 2;

	if (count && count != expected_count) {
		DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
			  count, expected_count);
		return -EINVAL;
	}

	BEGIN_RING(count + 2);
	OUT_RING(cmd[0]);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	if (!count) {
		drm_r300_cmd_header_t header;

		if (cmdbuf->bufsz < 4 * 4 + sizeof(header)) {
			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
			return -EINVAL;
		}

		header.u = *(unsigned int *)cmdbuf->buf;

		cmdbuf->buf += sizeof(header);
		cmdbuf->bufsz -= sizeof(header);
		cmd = (u32 *) cmdbuf->buf;

		if (header.header.cmd_type != R300_CMD_PACKET3 ||
		    header.packet3.packet != R300_CMD_PACKET3_RAW ||
		    cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
			DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
			return -EINVAL;
		}

		if ((cmd[1] & 0x8000ffff) != 0x80000810) {
			DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
			return -EINVAL;
		}
		if (!radeon_check_offset(dev_priv, cmd[2])) {
			DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
			return -EINVAL;
		}
		if (cmd[3] != expected_count) {
			DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
				  cmd[3], expected_count);
			return -EINVAL;
		}

		BEGIN_RING(4);
		OUT_RING(cmd[0]);
		OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
		ADVANCE_RING();

		cmdbuf->buf += 4 * 4;
		cmdbuf->bufsz -= 4 * 4;
	}

	return 0;
}

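/**
 * Emit a raw packet3 from the command buffer after checking that its type is
 * one of the known-safe packets.  Packets that set up vertex arrays, blits or
 * index buffers are routed to the dedicated checking functions above, and
 * drawing packets clear the flush/purge tracking flags.
 */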
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
					    drm_radeon_kcmd_buffer_t *cmdbuf)
{
	u32 header;
	int count;
	RING_LOCALS;

	if (4 > cmdbuf->bufsz)
		return -EINVAL;

	/* Fixme !! This simply emits a packet without much checking.
	   We need to be smarter. */

	/* obtain first word - actual packet3 header */
	header = *(u32 *) cmdbuf->buf;

	/* Is it packet 3 ? */
	if ((header >> 30) != 0x3) {
		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
		return -EINVAL;
	}

	count = (header >> 16) & 0x3fff;

	/* Check again now that we know how much data to expect */
	if ((count + 2) * 4 > cmdbuf->bufsz) {
		DRM_ERROR
		    ("Expected packet3 of length %d but have only %d bytes left\n",
		     (count + 2) * 4, cmdbuf->bufsz);
		return -EINVAL;
	}

	/* Is it a packet type we know about ? */
	switch (header & 0xff00) {
	case RADEON_3D_LOAD_VBPNTR:	/* load vertex array pointers */
		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);

	case RADEON_CNTL_BITBLT_MULTI:
		return r300_emit_bitblt_multi(dev_priv, cmdbuf);

	case RADEON_CP_INDX_BUFFER:
		DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
		return -EINVAL;
	case RADEON_CP_3D_DRAW_IMMD_2:
		/* triggers drawing using in-packet vertex data */
	case RADEON_CP_3D_DRAW_VBUF_2:
		/* triggers drawing of vertex buffers setup elsewhere */
		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
					   RADEON_PURGE_EMITED);
		break;
	case RADEON_CP_3D_DRAW_INDX_2:
		/* triggers drawing using indices to vertex buffer */
		/* whenever we send vertex we clear flush & purge */
		dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
					   RADEON_PURGE_EMITED);
		return r300_emit_draw_indx_2(dev_priv, cmdbuf);
	case RADEON_WAIT_FOR_IDLE:
	case RADEON_CP_NOP:
		/* these packets are safe */
		break;
	default:
		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
		return -EINVAL;
	}

	BEGIN_RING(count + 2);
	OUT_RING(header);
	OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
	ADVANCE_RING();

	cmdbuf->buf += (count + 2) * 4;
	cmdbuf->bufsz -= (count + 2) * 4;

	return 0;
}

/**
 * Emit a rendering packet3 from userspace.
 * Called by r300_do_cp_cmdbuf.
 */
static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
					drm_radeon_kcmd_buffer_t *cmdbuf,
					drm_r300_cmd_header_t header)
{
	int n;
	int ret;
	char *orig_buf = cmdbuf->buf;
	int orig_bufsz = cmdbuf->bufsz;

	/* This is a do-while-loop so that we run the interior at least once,
	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
	 */
	n = 0;
	do {
		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
			if (ret)
				return ret;

			cmdbuf->buf = orig_buf;
			cmdbuf->bufsz = orig_bufsz;
		}

		switch (header.packet3.packet) {
		case R300_CMD_PACKET3_CLEAR:
			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
			ret = r300_emit_clear(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_clear failed\n");
				return ret;
			}
			break;

		case R300_CMD_PACKET3_RAW:
			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
			if (ret) {
				DRM_ERROR("r300_emit_raw_packet3 failed\n");
				return ret;
			}
			break;

		default:
			DRM_ERROR("bad packet3 type %i at %p\n",
				  header.packet3.packet,
				  cmdbuf->buf - sizeof(header));
			return -EINVAL;
		}

		n += R300_SIMULTANEOUS_CLIPRECTS;
	} while (n < cmdbuf->nbox);

	return 0;
}

/* Some of the R300 chips seem to be extremely touchy about the two registers
 * that are configured in r300_pacify.
 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
 * sends a command buffer that contains only state setting commands and a
 * vertex program/parameter upload sequence, this will eventually lead to a
 * lockup, unless the sequence is bracketed by calls to r300_pacify.
 * So we should take great care to *always* call r300_pacify before
 * *anything* 3D related, and again afterwards. This is what the
 * call bracket in r300_do_cp_cmdbuf is for.
 */

/**
 * Emit the sequence to pacify R300.
 */
static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
{
	uint32_t cache_z, cache_3d, cache_2d;
	RING_LOCALS;

	cache_z = R300_ZC_FLUSH;
	cache_2d = R300_RB2D_DC_FLUSH;
	cache_3d = R300_RB3D_DC_FLUSH;
	if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
		/* we can purge; primitives were drawn since the last purge */
		cache_z |= R300_ZC_FREE;
		cache_2d |= R300_RB2D_DC_FREE;
		cache_3d |= R300_RB3D_DC_FREE;
	}

	/* flush & purge zbuffer */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
	OUT_RING(cache_z);
	ADVANCE_RING();
	/* flush & purge 3d */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	OUT_RING(cache_3d);
	ADVANCE_RING();
	/* flush & purge texture */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
	OUT_RING(0);
	ADVANCE_RING();
	/* FIXME: is this one really needed ? */
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
	OUT_RING(0);
	ADVANCE_RING();
	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
	ADVANCE_RING();
	/* flush & purge 2d through E2 as RB2D will trigger lockup */
	BEGIN_RING(4);
	OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
	OUT_RING(cache_2d);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
		 RADEON_WAIT_HOST_IDLECLEAN);
	ADVANCE_RING();
	/* set flush & purge flags */
	dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
}

/**
 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
 * be careful about how this function is called.
 */
static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
{
	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
	struct drm_radeon_master_private *master_priv = master->driver_priv;

	buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
	buf->pending = 1;
	buf->used = 0;
}

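/**
 * Translate an R300_CMD_WAIT header into a RADEON_WAIT_UNTIL write that makes
 * the CP wait for the requested combination of 2D/3D idle (clean) states.
 * Empty or unknown wait flags are silently ignored.
 */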
static void r300_cmd_wait(drm_radeon_private_t *dev_priv,
			  drm_r300_cmd_header_t header)
{
	u32 wait_until;
	RING_LOCALS;

	if (!header.wait.flags)
		return;

	wait_until = 0;

	switch (header.wait.flags) {
	case R300_WAIT_2D:
		wait_until = RADEON_WAIT_2D_IDLE;
		break;
	case R300_WAIT_3D:
		wait_until = RADEON_WAIT_3D_IDLE;
		break;
	case R300_NEW_WAIT_2D_3D:
		wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_3D_IDLE;
		break;
	case R300_NEW_WAIT_2D_2D_CLEAN:
		wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_2D_IDLECLEAN;
		break;
	case R300_NEW_WAIT_3D_3D_CLEAN:
		wait_until = RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN;
		break;
	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
		wait_until = RADEON_WAIT_2D_IDLE | RADEON_WAIT_2D_IDLECLEAN;
		wait_until |= RADEON_WAIT_3D_IDLE | RADEON_WAIT_3D_IDLECLEAN;
		break;
	default:
		return;
	}

	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
	OUT_RING(wait_until);
	ADVANCE_RING();
}

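/**
 * Handle an R300_CMD_SCRATCH command: bump the age of the selected scratch
 * register, update the user-space reference/pending counters that the command
 * points to for each listed buffer, and emit a write of the new age to the
 * scratch register so the client can tell when the buffers were processed.
 */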
static int r300_scratch(drm_radeon_private_t *dev_priv,
			drm_radeon_kcmd_buffer_t *cmdbuf,
			drm_r300_cmd_header_t header)
{
	u32 *ref_age_base;
	u32 i, buf_idx, h_pending;
	u64 ptr_addr;
	RING_LOCALS;

	if (cmdbuf->bufsz <
	    (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
		return -EINVAL;
	}

	if (header.scratch.reg >= 5) {
		return -EINVAL;
	}

	dev_priv->scratch_ages[header.scratch.reg]++;

	ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
	ref_age_base = (u32 *)(unsigned long)ptr_addr;

	cmdbuf->buf += sizeof(u64);
	cmdbuf->bufsz -= sizeof(u64);

	for (i = 0; i < header.scratch.n_bufs; i++) {
		buf_idx = *(u32 *)cmdbuf->buf;
		buf_idx *= 2; /* 8 bytes per buf */

		if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
			return -EINVAL;
		}

		if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
			return -EINVAL;
		}

		if (h_pending == 0) {
			return -EINVAL;
		}

		h_pending--;

		if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
			return -EINVAL;
		}

		cmdbuf->buf += sizeof(buf_idx);
		cmdbuf->bufsz -= sizeof(buf_idx);
	}

	BEGIN_RING(2);
	OUT_RING(CP_PACKET0(RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0));
	OUT_RING(dev_priv->scratch_ages[header.scratch.reg]);
	ADVANCE_RING();

	return 0;
}

/**
 * Uploads user-supplied r500 fragment program instructions or constants onto
 * the graphics card.
 * Called by r300_do_cp_cmdbuf.
 */
static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
				   drm_radeon_kcmd_buffer_t *cmdbuf,
				   drm_r300_cmd_header_t header)
{
	int sz;
	int addr;
	int type;
	int clamp;
	int stride;
	RING_LOCALS;

	sz = header.r500fp.count;
	/* address is 9 bits 0 - 8, bit 1 of flags is part of address */
	addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;

	type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
	clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);

	addr |= (type << 16);
	addr |= (clamp << 17);

	stride = type ? 4 : 6;

	DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
	if (!sz)
		return 0;
	if (sz * stride * 4 > cmdbuf->bufsz)
		return -EINVAL;

	BEGIN_RING(3 + sz * stride);
	OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
	OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
	OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);

	ADVANCE_RING();

	cmdbuf->buf += sz * stride * 4;
	cmdbuf->bufsz -= sz * stride * 4;

	return 0;
}


/**
 * Parses and validates a user-supplied command buffer and emits appropriate
 * commands on the DMA ring buffer.
 * Called by the ioctl handler function radeon_cp_cmdbuf.
 */
int r300_do_cp_cmdbuf(struct drm_device *dev,
		      struct drm_file *file_priv,
		      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	int emit_dispatch_age = 0;
	int ret = 0;

	DRM_DEBUG("\n");

	/* pacify */
	r300_pacify(dev_priv);

	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
		if (ret)
			goto cleanup;
	}

	while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
		int idx;
		drm_r300_cmd_header_t header;

		header.u = *(unsigned int *)cmdbuf->buf;

		cmdbuf->buf += sizeof(header);
		cmdbuf->bufsz -= sizeof(header);

		switch (header.header.cmd_type) {
		case R300_CMD_PACKET0:
			DRM_DEBUG("R300_CMD_PACKET0\n");
			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_packet0 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_VPU:
			DRM_DEBUG("R300_CMD_VPU\n");
			ret = r300_emit_vpu(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_vpu failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_PACKET3:
			DRM_DEBUG("R300_CMD_PACKET3\n");
			ret = r300_emit_packet3(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_packet3 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_END3D:
			DRM_DEBUG("R300_CMD_END3D\n");
			/* TODO:
			   Ideally the userspace driver should not need to issue
			   this call, i.e. the drm driver should issue it
			   automatically and prevent lockups.

			   In practice, we do not understand why this call is
			   needed and what it does (except for some vague guesses
			   that it has to do with cache coherence), and so the
			   userspace driver issues it.

			   Once we know which uses prevent lockups, the code could
			   be moved into the kernel and the userspace driver would
			   no longer need to use this command.

			   Note that issuing this command does not hurt anything
			   except, possibly, performance */
			r300_pacify(dev_priv);
			break;

		case R300_CMD_CP_DELAY:
			/* simple enough, we can do it here */
			DRM_DEBUG("R300_CMD_CP_DELAY\n");
			{
				int i;
				RING_LOCALS;

				BEGIN_RING(header.delay.count);
				for (i = 0; i < header.delay.count; i++)
					OUT_RING(RADEON_CP_PACKET2);
				ADVANCE_RING();
			}
			break;

		case R300_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header.dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				ret = -EINVAL;
				goto cleanup;
			}

			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				ret = -EINVAL;
				goto cleanup;
			}

			emit_dispatch_age = 1;
			r300_discard_buffer(dev, file_priv->master, buf);
			break;

		case R300_CMD_WAIT:
			DRM_DEBUG("R300_CMD_WAIT\n");
			r300_cmd_wait(dev_priv, header);
			break;

		case R300_CMD_SCRATCH:
			DRM_DEBUG("R300_CMD_SCRATCH\n");
			ret = r300_scratch(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_scratch failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_R500FP:
			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
				DRM_ERROR("Calling r500 command on r300 card\n");
				ret = -EINVAL;
				goto cleanup;
			}
			DRM_DEBUG("R300_CMD_R500FP\n");
			ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
			if (ret) {
				DRM_ERROR("r300_emit_r500fp failed\n");
				goto cleanup;
			}
			break;
		default:
			DRM_ERROR("bad cmd_type %i at %p\n",
				  header.header.cmd_type,
				  cmdbuf->buf - sizeof(header));
			ret = -EINVAL;
			goto cleanup;
		}
	}

	DRM_DEBUG("END\n");

      cleanup:
	r300_pacify(dev_priv);

	/* We emit the vertex buffer age here, outside the pacifier "brackets"
	 * for two reasons:
	 *  (1) This may coalesce multiple age emissions into a single one and
	 *  (2) more importantly, some chips lock up hard when scratch registers
	 *      are written inside the pacifier bracket.
	 */
	if (emit_dispatch_age) {
		RING_LOCALS;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);
		RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
		ADVANCE_RING();
	}

	COMMIT_RING();

	return ret;
}