blob: 9418e388b04544e39e016a54fcd33324590d0feb [file] [log] [blame]
/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
 *
 * Copyright (C) The Weather Channel, Inc. 2002.
 * Copyright (C) 2004 Nicolai Haehnle.
 * All Rights Reserved.
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Nicolai Haehnle <prefect_@gmx.net>
 *
 * ------------------------ This file is DEPRECATED! -------------------------
 */
35
David Howells760285e2012-10-02 18:01:07 +010036#include <drm/drmP.h>
David Howells760285e2012-10-02 18:01:07 +010037#include <drm/radeon_drm.h>
Dave Airlie414ed532005-08-16 20:43:16 +100038#include "radeon_drv.h"
39#include "r300_reg.h"
David Herrmann9f50bd82014-08-29 12:12:27 +020040#include "drm_buffer.h"
Dave Airlie414ed532005-08-16 20:43:16 +100041
David Miller958a6f82009-02-18 01:35:23 -080042#include <asm/unaligned.h>
43
/* Maximum number of cliprects the hardware consumes in a single batch. */
#define R300_SIMULTANEOUS_CLIPRECTS		4

/*
 * Value programmed into R300_RE_CLIPRECT_CNTL, indexed by the number of
 * active cliprects minus one.
 */
static const int r300_cliprect_cntl[4] = {
	0xAAAA,
	0xEEEE,
	0xFEFE,
	0xFFFE
};
54
Dave Airlie414ed532005-08-16 20:43:16 +100055/**
56 * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
57 * buffer, starting with index n.
58 */
Dave Airlied985c102006-01-02 21:32:48 +110059static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
60 drm_radeon_kcmd_buffer_t *cmdbuf, int n)
Dave Airlie414ed532005-08-16 20:43:16 +100061{
Dave Airliec60ce622007-07-11 15:27:12 +100062 struct drm_clip_rect box;
Dave Airlie414ed532005-08-16 20:43:16 +100063 int nr;
64 int i;
65 RING_LOCALS;
66
67 nr = cmdbuf->nbox - n;
68 if (nr > R300_SIMULTANEOUS_CLIPRECTS)
69 nr = R300_SIMULTANEOUS_CLIPRECTS;
70
71 DRM_DEBUG("%i cliprects\n", nr);
72
73 if (nr) {
Dave Airlieb5e89ed2005-09-25 14:28:13 +100074 BEGIN_RING(6 + nr * 2);
75 OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
Dave Airlie414ed532005-08-16 20:43:16 +100076
Dave Airlieb5e89ed2005-09-25 14:28:13 +100077 for (i = 0; i < nr; ++i) {
Daniel Vetter1d6ac182013-12-11 11:34:44 +010078 if (copy_from_user
Dave Airlieb5e89ed2005-09-25 14:28:13 +100079 (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
Dave Airlie414ed532005-08-16 20:43:16 +100080 DRM_ERROR("copy cliprect faulted\n");
Eric Anholt20caafa2007-08-25 19:22:43 +100081 return -EFAULT;
Dave Airlie414ed532005-08-16 20:43:16 +100082 }
83
Nicolai Haehnle649ffc02008-08-13 09:50:12 +100084 box.x2--; /* Hardware expects inclusive bottom-right corner */
85 box.y2--;
86
Dave Airlie3d5e2c12008-02-07 15:01:05 +100087 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
88 box.x1 = (box.x1) &
89 R300_CLIPRECT_MASK;
90 box.y1 = (box.y1) &
91 R300_CLIPRECT_MASK;
92 box.x2 = (box.x2) &
93 R300_CLIPRECT_MASK;
94 box.y2 = (box.y2) &
95 R300_CLIPRECT_MASK;
96 } else {
97 box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) &
98 R300_CLIPRECT_MASK;
99 box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) &
100 R300_CLIPRECT_MASK;
101 box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) &
102 R300_CLIPRECT_MASK;
103 box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) &
104 R300_CLIPRECT_MASK;
Dave Airlie3d5e2c12008-02-07 15:01:05 +1000105 }
Nicolai Haehnle649ffc02008-08-13 09:50:12 +1000106
Dave Airlie414ed532005-08-16 20:43:16 +1000107 OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000108 (box.y1 << R300_CLIPRECT_Y_SHIFT));
Dave Airlie414ed532005-08-16 20:43:16 +1000109 OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000110 (box.y2 << R300_CLIPRECT_Y_SHIFT));
Dave Airlie3d5e2c12008-02-07 15:01:05 +1000111
Dave Airlie414ed532005-08-16 20:43:16 +1000112 }
113
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000114 OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]);
Dave Airlie414ed532005-08-16 20:43:16 +1000115
116 /* TODO/SECURITY: Force scissors to a safe value, otherwise the
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000117 * client might be able to trample over memory.
118 * The impact should be very limited, but I'd rather be safe than
119 * sorry.
120 */
121 OUT_RING(CP_PACKET0(R300_RE_SCISSORS_TL, 1));
122 OUT_RING(0);
123 OUT_RING(R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK);
Dave Airlie414ed532005-08-16 20:43:16 +1000124 ADVANCE_RING();
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000125 } else {
Dave Airlie414ed532005-08-16 20:43:16 +1000126 /* Why we allow zero cliprect rendering:
127 * There are some commands in a command buffer that must be submitted
128 * even when there are no cliprects, e.g. DMA buffer discard
129 * or state setting (though state setting could be avoided by
130 * simulating a loss of context).
131 *
132 * Now since the cmdbuf interface is so chaotic right now (and is
133 * bound to remain that way for a bit until things settle down),
134 * it is basically impossible to filter out the commands that are
135 * necessary and those that aren't.
136 *
137 * So I choose the safe way and don't do any filtering at all;
138 * instead, I simply set up the engine so that all rendering
139 * can't produce any fragments.
140 */
141 BEGIN_RING(2);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000142 OUT_RING_REG(R300_RE_CLIPRECT_CNTL, 0);
Dave Airlie414ed532005-08-16 20:43:16 +1000143 ADVANCE_RING();
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000144 }
Dave Airlie414ed532005-08-16 20:43:16 +1000145
Jerome Glisse54f961a2008-08-13 09:46:31 +1000146 /* flus cache and wait idle clean after cliprect change */
147 BEGIN_RING(2);
148 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
149 OUT_RING(R300_RB3D_DC_FLUSH);
150 ADVANCE_RING();
151 BEGIN_RING(2);
152 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
153 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
154 ADVANCE_RING();
155 /* set flush flag */
156 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
157
Dave Airlie414ed532005-08-16 20:43:16 +1000158 return 0;
159}
160
/* Per-register safety classification for the 64 KiB register space, one
 * byte per 32-bit register (MARK_SAFE / MARK_CHECK_OFFSET); populated by
 * r300_init_reg_flags() below. */
static u8 r300_reg_flags[0x10000 >> 2];
Dave Airlie414ed532005-08-16 20:43:16 +1000162
Dave Airlie3d5e2c12008-02-07 15:01:05 +1000163void r300_init_reg_flags(struct drm_device *dev)
Dave Airlie414ed532005-08-16 20:43:16 +1000164{
165 int i;
Dave Airlie3d5e2c12008-02-07 15:01:05 +1000166 drm_radeon_private_t *dev_priv = dev->dev_private;
167
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000168 memset(r300_reg_flags, 0, 0x10000 >> 2);
169#define ADD_RANGE_MARK(reg, count,mark) \
Dave Airlie414ed532005-08-16 20:43:16 +1000170 for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
171 r300_reg_flags[i]|=(mark);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000172
173#define MARK_SAFE 1
174#define MARK_CHECK_OFFSET 2
175
176#define ADD_RANGE(reg, count) ADD_RANGE_MARK(reg, count, MARK_SAFE)
Dave Airlie414ed532005-08-16 20:43:16 +1000177
178 /* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
179 ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000180 ADD_RANGE(R300_VAP_CNTL, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000181 ADD_RANGE(R300_SE_VTE_CNTL, 2);
182 ADD_RANGE(0x2134, 2);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000183 ADD_RANGE(R300_VAP_CNTL_STATUS, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000184 ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
185 ADD_RANGE(0x21DC, 1);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000186 ADD_RANGE(R300_VAP_UNKNOWN_221C, 1);
187 ADD_RANGE(R300_VAP_CLIP_X_0, 4);
Jerome Glisse54f961a2008-08-13 09:46:31 +1000188 ADD_RANGE(R300_VAP_PVS_STATE_FLUSH_REG, 1);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000189 ADD_RANGE(R300_VAP_UNKNOWN_2288, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000190 ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
191 ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
192 ADD_RANGE(R300_GB_ENABLE, 1);
193 ADD_RANGE(R300_GB_MSPOS0, 5);
Jerome Glisse54f961a2008-08-13 09:46:31 +1000194 ADD_RANGE(R300_TX_INVALTAGS, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000195 ADD_RANGE(R300_TX_ENABLE, 1);
196 ADD_RANGE(0x4200, 4);
197 ADD_RANGE(0x4214, 1);
198 ADD_RANGE(R300_RE_POINTSIZE, 1);
199 ADD_RANGE(0x4230, 3);
200 ADD_RANGE(R300_RE_LINE_CNT, 1);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000201 ADD_RANGE(R300_RE_UNK4238, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000202 ADD_RANGE(0x4260, 3);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000203 ADD_RANGE(R300_RE_SHADE, 4);
204 ADD_RANGE(R300_RE_POLYGON_MODE, 5);
205 ADD_RANGE(R300_RE_ZBIAS_CNTL, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000206 ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000207 ADD_RANGE(R300_RE_OCCLUSION_CNTL, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000208 ADD_RANGE(R300_RE_CULL_CNTL, 1);
209 ADD_RANGE(0x42C0, 2);
210 ADD_RANGE(R300_RS_CNTL_0, 2);
Dave Airliec0beb2a2008-05-28 13:52:28 +1000211
Maciej Cencoraaf7ae352009-03-24 01:48:50 +0100212 ADD_RANGE(R300_SU_REG_DEST, 1);
213 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV530)
214 ADD_RANGE(RV530_FG_ZBREG_DEST, 1);
215
Dave Airlie21efa2b2008-06-19 13:01:58 +1000216 ADD_RANGE(R300_SC_HYPERZ, 2);
Dave Airlie414ed532005-08-16 20:43:16 +1000217 ADD_RANGE(0x43E8, 1);
Dave Airliec0beb2a2008-05-28 13:52:28 +1000218
Dave Airlie414ed532005-08-16 20:43:16 +1000219 ADD_RANGE(0x46A4, 5);
Dave Airliec0beb2a2008-05-28 13:52:28 +1000220
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000221 ADD_RANGE(R300_RE_FOG_STATE, 1);
222 ADD_RANGE(R300_FOG_COLOR_R, 3);
Dave Airlie414ed532005-08-16 20:43:16 +1000223 ADD_RANGE(R300_PP_ALPHA_TEST, 2);
224 ADD_RANGE(0x4BD8, 1);
225 ADD_RANGE(R300_PFS_PARAM_0_X, 64);
226 ADD_RANGE(0x4E00, 1);
227 ADD_RANGE(R300_RB3D_CBLEND, 2);
228 ADD_RANGE(R300_RB3D_COLORMASK, 1);
Oliver McFaddenc6c656b2007-07-11 12:24:10 +1000229 ADD_RANGE(R300_RB3D_BLEND_COLOR, 3);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000230 ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
Dave Airlie414ed532005-08-16 20:43:16 +1000231 ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
232 ADD_RANGE(0x4E50, 9);
233 ADD_RANGE(0x4E88, 1);
234 ADD_RANGE(0x4EA0, 2);
Dave Airlie21efa2b2008-06-19 13:01:58 +1000235 ADD_RANGE(R300_ZB_CNTL, 3);
236 ADD_RANGE(R300_ZB_FORMAT, 4);
237 ADD_RANGE_MARK(R300_ZB_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
238 ADD_RANGE(R300_ZB_DEPTHPITCH, 1);
239 ADD_RANGE(R300_ZB_DEPTHCLEARVALUE, 1);
240 ADD_RANGE(R300_ZB_ZMASK_OFFSET, 13);
Maciej Cencoraaf7ae352009-03-24 01:48:50 +0100241 ADD_RANGE(R300_ZB_ZPASS_DATA, 2); /* ZB_ZPASS_DATA, ZB_ZPASS_ADDR */
Dave Airlie414ed532005-08-16 20:43:16 +1000242
243 ADD_RANGE(R300_TX_FILTER_0, 16);
Dave Airlie45f17102006-03-19 19:12:10 +1100244 ADD_RANGE(R300_TX_FILTER1_0, 16);
Dave Airlie414ed532005-08-16 20:43:16 +1000245 ADD_RANGE(R300_TX_SIZE_0, 16);
246 ADD_RANGE(R300_TX_FORMAT_0, 16);
Dave Airlied985c102006-01-02 21:32:48 +1100247 ADD_RANGE(R300_TX_PITCH_0, 16);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000248 /* Texture offset is dangerous and needs more checking */
Dave Airlie414ed532005-08-16 20:43:16 +1000249 ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
Dave Airlie45f17102006-03-19 19:12:10 +1100250 ADD_RANGE(R300_TX_CHROMA_KEY_0, 16);
Dave Airlie414ed532005-08-16 20:43:16 +1000251 ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
252
253 /* Sporadic registers used as primitives are emitted */
Dave Airlie21efa2b2008-06-19 13:01:58 +1000254 ADD_RANGE(R300_ZB_ZCACHE_CTLSTAT, 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000255 ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
256 ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
257 ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
258
Dave Airlie3d5e2c12008-02-07 15:01:05 +1000259 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) {
Dave Airliec0beb2a2008-05-28 13:52:28 +1000260 ADD_RANGE(R500_VAP_INDEX_OFFSET, 1);
261 ADD_RANGE(R500_US_CONFIG, 2);
262 ADD_RANGE(R500_US_CODE_ADDR, 3);
263 ADD_RANGE(R500_US_FC_CTRL, 1);
264 ADD_RANGE(R500_RS_IP_0, 16);
265 ADD_RANGE(R500_RS_INST_0, 16);
266 ADD_RANGE(R500_RB3D_COLOR_CLEAR_VALUE_AR, 2);
267 ADD_RANGE(R500_RB3D_CONSTANT_COLOR_AR, 2);
Dave Airlie21efa2b2008-06-19 13:01:58 +1000268 ADD_RANGE(R500_ZB_FIFO_SIZE, 2);
Dave Airliec0beb2a2008-05-28 13:52:28 +1000269 } else {
270 ADD_RANGE(R300_PFS_CNTL_0, 3);
271 ADD_RANGE(R300_PFS_NODE_0, 4);
272 ADD_RANGE(R300_PFS_TEXI_0, 64);
273 ADD_RANGE(R300_PFS_INSTR0_0, 64);
274 ADD_RANGE(R300_PFS_INSTR1_0, 64);
275 ADD_RANGE(R300_PFS_INSTR2_0, 64);
276 ADD_RANGE(R300_PFS_INSTR3_0, 64);
277 ADD_RANGE(R300_RS_INTERP_0, 8);
278 ADD_RANGE(R300_RS_ROUTE_0, 8);
279
Dave Airlie3d5e2c12008-02-07 15:01:05 +1000280 }
Dave Airlie414ed532005-08-16 20:43:16 +1000281}
282
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000283static __inline__ int r300_check_range(unsigned reg, int count)
Dave Airlie414ed532005-08-16 20:43:16 +1000284{
285 int i;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000286 if (reg & ~0xffff)
287 return -1;
288 for (i = (reg >> 2); i < (reg >> 2) + count; i++)
289 if (r300_reg_flags[i] != MARK_SAFE)
290 return 1;
Dave Airlie414ed532005-08-16 20:43:16 +1000291 return 0;
292}
293
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000294static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
295 dev_priv,
Dave Airlieb3a83632005-09-30 18:37:36 +1000296 drm_radeon_kcmd_buffer_t
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000297 * cmdbuf,
298 drm_r300_cmd_header_t
299 header)
Dave Airlie414ed532005-08-16 20:43:16 +1000300{
301 int reg;
302 int sz;
303 int i;
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200304 u32 *value;
Dave Airlie414ed532005-08-16 20:43:16 +1000305 RING_LOCALS;
306
307 sz = header.packet0.count;
308 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000309
310 if ((sz > 64) || (sz < 0)) {
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200311 DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
312 reg, sz);
Eric Anholt20caafa2007-08-25 19:22:43 +1000313 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000314 }
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200315
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000316 for (i = 0; i < sz; i++) {
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000317 switch (r300_reg_flags[(reg >> 2) + i]) {
Dave Airlie414ed532005-08-16 20:43:16 +1000318 case MARK_SAFE:
319 break;
320 case MARK_CHECK_OFFSET:
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200321 value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
322 if (!radeon_check_offset(dev_priv, *value)) {
323 DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
324 reg, sz);
Eric Anholt20caafa2007-08-25 19:22:43 +1000325 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000326 }
Dave Airlie414ed532005-08-16 20:43:16 +1000327 break;
328 default:
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000329 DRM_ERROR("Register %04x failed check as flag=%02x\n",
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200330 reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
Eric Anholt20caafa2007-08-25 19:22:43 +1000331 return -EINVAL;
Dave Airlie414ed532005-08-16 20:43:16 +1000332 }
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000333 }
334
335 BEGIN_RING(1 + sz);
336 OUT_RING(CP_PACKET0(reg, sz - 1));
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200337 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
Dave Airlie414ed532005-08-16 20:43:16 +1000338 ADVANCE_RING();
339
Dave Airlie414ed532005-08-16 20:43:16 +1000340 return 0;
341}
342
343/**
344 * Emits a packet0 setting arbitrary registers.
345 * Called by r300_do_cp_cmdbuf.
346 *
347 * Note that checks are performed on contents and addresses of the registers
348 */
Dave Airlied985c102006-01-02 21:32:48 +1100349static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
350 drm_radeon_kcmd_buffer_t *cmdbuf,
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000351 drm_r300_cmd_header_t header)
Dave Airlie414ed532005-08-16 20:43:16 +1000352{
353 int reg;
354 int sz;
355 RING_LOCALS;
356
357 sz = header.packet0.count;
358 reg = (header.packet0.reghi << 8) | header.packet0.reglo;
359
360 if (!sz)
361 return 0;
362
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200363 if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
Eric Anholt20caafa2007-08-25 19:22:43 +1000364 return -EINVAL;
Dave Airlie414ed532005-08-16 20:43:16 +1000365
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000366 if (reg + sz * 4 >= 0x10000) {
367 DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
368 sz);
Eric Anholt20caafa2007-08-25 19:22:43 +1000369 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000370 }
371
372 if (r300_check_range(reg, sz)) {
Dave Airlie414ed532005-08-16 20:43:16 +1000373 /* go and check everything */
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000374 return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf,
375 header);
376 }
Dave Airlie414ed532005-08-16 20:43:16 +1000377 /* the rest of the data is safe to emit, whatever the values the user passed */
378
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000379 BEGIN_RING(1 + sz);
380 OUT_RING(CP_PACKET0(reg, sz - 1));
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200381 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
Dave Airlie414ed532005-08-16 20:43:16 +1000382 ADVANCE_RING();
383
Dave Airlie414ed532005-08-16 20:43:16 +1000384 return 0;
385}
386
Dave Airlie414ed532005-08-16 20:43:16 +1000387/**
388 * Uploads user-supplied vertex program instructions or parameters onto
389 * the graphics card.
390 * Called by r300_do_cp_cmdbuf.
391 */
Dave Airlied985c102006-01-02 21:32:48 +1100392static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
393 drm_radeon_kcmd_buffer_t *cmdbuf,
Dave Airlie414ed532005-08-16 20:43:16 +1000394 drm_r300_cmd_header_t header)
395{
396 int sz;
397 int addr;
398 RING_LOCALS;
399
400 sz = header.vpu.count;
401 addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
402
403 if (!sz)
404 return 0;
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200405 if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
Eric Anholt20caafa2007-08-25 19:22:43 +1000406 return -EINVAL;
Dave Airlie414ed532005-08-16 20:43:16 +1000407
Jerome Glisse54f961a2008-08-13 09:46:31 +1000408 /* VAP is very sensitive so we purge cache before we program it
409 * and we also flush its state before & after */
410 BEGIN_RING(6);
411 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
412 OUT_RING(R300_RB3D_DC_FLUSH);
413 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
414 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
415 OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
416 OUT_RING(0);
417 ADVANCE_RING();
418 /* set flush flag */
419 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
420
421 BEGIN_RING(3 + sz * 4);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000422 OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
423 OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200424 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
Jerome Glisse54f961a2008-08-13 09:46:31 +1000425 ADVANCE_RING();
Dave Airlie414ed532005-08-16 20:43:16 +1000426
Jerome Glisse54f961a2008-08-13 09:46:31 +1000427 BEGIN_RING(2);
428 OUT_RING(CP_PACKET0(R300_VAP_PVS_STATE_FLUSH_REG, 0));
429 OUT_RING(0);
Dave Airlie414ed532005-08-16 20:43:16 +1000430 ADVANCE_RING();
431
Dave Airlie414ed532005-08-16 20:43:16 +1000432 return 0;
433}
434
Dave Airlie414ed532005-08-16 20:43:16 +1000435/**
436 * Emit a clear packet from userspace.
437 * Called by r300_emit_packet3.
438 */
Dave Airlied985c102006-01-02 21:32:48 +1100439static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
440 drm_radeon_kcmd_buffer_t *cmdbuf)
Dave Airlie414ed532005-08-16 20:43:16 +1000441{
442 RING_LOCALS;
443
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200444 if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
Eric Anholt20caafa2007-08-25 19:22:43 +1000445 return -EINVAL;
Dave Airlie414ed532005-08-16 20:43:16 +1000446
447 BEGIN_RING(10);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000448 OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
449 OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
450 (1 << R300_PRIM_NUM_VERTICES_SHIFT));
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200451 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
Dave Airlie414ed532005-08-16 20:43:16 +1000452 ADVANCE_RING();
453
Jerome Glisse54f961a2008-08-13 09:46:31 +1000454 BEGIN_RING(4);
455 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
456 OUT_RING(R300_RB3D_DC_FLUSH);
457 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
458 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
459 ADVANCE_RING();
460 /* set flush flag */
461 dev_priv->track_flush |= RADEON_FLUSH_EMITED;
462
Dave Airlie414ed532005-08-16 20:43:16 +1000463 return 0;
464}
465
Dave Airlied985c102006-01-02 21:32:48 +1100466static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
467 drm_radeon_kcmd_buffer_t *cmdbuf,
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000468 u32 header)
Dave Airlie414ed532005-08-16 20:43:16 +1000469{
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000470 int count, i, k;
471#define MAX_ARRAY_PACKET 64
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200472 u32 *data;
Dave Airlie414ed532005-08-16 20:43:16 +1000473 u32 narrays;
474 RING_LOCALS;
475
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200476 count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000477
478 if ((count + 1) > MAX_ARRAY_PACKET) {
479 DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
480 count);
Eric Anholt20caafa2007-08-25 19:22:43 +1000481 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000482 }
Dave Airlie414ed532005-08-16 20:43:16 +1000483 /* carefully check packet contents */
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000484
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200485 /* We have already read the header so advance the buffer. */
486 drm_buffer_advance(cmdbuf->buffer, 4);
487
488 narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000489 k = 0;
490 i = 1;
491 while ((k < narrays) && (i < (count + 1))) {
492 i++; /* skip attribute field */
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200493 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
494 if (!radeon_check_offset(dev_priv, *data)) {
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000495 DRM_ERROR
496 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
497 k, i);
Eric Anholt20caafa2007-08-25 19:22:43 +1000498 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000499 }
Dave Airlie414ed532005-08-16 20:43:16 +1000500 k++;
501 i++;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000502 if (k == narrays)
503 break;
Dave Airlie414ed532005-08-16 20:43:16 +1000504 /* have one more to process, they come in pairs */
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200505 data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
506 if (!radeon_check_offset(dev_priv, *data)) {
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000507 DRM_ERROR
508 ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
509 k, i);
Eric Anholt20caafa2007-08-25 19:22:43 +1000510 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000511 }
Dave Airlie414ed532005-08-16 20:43:16 +1000512 k++;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000513 i++;
514 }
Dave Airlie414ed532005-08-16 20:43:16 +1000515 /* do the counts match what we expect ? */
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000516 if ((k != narrays) || (i != (count + 1))) {
517 DRM_ERROR
518 ("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
519 k, i, narrays, count + 1);
Eric Anholt20caafa2007-08-25 19:22:43 +1000520 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000521 }
Dave Airlie414ed532005-08-16 20:43:16 +1000522
523 /* all clear, output packet */
524
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000525 BEGIN_RING(count + 2);
Dave Airlie414ed532005-08-16 20:43:16 +1000526 OUT_RING(header);
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200527 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
Dave Airlie414ed532005-08-16 20:43:16 +1000528 ADVANCE_RING();
529
Dave Airlie414ed532005-08-16 20:43:16 +1000530 return 0;
531}
Dave Airlied5ea7022006-03-19 19:37:55 +1100532
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100533static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
534 drm_radeon_kcmd_buffer_t *cmdbuf)
535{
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200536 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100537 int count, ret;
538 RING_LOCALS;
539
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100540
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200541 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
542
543 if (*cmd & 0x8000) {
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100544 u32 offset;
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200545 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
546 if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100547 | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200548
549 u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
550 offset = *cmd2 << 10;
=?utf-8?q?Michel_D=C3=A4nzer?=1d6bb8e2006-12-15 18:54:35 +1100551 ret = !radeon_check_offset(dev_priv, offset);
Dave Airlie73d72cf2006-02-18 16:30:54 +1100552 if (ret) {
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100553 DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
Eric Anholt20caafa2007-08-25 19:22:43 +1000554 return -EINVAL;
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100555 }
556 }
557
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200558 if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
559 (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
560 u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
561 offset = *cmd3 << 10;
=?utf-8?q?Michel_D=C3=A4nzer?=1d6bb8e2006-12-15 18:54:35 +1100562 ret = !radeon_check_offset(dev_priv, offset);
Dave Airlie73d72cf2006-02-18 16:30:54 +1100563 if (ret) {
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100564 DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
Eric Anholt20caafa2007-08-25 19:22:43 +1000565 return -EINVAL;
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100566 }
Dave Airliebc5f4522007-11-05 12:50:58 +1000567
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100568 }
569 }
570
571 BEGIN_RING(count+2);
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200572 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100573 ADVANCE_RING();
574
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100575 return 0;
576}
Dave Airlie414ed532005-08-16 20:43:16 +1000577
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000578static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
579 drm_radeon_kcmd_buffer_t *cmdbuf)
Roland Scheideggera1aa2892006-10-24 21:45:00 +1000580{
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200581 u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
582 u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000583 int count;
584 int expected_count;
Roland Scheideggera1aa2892006-10-24 21:45:00 +1000585 RING_LOCALS;
586
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200587 count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
588
589 expected_count = *cmd1 >> 16;
590 if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000591 expected_count = (expected_count+1)/2;
Roland Scheideggera1aa2892006-10-24 21:45:00 +1000592
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000593 if (count && count != expected_count) {
594 DRM_ERROR("3D_DRAW_INDX_2: packet size %i, expected %i\n",
595 count, expected_count);
Eric Anholt20caafa2007-08-25 19:22:43 +1000596 return -EINVAL;
Roland Scheideggera1aa2892006-10-24 21:45:00 +1000597 }
598
599 BEGIN_RING(count+2);
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200600 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
Roland Scheideggera1aa2892006-10-24 21:45:00 +1000601 ADVANCE_RING();
602
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000603 if (!count) {
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200604 drm_r300_cmd_header_t stack_header, *header;
605 u32 *cmd1, *cmd2, *cmd3;
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000606
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200607 if (drm_buffer_unprocessed(cmdbuf->buffer)
608 < 4*4 + sizeof(stack_header)) {
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000609 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
610 return -EINVAL;
611 }
612
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200613 header = drm_buffer_read_object(cmdbuf->buffer,
614 sizeof(stack_header), &stack_header);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000615
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200616 cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
617 cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
618 cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
619 cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000620
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200621 if (header->header.cmd_type != R300_CMD_PACKET3 ||
622 header->packet3.packet != R300_CMD_PACKET3_RAW ||
623 *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000624 DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
625 return -EINVAL;
626 }
627
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200628 if ((*cmd1 & 0x8000ffff) != 0x80000810) {
629 DRM_ERROR("Invalid indx_buffer reg address %08X\n",
630 *cmd1);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000631 return -EINVAL;
632 }
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200633 if (!radeon_check_offset(dev_priv, *cmd2)) {
634 DRM_ERROR("Invalid indx_buffer offset is %08X\n",
635 *cmd2);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000636 return -EINVAL;
637 }
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200638 if (*cmd3 != expected_count) {
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000639 DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200640 *cmd3, expected_count);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000641 return -EINVAL;
642 }
643
644 BEGIN_RING(4);
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200645 OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000646 ADVANCE_RING();
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000647 }
648
Roland Scheideggera1aa2892006-10-24 21:45:00 +1000649 return 0;
650}
651
Dave Airlied985c102006-01-02 21:32:48 +1100652static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
653 drm_radeon_kcmd_buffer_t *cmdbuf)
Dave Airlie414ed532005-08-16 20:43:16 +1000654{
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200655 u32 *header;
Dave Airlie414ed532005-08-16 20:43:16 +1000656 int count;
657 RING_LOCALS;
658
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200659 if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
Eric Anholt20caafa2007-08-25 19:22:43 +1000660 return -EINVAL;
Dave Airlie414ed532005-08-16 20:43:16 +1000661
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000662 /* Fixme !! This simply emits a packet without much checking.
Dave Airlie414ed532005-08-16 20:43:16 +1000663 We need to be smarter. */
664
665 /* obtain first word - actual packet3 header */
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200666 header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
Dave Airlie414ed532005-08-16 20:43:16 +1000667
668 /* Is it packet 3 ? */
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200669 if ((*header >> 30) != 0x3) {
670 DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
Eric Anholt20caafa2007-08-25 19:22:43 +1000671 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000672 }
Dave Airlie414ed532005-08-16 20:43:16 +1000673
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200674 count = (*header >> 16) & 0x3fff;
Dave Airlie414ed532005-08-16 20:43:16 +1000675
676 /* Check again now that we know how much data to expect */
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200677 if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000678 DRM_ERROR
679 ("Expected packet3 of length %d but have only %d bytes left\n",
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200680 (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
Eric Anholt20caafa2007-08-25 19:22:43 +1000681 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000682 }
Dave Airlie414ed532005-08-16 20:43:16 +1000683
684 /* Is it a packet type we know about ? */
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200685 switch (*header & 0xff00) {
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000686 case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200687 return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
Dave Airlie414ed532005-08-16 20:43:16 +1000688
Dave Airlie4e5e2e22006-02-18 15:51:35 +1100689 case RADEON_CNTL_BITBLT_MULTI:
690 return r300_emit_bitblt_multi(dev_priv, cmdbuf);
691
Jerome Glisse54f961a2008-08-13 09:46:31 +1000692 case RADEON_CP_INDX_BUFFER:
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000693 DRM_ERROR("packet3 INDX_BUFFER without preceding 3D_DRAW_INDX_2 is illegal.\n");
694 return -EINVAL;
Jerome Glisse54f961a2008-08-13 09:46:31 +1000695 case RADEON_CP_3D_DRAW_IMMD_2:
696 /* triggers drawing using in-packet vertex data */
697 case RADEON_CP_3D_DRAW_VBUF_2:
698 /* triggers drawing of vertex buffers setup elsewhere */
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000699 dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
700 RADEON_PURGE_EMITED);
701 break;
Jerome Glisse54f961a2008-08-13 09:46:31 +1000702 case RADEON_CP_3D_DRAW_INDX_2:
703 /* triggers drawing using indices to vertex buffer */
704 /* whenever we send vertex we clear flush & purge */
705 dev_priv->track_flush &= ~(RADEON_FLUSH_EMITED |
706 RADEON_PURGE_EMITED);
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000707 return r300_emit_draw_indx_2(dev_priv, cmdbuf);
Dave Airlie414ed532005-08-16 20:43:16 +1000708 case RADEON_WAIT_FOR_IDLE:
709 case RADEON_CP_NOP:
710 /* these packets are safe */
711 break;
712 default:
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200713 DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
Eric Anholt20caafa2007-08-25 19:22:43 +1000714 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000715 }
Dave Airlie414ed532005-08-16 20:43:16 +1000716
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000717 BEGIN_RING(count + 2);
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200718 OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
Dave Airlie414ed532005-08-16 20:43:16 +1000719 ADVANCE_RING();
720
Dave Airlie414ed532005-08-16 20:43:16 +1000721 return 0;
722}
723
Dave Airlie414ed532005-08-16 20:43:16 +1000724/**
725 * Emit a rendering packet3 from userspace.
726 * Called by r300_do_cp_cmdbuf.
727 */
Dave Airlied985c102006-01-02 21:32:48 +1100728static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
729 drm_radeon_kcmd_buffer_t *cmdbuf,
Dave Airlie414ed532005-08-16 20:43:16 +1000730 drm_r300_cmd_header_t header)
731{
732 int n;
733 int ret;
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200734 int orig_iter = cmdbuf->buffer->iterator;
Dave Airlie414ed532005-08-16 20:43:16 +1000735
736 /* This is a do-while-loop so that we run the interior at least once,
737 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
738 */
739 n = 0;
740 do {
741 if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
742 ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
743 if (ret)
744 return ret;
745
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200746 cmdbuf->buffer->iterator = orig_iter;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000747 }
Dave Airlie414ed532005-08-16 20:43:16 +1000748
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000749 switch (header.packet3.packet) {
Dave Airlie414ed532005-08-16 20:43:16 +1000750 case R300_CMD_PACKET3_CLEAR:
751 DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
752 ret = r300_emit_clear(dev_priv, cmdbuf);
753 if (ret) {
754 DRM_ERROR("r300_emit_clear failed\n");
755 return ret;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000756 }
Dave Airlie414ed532005-08-16 20:43:16 +1000757 break;
758
759 case R300_CMD_PACKET3_RAW:
760 DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
761 ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
762 if (ret) {
763 DRM_ERROR("r300_emit_raw_packet3 failed\n");
764 return ret;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000765 }
Dave Airlie414ed532005-08-16 20:43:16 +1000766 break;
767
768 default:
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200769 DRM_ERROR("bad packet3 type %i at byte %d\n",
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000770 header.packet3.packet,
Pauli Nieminen55a5cb52010-03-01 11:37:11 +0200771 cmdbuf->buffer->iterator - (int)sizeof(header));
Eric Anholt20caafa2007-08-25 19:22:43 +1000772 return -EINVAL;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000773 }
Dave Airlie414ed532005-08-16 20:43:16 +1000774
775 n += R300_SIMULTANEOUS_CLIPRECTS;
Dave Airlieb5e89ed2005-09-25 14:28:13 +1000776 } while (n < cmdbuf->nbox);
Dave Airlie414ed532005-08-16 20:43:16 +1000777
778 return 0;
779}
780
781/* Some of the R300 chips seem to be extremely touchy about the two registers
782 * that are configured in r300_pacify.
783 * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
784 * sends a command buffer that contains only state setting commands and a
785 * vertex program/parameter upload sequence, this will eventually lead to a
786 * lockup, unless the sequence is bracketed by calls to r300_pacify.
787 * So we should take great care to *always* call r300_pacify before
788 * *anything* 3D related, and again afterwards. This is what the
789 * call bracket in r300_do_cp_cmdbuf is for.
790 */
791
792/**
793 * Emit the sequence to pacify R300.
794 */
Andi Kleence580fa2011-10-13 16:08:47 -0700795static void r300_pacify(drm_radeon_private_t *dev_priv)
Dave Airlie414ed532005-08-16 20:43:16 +1000796{
Jerome Glisse54f961a2008-08-13 09:46:31 +1000797 uint32_t cache_z, cache_3d, cache_2d;
Dave Airlie414ed532005-08-16 20:43:16 +1000798 RING_LOCALS;
Nicolai Haehnlee2898c52008-08-13 09:49:15 +1000799
Jerome Glisse54f961a2008-08-13 09:46:31 +1000800 cache_z = R300_ZC_FLUSH;
801 cache_2d = R300_RB2D_DC_FLUSH;
802 cache_3d = R300_RB3D_DC_FLUSH;
803 if (!(dev_priv->track_flush & RADEON_PURGE_EMITED)) {
804 /* we can purge, primitive where draw since last purge */
805 cache_z |= R300_ZC_FREE;
806 cache_2d |= R300_RB2D_DC_FREE;
807 cache_3d |= R300_RB3D_DC_FREE;
808 }
Dave Airlie414ed532005-08-16 20:43:16 +1000809
Jerome Glisse54f961a2008-08-13 09:46:31 +1000810 /* flush & purge zbuffer */
811 BEGIN_RING(2);
Dave Airlie21efa2b2008-06-19 13:01:58 +1000812 OUT_RING(CP_PACKET0(R300_ZB_ZCACHE_CTLSTAT, 0));
Jerome Glisse54f961a2008-08-13 09:46:31 +1000813 OUT_RING(cache_z);
Dave Airlie414ed532005-08-16 20:43:16 +1000814 ADVANCE_RING();
Jerome Glisse54f961a2008-08-13 09:46:31 +1000815 /* flush & purge 3d */
816 BEGIN_RING(2);
817 OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
818 OUT_RING(cache_3d);
819 ADVANCE_RING();
820 /* flush & purge texture */
821 BEGIN_RING(2);
822 OUT_RING(CP_PACKET0(R300_TX_INVALTAGS, 0));
823 OUT_RING(0);
824 ADVANCE_RING();
825 /* FIXME: is this one really needed ? */
826 BEGIN_RING(2);
827 OUT_RING(CP_PACKET0(R300_RB3D_AARESOLVE_CTL, 0));
828 OUT_RING(0);
829 ADVANCE_RING();
830 BEGIN_RING(2);
831 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
832 OUT_RING(RADEON_WAIT_3D_IDLECLEAN);
833 ADVANCE_RING();
834 /* flush & purge 2d through E2 as RB2D will trigger lockup */
835 BEGIN_RING(4);
836 OUT_RING(CP_PACKET0(R300_DSTCACHE_CTLSTAT, 0));
837 OUT_RING(cache_2d);
838 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
839 OUT_RING(RADEON_WAIT_2D_IDLECLEAN |
840 RADEON_WAIT_HOST_IDLECLEAN);
841 ADVANCE_RING();
842 /* set flush & purge flags */
843 dev_priv->track_flush |= RADEON_FLUSH_EMITED | RADEON_PURGE_EMITED;
Dave Airlie414ed532005-08-16 20:43:16 +1000844}
845
Dave Airlie414ed532005-08-16 20:43:16 +1000846/**
847 * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
848 * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
849 * be careful about how this function is called.
850 */
Dave Airlie7c1c2872008-11-28 14:22:24 +1000851static void r300_discard_buffer(struct drm_device *dev, struct drm_master *master, struct drm_buf *buf)
Dave Airlie414ed532005-08-16 20:43:16 +1000852{
Dave Airlie414ed532005-08-16 20:43:16 +1000853 drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
Dave Airlie7c1c2872008-11-28 14:22:24 +1000854 struct drm_radeon_master_private *master_priv = master->driver_priv;
Dave Airlie414ed532005-08-16 20:43:16 +1000855
Dave Airlie7c1c2872008-11-28 14:22:24 +1000856 buf_priv->age = ++master_priv->sarea_priv->last_dispatch;
Dave Airlie414ed532005-08-16 20:43:16 +1000857 buf->pending = 1;
858 buf->used = 0;
859}
860
Dave Airlie0c76be32008-03-30 07:51:49 +1000861static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
862 drm_r300_cmd_header_t header)
863{
864 u32 wait_until;
865 RING_LOCALS;
866
867 if (!header.wait.flags)
868 return;
869
870 wait_until = 0;
871
872 switch(header.wait.flags) {
873 case R300_WAIT_2D:
874 wait_until = RADEON_WAIT_2D_IDLE;
875 break;
876 case R300_WAIT_3D:
877 wait_until = RADEON_WAIT_3D_IDLE;
878 break;
879 case R300_NEW_WAIT_2D_3D:
880 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
881 break;
882 case R300_NEW_WAIT_2D_2D_CLEAN:
883 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
884 break;
885 case R300_NEW_WAIT_3D_3D_CLEAN:
886 wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
887 break;
888 case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
889 wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
890 wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
891 break;
892 default:
893 return;
894 }
895
896 BEGIN_RING(2);
897 OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
898 OUT_RING(wait_until);
899 ADVANCE_RING();
900}
901
static int r300_scratch(drm_radeon_private_t *dev_priv,
			drm_radeon_kcmd_buffer_t *cmdbuf,
			drm_r300_cmd_header_t header)
{
	u32 *ref_age_base;
	u32 i, *buf_idx, h_pending;
	u64 *ptr_addr;
	u64 stack_ptr_addr;
	RING_LOCALS;

	/* Payload layout: one 64-bit user-space pointer followed by
	 * header.scratch.n_bufs 32-bit buffer indices. Reject short buffers. */
	if (drm_buffer_unprocessed(cmdbuf->buffer) <
	    (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
		return -EINVAL;
	}

	/* Only scratch registers 0..4 are valid targets. */
	if (header.scratch.reg >= 5) {
		return -EINVAL;
	}

	/* Bump the age for this scratch register; the new value is both
	 * published to userspace below and emitted to the ring at the end. */
	dev_priv->scratch_ages[header.scratch.reg]++;

	/* Read the (possibly unaligned) user-space base pointer of the
	 * per-buffer age/pending array out of the command stream. */
	ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
			sizeof(stack_ptr_addr), &stack_ptr_addr);
	ref_age_base = (u32 *)(unsigned long)get_unaligned(ptr_addr);

	for (i=0; i < header.scratch.n_bufs; i++) {
		/* NOTE: scales the index in place inside the command buffer,
		 * then advances past it at the bottom of the loop. */
		buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
		*buf_idx *= 2; /* 8 bytes per buf */

		/* Publish the new age for this buffer to userspace. */
		if (copy_to_user(ref_age_base + *buf_idx,
				&dev_priv->scratch_ages[header.scratch.reg],
				sizeof(u32)))
			return -EINVAL;

		/* Second dword per buffer appears to be a pending count;
		 * zero here means inconsistent userspace state. */
		if (copy_from_user(&h_pending,
				ref_age_base + *buf_idx + 1,
				sizeof(u32)))
			return -EINVAL;

		if (h_pending == 0)
			return -EINVAL;

		h_pending--;

		/* Write back the decremented pending count. */
		if (copy_to_user(ref_age_base + *buf_idx + 1,
					&h_pending,
					sizeof(u32)))
			return -EINVAL;

		drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
	}

	/* Emit the new age into the selected scratch register. */
	BEGIN_RING(2);
	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );
	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );
	ADVANCE_RING();

	return 0;
}
961
Dave Airlie414ed532005-08-16 20:43:16 +1000962/**
Dave Airliec0beb2a2008-05-28 13:52:28 +1000963 * Uploads user-supplied vertex program instructions or parameters onto
964 * the graphics card.
965 * Called by r300_do_cp_cmdbuf.
966 */
967static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
968 drm_radeon_kcmd_buffer_t *cmdbuf,
969 drm_r300_cmd_header_t header)
970{
971 int sz;
972 int addr;
973 int type;
Andi Kleen01136ac2009-12-21 02:24:47 +0100974 int isclamp;
Dave Airliec0beb2a2008-05-28 13:52:28 +1000975 int stride;
976 RING_LOCALS;
977
978 sz = header.r500fp.count;
979 /* address is 9 bits 0 - 8, bit 1 of flags is part of address */
980 addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
981
982 type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
Andi Kleen01136ac2009-12-21 02:24:47 +0100983 isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
Dave Airliec0beb2a2008-05-28 13:52:28 +1000984
985 addr |= (type << 16);
Andi Kleen01136ac2009-12-21 02:24:47 +0100986 addr |= (isclamp << 17);
Dave Airliec0beb2a2008-05-28 13:52:28 +1000987
988 stride = type ? 4 : 6;
989
990 DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
991 if (!sz)
992 return 0;
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200993 if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
Dave Airliec0beb2a2008-05-28 13:52:28 +1000994 return -EINVAL;
995
996 BEGIN_RING(3 + sz * stride);
997 OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
998 OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
Pauli Nieminenb4fe9452010-02-01 19:11:16 +0200999 OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
Dave Airliec0beb2a2008-05-28 13:52:28 +10001000
1001 ADVANCE_RING();
1002
Dave Airliec0beb2a2008-05-28 13:52:28 +10001003 return 0;
1004}
1005
1006
1007/**
Dave Airlie414ed532005-08-16 20:43:16 +10001008 * Parses and validates a user-supplied command buffer and emits appropriate
1009 * commands on the DMA ring buffer.
1010 * Called by the ioctl handler function radeon_cp_cmdbuf.
1011 */
int r300_do_cp_cmdbuf(struct drm_device *dev,
		      struct drm_file *file_priv,
		      drm_radeon_kcmd_buffer_t *cmdbuf)
{
	drm_radeon_private_t *dev_priv = dev->dev_private;
	struct drm_radeon_master_private *master_priv = file_priv->master->driver_priv;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf = NULL;
	int emit_dispatch_age = 0;
	int ret = 0;

	DRM_DEBUG("\n");

	/* pacify: bracket all 3D work (see comment above r300_pacify) */
	r300_pacify(dev_priv);

	/* Few enough cliprects to emit once up front; otherwise
	 * r300_emit_packet3 replays packets per cliprect batch. */
	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
		if (ret)
			goto cleanup;
	}

	/* Main dispatch loop: consume one command header at a time until
	 * the user buffer has no room left for another header. */
	while (drm_buffer_unprocessed(cmdbuf->buffer)
			>= sizeof(drm_r300_cmd_header_t)) {
		int idx;
		drm_r300_cmd_header_t *header, stack_header;

		/* Copies out the header (handles wrap/alignment) and
		 * advances the buffer iterator past it. */
		header = drm_buffer_read_object(cmdbuf->buffer,
				sizeof(stack_header), &stack_header);

		switch (header->header.cmd_type) {
		case R300_CMD_PACKET0:
			DRM_DEBUG("R300_CMD_PACKET0\n");
			ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_packet0 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_VPU:
			DRM_DEBUG("R300_CMD_VPU\n");
			ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_vpu failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_PACKET3:
			DRM_DEBUG("R300_CMD_PACKET3\n");
			ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_packet3 failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_END3D:
			DRM_DEBUG("R300_CMD_END3D\n");
			/* TODO:
			   Ideally userspace driver should not need to issue this call,
			   i.e. the drm driver should issue it automatically and prevent
			   lockups.

			   In practice, we do not understand why this call is needed and what
			   it does (except for some vague guesses that it has to do with cache
			   coherence) and so the user space driver does it.

			   Once we are sure which uses prevent lockups the code could be moved
			   into the kernel and the userspace driver will not
			   need to use this command.

			   Note that issuing this command does not hurt anything
			   except, possibly, performance */
			r300_pacify(dev_priv);
			break;

		case R300_CMD_CP_DELAY:
			/* simple enough, we can do it here */
			DRM_DEBUG("R300_CMD_CP_DELAY\n");
			{
				int i;
				RING_LOCALS;

				/* emit the requested number of no-op dwords */
				BEGIN_RING(header->delay.count);
				for (i = 0; i < header->delay.count; i++)
					OUT_RING(RADEON_CP_PACKET2);
				ADVANCE_RING();
			}
			break;

		case R300_CMD_DMA_DISCARD:
			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
			idx = header->dma.buf_idx;
			if (idx < 0 || idx >= dma->buf_count) {
				DRM_ERROR("buffer index %d (of %d max)\n",
					  idx, dma->buf_count - 1);
				ret = -EINVAL;
				goto cleanup;
			}

			/* The buffer must belong to this client and must not
			 * already be pending retirement. */
			buf = dma->buflist[idx];
			if (buf->file_priv != file_priv || buf->pending) {
				DRM_ERROR("bad buffer %p %p %d\n",
					  buf->file_priv, file_priv,
					  buf->pending);
				ret = -EINVAL;
				goto cleanup;
			}

			/* Age emit is deferred past the final pacify; see the
			 * comment at the cleanup label below. */
			emit_dispatch_age = 1;
			r300_discard_buffer(dev, file_priv->master, buf);
			break;

		case R300_CMD_WAIT:
			DRM_DEBUG("R300_CMD_WAIT\n");
			r300_cmd_wait(dev_priv, *header);
			break;

		case R300_CMD_SCRATCH:
			DRM_DEBUG("R300_CMD_SCRATCH\n");
			ret = r300_scratch(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_scratch failed\n");
				goto cleanup;
			}
			break;

		case R300_CMD_R500FP:
			/* r500fp upload is only valid on RV515 and later */
			if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) {
				DRM_ERROR("Calling r500 command on r300 card\n");
				ret = -EINVAL;
				goto cleanup;
			}
			DRM_DEBUG("R300_CMD_R500FP\n");
			ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
			if (ret) {
				DRM_ERROR("r300_emit_r500fp failed\n");
				goto cleanup;
			}
			break;
		default:
			DRM_ERROR("bad cmd_type %i at byte %d\n",
				  header->header.cmd_type,
				  cmdbuf->buffer->iterator - (int)sizeof(*header));
			ret = -EINVAL;
			goto cleanup;
		}
	}

	DRM_DEBUG("END\n");

      cleanup:
	/* Close the pacifier bracket on both success and error paths. */
	r300_pacify(dev_priv);

	/* We emit the vertex buffer age here, outside the pacifier "brackets"
	 * for two reasons:
	 * (1) This may coalesce multiple age emissions into a single one and
	 * (2) more importantly, some chips lock up hard when scratch registers
	 * are written inside the pacifier bracket.
	 */
	if (emit_dispatch_age) {
		RING_LOCALS;

		/* Emit the vertex buffer age */
		BEGIN_RING(2);
		RADEON_DISPATCH_AGE(master_priv->sarea_priv->last_dispatch);
		ADVANCE_RING();
	}

	/* Make everything queued above visible to the CP. */
	COMMIT_RING();

	return ret;
}