/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"

#define MAX(a,b)	(((a)>(b))?(a):(b))
#define MIN(a,b)	(((a)<(b))?(a):(b))

int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
			   struct radeon_cs_reloc **cs_reloc);
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);

struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	u32			row_size;
	/* values we track */
	u32			nsamples;		/* unused */
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];	/* unused */
	struct radeon_bo	*cb_color_cmask_bo[8];	/* unused */
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_attrib[12];
	u32			cb_color_cmask_slice[8];	/* unused */
	u32			cb_color_fmask_slice[8];	/* unused */
	u32			cb_target_mask;
	u32			cb_shader_mask;			/* unused */
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	struct radeon_bo	*vgt_strmout_bo[4];
	u32			vgt_strmout_bo_offset[4];
	u32			vgt_strmout_size[4];
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_slice;
	u32			db_depth_size;
	u32			db_z_info;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
	bool			sx_misc_kill_all_prims;
	bool			cb_dirty;
	bool			db_dirty;
	bool			streamout_dirty;
	u32			htile_offset;
	u32			htile_surface;
	struct radeon_bo	*htile_bo;
};

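/**
 * evergreen_cs_get_aray_mode() - convert bo tiling flags to a hw array mode
 * @tiling_flags: RADEON_TILING_* flags from the relocation bo
 *
 * Macro tiling maps to ARRAY_2D_TILED_THIN1, micro tiling to
 * ARRAY_1D_TILED_THIN1, anything else to ARRAY_LINEAR_GENERAL.
 */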
static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
{
	if (tiling_flags & RADEON_TILING_MACRO)
		return ARRAY_2D_TILED_THIN1;
	else if (tiling_flags & RADEON_TILING_MICRO)
		return ARRAY_1D_TILED_THIN1;
	else
		return ARRAY_LINEAR_GENERAL;
}

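/**
 * evergreen_cs_get_num_banks() - encode a bank count as an ADDR_SURF_*_BANK value
 * @nbanks: number of memory banks (2, 4, 8 or 16)
 *
 * Unexpected counts fall back to ADDR_SURF_8_BANK.
 */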
static u32 evergreen_cs_get_num_banks(u32 nbanks)
{
	switch (nbanks) {
	case 2:
		return ADDR_SURF_2_BANK;
	case 4:
		return ADDR_SURF_4_BANK;
	case 8:
	default:
		return ADDR_SURF_8_BANK;
	case 16:
		return ADDR_SURF_16_BANK;
	}
}

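/**
 * evergreen_cs_track_init() - reset the checker state for a new command stream
 * @track: state tracker to initialize
 *
 * Buffer objects are primed with NULL and offsets with sentinel values so
 * that state the command stream never programs is caught by the validators
 * below rather than silently trusted.
 */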
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0xFFFFFFFF;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0xfffffff;
		track->cb_color_slice_idx[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;
	track->cb_dirty = true;

	track->db_depth_slice = 0xffffffff;
	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
	track->db_dirty = true;
	track->htile_bo = NULL;
	track->htile_offset = 0xFFFFFFFF;
	track->htile_surface = 0;

	for (i = 0; i < 4; i++) {
		track->vgt_strmout_size[i] = 0;
		track->vgt_strmout_bo[i] = NULL;
		track->vgt_strmout_bo_offset[i] = 0xFFFFFFFF;
	}
	track->streamout_dirty = true;
	track->sx_misc_kill_all_prims = false;
}

struct eg_surface {
	/* values gathered from the cs */
	unsigned	nbx;
	unsigned	nby;
	unsigned	format;
	unsigned	mode;
	unsigned	nbanks;
	unsigned	bankw;
	unsigned	bankh;
	unsigned	tsplit;
	unsigned	mtilea;
	unsigned	nsamples;
	/* output values */
	unsigned	bpe;
	unsigned	layer_size;
	unsigned	palign;
	unsigned	halign;
	unsigned long	base_align;
};

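/* Linear-general surfaces have no alignment constraints beyond the element
 * size, so this only computes the layer size.
 */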
static int evergreen_surface_check_linear(struct radeon_cs_parser *p,
					  struct eg_surface *surf,
					  const char *prefix)
{
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = surf->bpe;
	surf->palign = 1;
	surf->halign = 1;
	return 0;
}

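/* Linear-aligned surfaces must have their pitch aligned to
 * max(64, group_size / bpe) elements and their base address aligned to the
 * tiling group size.
 */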
static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p,
						  struct eg_surface *surf,
						  const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = MAX(64, track->group_size / surf->bpe);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe * surf->nsamples;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 1;
	if (surf->nbx & (palign - 1)) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	return 0;
}

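/* 1D tiled surfaces are made of 8x8 element tiles: the pitch must cover at
 * least a group-size row of tiles (minimum 8 elements) and the height must
 * be a multiple of 8.
 */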
static int evergreen_surface_check_1d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign;

	palign = track->group_size / (8 * surf->bpe * surf->nsamples);
	palign = MAX(8, palign);
	surf->layer_size = surf->nbx * surf->nby * surf->bpe;
	surf->base_align = track->group_size;
	surf->palign = palign;
	surf->halign = 8;
	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d (%d %d %d)\n",
				 __func__, __LINE__, prefix, surf->nbx, palign,
				 track->group_size, surf->bpe, surf->nsamples);
		}
		return -EINVAL;
	}
	if ((surf->nby & (8 - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with 8\n",
				 __func__, __LINE__, prefix, surf->nby);
		}
		return -EINVAL;
	}
	return 0;
}

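/* 2D tiled surfaces are built from macro tiles spanning all pipes and
 * banks: derive the macro tile width/height and check the pitch and height
 * against them. A tile split smaller than the tile size spreads one tile
 * over several slices (slice_pt).
 */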
static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
				      struct eg_surface *surf,
				      const char *prefix)
{
	struct evergreen_cs_track *track = p->track;
	unsigned palign, halign, tileb, slice_pt;
	unsigned mtile_pr, mtile_ps, mtileb;

	tileb = 64 * surf->bpe * surf->nsamples;
	slice_pt = 1;
	if (tileb > surf->tsplit) {
		slice_pt = tileb / surf->tsplit;
	}
	tileb = tileb / slice_pt;
	/* macro tile width & height */
	palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
	halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
	mtileb = (palign / 8) * (halign / 8) * tileb;
	mtile_pr = surf->nbx / palign;
	mtile_ps = (mtile_pr * surf->nby) / halign;
	surf->layer_size = mtile_ps * mtileb * slice_pt;
	surf->base_align = (palign / 8) * (halign / 8) * tileb;
	surf->palign = palign;
	surf->halign = halign;

	if ((surf->nbx & (palign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nbx, palign);
		}
		return -EINVAL;
	}
	if ((surf->nby & (halign - 1))) {
		if (prefix) {
			dev_warn(p->dev, "%s:%d %s height %d invalid must be aligned with %d\n",
				 __func__, __LINE__, prefix, surf->nby, halign);
		}
		return -EINVAL;
	}

	return 0;
}

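/* Dispatch to the per-array-mode checker above; also derives the surface's
 * bytes per element from its format.
 */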
static int evergreen_surface_check(struct radeon_cs_parser *p,
				   struct eg_surface *surf,
				   const char *prefix)
{
	/* some common values are computed here */
	surf->bpe = r600_fmt_get_blocksize(surf->format);

	switch (surf->mode) {
	case ARRAY_LINEAR_GENERAL:
		return evergreen_surface_check_linear(p, surf, prefix);
	case ARRAY_LINEAR_ALIGNED:
		return evergreen_surface_check_linear_aligned(p, surf, prefix);
	case ARRAY_1D_TILED_THIN1:
		return evergreen_surface_check_1d(p, surf, prefix);
	case ARRAY_2D_TILED_THIN1:
		return evergreen_surface_check_2d(p, surf, prefix);
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}
	return -EINVAL;
}

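/* Convert the raw register encodings (number of banks, bank width/height,
 * macro tile aspect, tile split) into their actual values, rejecting
 * encodings the hardware does not define. Only 2D tiled surfaces need the
 * conversion; the other array modes pass through untouched.
 */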
static int evergreen_surface_value_conv_check(struct radeon_cs_parser *p,
					      struct eg_surface *surf,
					      const char *prefix)
{
	switch (surf->mode) {
	case ARRAY_2D_TILED_THIN1:
		break;
	case ARRAY_LINEAR_GENERAL:
	case ARRAY_LINEAR_ALIGNED:
	case ARRAY_1D_TILED_THIN1:
		return 0;
	default:
		dev_warn(p->dev, "%s:%d %s invalid array mode %d\n",
			 __func__, __LINE__, prefix, surf->mode);
		return -EINVAL;
	}

	switch (surf->nbanks) {
	case 0: surf->nbanks = 2; break;
	case 1: surf->nbanks = 4; break;
	case 2: surf->nbanks = 8; break;
	case 3: surf->nbanks = 16; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid number of banks %d\n",
			 __func__, __LINE__, prefix, surf->nbanks);
		return -EINVAL;
	}
	switch (surf->bankw) {
	case 0: surf->bankw = 1; break;
	case 1: surf->bankw = 2; break;
	case 2: surf->bankw = 4; break;
	case 3: surf->bankw = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankw %d\n",
			 __func__, __LINE__, prefix, surf->bankw);
		return -EINVAL;
	}
	switch (surf->bankh) {
	case 0: surf->bankh = 1; break;
	case 1: surf->bankh = 2; break;
	case 2: surf->bankh = 4; break;
	case 3: surf->bankh = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid bankh %d\n",
			 __func__, __LINE__, prefix, surf->bankh);
		return -EINVAL;
	}
	switch (surf->mtilea) {
	case 0: surf->mtilea = 1; break;
	case 1: surf->mtilea = 2; break;
	case 2: surf->mtilea = 4; break;
	case 3: surf->mtilea = 8; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid macro tile aspect %d\n",
			 __func__, __LINE__, prefix, surf->mtilea);
		return -EINVAL;
	}
	switch (surf->tsplit) {
	case 0: surf->tsplit = 64; break;
	case 1: surf->tsplit = 128; break;
	case 2: surf->tsplit = 256; break;
	case 3: surf->tsplit = 512; break;
	case 4: surf->tsplit = 1024; break;
	case 5: surf->tsplit = 2048; break;
	case 6: surf->tsplit = 4096; break;
	default:
		dev_warn(p->dev, "%s:%d %s invalid tile split %d\n",
			 __func__, __LINE__, prefix, surf->tsplit);
		return -EINVAL;
	}
	return 0;
}

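/**
 * evergreen_cs_track_validate_cb() - validate one color buffer binding
 * @p: parser structure holding parsing context
 * @id: color buffer index (0-11)
 *
 * Rebuilds an eg_surface from the tracked CB registers, checks the format,
 * tiling parameters and alignment, and verifies that all slices fit in the
 * bound buffer object.
 */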
static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned id)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028C6C_SLICE_MAX(track->cb_color_view[id]) + 1;
	pitch = track->cb_color_pitch[id];
	slice = track->cb_color_slice[id];
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028C70_ARRAY_MODE(track->cb_color_info[id]);
	surf.format = G_028C70_FORMAT(track->cb_color_info[id]);
	surf.tsplit = G_028C74_TILE_SPLIT(track->cb_color_attrib[id]);
	surf.nbanks = G_028C74_NUM_BANKS(track->cb_color_attrib[id]);
	surf.bankw = G_028C74_BANK_WIDTH(track->cb_color_attrib[id]);
	surf.bankh = G_028C74_BANK_HEIGHT(track->cb_color_attrib[id]);
	surf.mtilea = G_028C74_MACRO_TILE_ASPECT(track->cb_color_attrib[id]);
	surf.nsamples = 1;

	if (!r600_fmt_is_valid_color(surf.format)) {
		dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08x)\n",
			 __func__, __LINE__, surf.format,
			 id, track->cb_color_info[id]);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "cb");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, "cb");
	if (r) {
		dev_warn(p->dev, "%s:%d cb[%d] invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, id, track->cb_color_pitch[id],
			 track->cb_color_slice[id], track->cb_color_attrib[id],
			 track->cb_color_info[id]);
		return r;
	}

	offset = track->cb_color_bo_offset[id] << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d cb[%d] bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, id, offset, surf.base_align);
		return -EINVAL;
	}

	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->cb_color_bo[id])) {
		/* old ddx versions are broken: they allocate the bo with
		 * w*h*bpp but program the slice with ALIGN(h, 8); catch
		 * this and patch the command stream.
		 */
		if (!surf.mode) {
			volatile u32 *ib = p->ib.ptr;
			unsigned long tmp, nby, bsize, size, min = 0;

			/* find the height the ddx wants */
			if (surf.nby > 8) {
				min = surf.nby - 8;
			}
			bsize = radeon_bo_size(track->cb_color_bo[id]);
			tmp = track->cb_color_bo_offset[id] << 8;
			for (nby = surf.nby; nby > min; nby--) {
				size = nby * surf.nbx * surf.bpe * surf.nsamples;
				if ((tmp + size * mslice) <= bsize) {
					break;
				}
			}
			if (nby > min) {
				surf.nby = nby;
				slice = ((nby * surf.nbx) / 64) - 1;
				if (!evergreen_surface_check(p, &surf, "cb")) {
					/* check if this one works */
					tmp += surf.layer_size * mslice;
					if (tmp <= bsize) {
						ib[track->cb_color_slice_idx[id]] = slice;
						goto old_ddx_ok;
					}
				}
			}
		}
		dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
			 "offset %d, max layer %d, bo size %ld, slice %d)\n",
			 __func__, __LINE__, id, surf.layer_size,
			 track->cb_color_bo_offset[id] << 8, mslice,
			 radeon_bo_size(track->cb_color_bo[id]), slice);
		dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
			 __func__, __LINE__, surf.nbx, surf.nby,
			 surf.mode, surf.bpe, surf.nsamples,
			 surf.bankw, surf.bankh,
			 surf.tsplit, surf.mtilea);
		return -EINVAL;
	}
old_ddx_ok:

	return 0;
}

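/**
 * evergreen_cs_track_validate_htile() - validate the htile (hyper-z) buffer
 * @p: parser structure holding parsing context
 * @nbx: depth/stencil surface width in pixels
 * @nby: depth/stencil surface height in pixels
 *
 * Rounds the surface dimensions up to the htile granularity implied by the
 * pipe configuration and checks that the resulting metadata fits in the
 * bound htile bo.
 */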
static int evergreen_cs_track_validate_htile(struct radeon_cs_parser *p,
					     unsigned nbx, unsigned nby)
{
	struct evergreen_cs_track *track = p->track;
	unsigned long size;

	if (track->htile_bo == NULL) {
		dev_warn(p->dev, "%s:%d htile enabled without htile surface 0x%08x\n",
			 __func__, __LINE__, track->db_z_info);
		return -EINVAL;
	}

	if (G_028ABC_LINEAR(track->htile_surface)) {
		/* pitch must be 16 htiles aligned == 16 * 8 pixel aligned */
		nbx = round_up(nbx, 16 * 8);
		/* height is npipes htiles aligned == npipes * 8 pixel aligned */
		nby = round_up(nby, track->npipes * 8);
	} else {
		/* always assume 8x8 htile */
		/* align is htile align * 8; the htile alignment varies with
		 * the number of pipes and the tile width and nby
		 */
		switch (track->npipes) {
		case 8:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 64 * 8);
			break;
		case 4:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 64 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 2:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 32 * 8);
			break;
		case 1:
			/* HTILE_WIDTH = 8 & HTILE_HEIGHT = 8 */
			nbx = round_up(nbx, 32 * 8);
			nby = round_up(nby, 16 * 8);
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid num pipes %d\n",
				 __func__, __LINE__, track->npipes);
			return -EINVAL;
		}
	}
	/* compute number of htiles */
	nbx = nbx >> 3;
	nby = nby >> 3;
	/* size must be aligned on npipes * 2K boundary */
	size = roundup(nbx * nby * 4, track->npipes * (2 << 10));
	size += track->htile_offset;

	if (size > radeon_bo_size(track->htile_bo)) {
		dev_warn(p->dev, "%s:%d htile surface too small %ld for %ld (%d %d)\n",
			 __func__, __LINE__, radeon_bo_size(track->htile_bo),
			 size, nbx, nby);
		return -EINVAL;
	}
	return 0;
}

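/**
 * evergreen_cs_track_validate_stencil() - validate the stencil buffer binding
 * @p: parser structure holding parsing context
 *
 * Treats the 1-byte stencil surface as a COLOR_8 surface so the common
 * surface checkers can be reused, then verifies alignment and size of the
 * read and write bos, plus the htile surface when hyper-z is enabled.
 */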
static int evergreen_cs_track_validate_stencil(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028044_FORMAT(track->db_s_info);
	surf.tsplit = G_028044_TILE_SPLIT(track->db_s_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	if (surf.format != 1) {
		dev_warn(p->dev, "%s:%d stencil invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	/* replace by a color format so we can reuse the same code */
	surf.format = V_028C70_COLOR_8;

	r = evergreen_surface_value_conv_check(p, &surf, "stencil");
	if (r) {
		return r;
	}

	r = evergreen_surface_check(p, &surf, NULL);
	if (r) {
		/* old userspace doesn't compute proper depth/stencil
		 * alignment; check the alignment against a bigger bytes per
		 * element and only report an error if that alignment is
		 * wrong too.
		 */
		surf.format = V_028C70_COLOR_8_8_8_8;
		r = evergreen_surface_check(p, &surf, "stencil");
		if (r) {
			dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
				 __func__, __LINE__, track->db_depth_size,
				 track->db_depth_slice, track->db_s_info, track->db_z_info);
		}
		return r;
	}

	offset = track->db_s_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_read_bo)) {
		dev_warn(p->dev, "%s:%d stencil read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_read_offset << 8, mslice,
			 radeon_bo_size(track->db_s_read_bo));
		dev_warn(p->dev, "%s:%d stencil invalid (0x%08x 0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_s_info, track->db_z_info);
		return -EINVAL;
	}

	offset = track->db_s_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d stencil write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_s_write_bo)) {
		dev_warn(p->dev, "%s:%d stencil write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_s_write_offset << 8, mslice,
			 radeon_bo_size(track->db_s_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}

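/**
 * evergreen_cs_track_validate_depth() - validate the depth buffer binding
 * @p: parser structure holding parsing context
 *
 * Maps the depth format onto an equivalent color format so the common
 * surface checkers can be reused, then verifies alignment and size of the
 * read and write bos, plus the htile surface when hyper-z is enabled.
 */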
static int evergreen_cs_track_validate_depth(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	struct eg_surface surf;
	unsigned pitch, slice, mslice;
	unsigned long offset;
	int r;

	mslice = G_028008_SLICE_MAX(track->db_depth_view) + 1;
	pitch = G_028058_PITCH_TILE_MAX(track->db_depth_size);
	slice = track->db_depth_slice;
	surf.nbx = (pitch + 1) * 8;
	surf.nby = ((slice + 1) * 64) / surf.nbx;
	surf.mode = G_028040_ARRAY_MODE(track->db_z_info);
	surf.format = G_028040_FORMAT(track->db_z_info);
	surf.tsplit = G_028040_TILE_SPLIT(track->db_z_info);
	surf.nbanks = G_028040_NUM_BANKS(track->db_z_info);
	surf.bankw = G_028040_BANK_WIDTH(track->db_z_info);
	surf.bankh = G_028040_BANK_HEIGHT(track->db_z_info);
	surf.mtilea = G_028040_MACRO_TILE_ASPECT(track->db_z_info);
	surf.nsamples = 1;

	switch (surf.format) {
	case V_028040_Z_16:
		surf.format = V_028C70_COLOR_16;
		break;
	case V_028040_Z_24:
	case V_028040_Z_32_FLOAT:
		surf.format = V_028C70_COLOR_8_8_8_8;
		break;
	default:
		dev_warn(p->dev, "%s:%d depth invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	r = evergreen_surface_check(p, &surf, "depth");
	if (r) {
		dev_warn(p->dev, "%s:%d depth invalid (0x%08x 0x%08x 0x%08x)\n",
			 __func__, __LINE__, track->db_depth_size,
			 track->db_depth_slice, track->db_z_info);
		return r;
	}

	offset = track->db_z_read_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth read bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_read_bo)) {
		dev_warn(p->dev, "%s:%d depth read bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_read_offset << 8, mslice,
			 radeon_bo_size(track->db_z_read_bo));
		return -EINVAL;
	}

	offset = track->db_z_write_offset << 8;
	if (offset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d depth write bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, offset, surf.base_align);
		return -EINVAL;
	}
	offset += surf.layer_size * mslice;
	if (offset > radeon_bo_size(track->db_z_write_bo)) {
		dev_warn(p->dev, "%s:%d depth write bo too small (layer size %d, "
			 "offset %ld, max layer %d, bo size %ld)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)track->db_z_write_offset << 8, mslice,
			 radeon_bo_size(track->db_z_write_bo));
		return -EINVAL;
	}

	/* hyperz */
	if (G_028040_TILE_SURFACE_ENABLE(track->db_z_info)) {
		r = evergreen_cs_track_validate_htile(p, surf.nbx, surf.nby);
		if (r) {
			return r;
		}
	}

	return 0;
}

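/**
 * evergreen_cs_track_validate_texture() - validate a texture resource
 * @p: parser structure holding parsing context
 * @texture: bo backing the base level
 * @mipmap: bo backing the mip chain, may be NULL when there are no mips
 * @idx: index of the eight texture resource dwords in the ib
 *
 * Decodes the texture resource dwords, validates the base level surface,
 * then walks every mip level, re-deriving the (possibly degraded) array
 * mode and alignment, and checks that each level fits in its bo.
 */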
static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p,
					       struct radeon_bo *texture,
					       struct radeon_bo *mipmap,
					       unsigned idx)
{
	struct eg_surface surf;
	unsigned long toffset, moffset;
	unsigned dim, llevel, mslice, width, height, depth, i;
	u32 texdw[8];
	int r;

	texdw[0] = radeon_get_ib_value(p, idx + 0);
	texdw[1] = radeon_get_ib_value(p, idx + 1);
	texdw[2] = radeon_get_ib_value(p, idx + 2);
	texdw[3] = radeon_get_ib_value(p, idx + 3);
	texdw[4] = radeon_get_ib_value(p, idx + 4);
	texdw[5] = radeon_get_ib_value(p, idx + 5);
	texdw[6] = radeon_get_ib_value(p, idx + 6);
	texdw[7] = radeon_get_ib_value(p, idx + 7);
	dim = G_030000_DIM(texdw[0]);
	llevel = G_030014_LAST_LEVEL(texdw[5]);
	mslice = G_030014_LAST_ARRAY(texdw[5]) + 1;
	width = G_030000_TEX_WIDTH(texdw[0]) + 1;
	height = G_030004_TEX_HEIGHT(texdw[1]) + 1;
	depth = G_030004_TEX_DEPTH(texdw[1]) + 1;
	surf.format = G_03001C_DATA_FORMAT(texdw[7]);
	surf.nbx = (G_030000_PITCH(texdw[0]) + 1) * 8;
	surf.nbx = r600_fmt_get_nblocksx(surf.format, surf.nbx);
	surf.nby = r600_fmt_get_nblocksy(surf.format, height);
	surf.mode = G_030004_ARRAY_MODE(texdw[1]);
	surf.tsplit = G_030018_TILE_SPLIT(texdw[6]);
	surf.nbanks = G_03001C_NUM_BANKS(texdw[7]);
	surf.bankw = G_03001C_BANK_WIDTH(texdw[7]);
	surf.bankh = G_03001C_BANK_HEIGHT(texdw[7]);
	surf.mtilea = G_03001C_MACRO_TILE_ASPECT(texdw[7]);
	surf.nsamples = 1;
	toffset = texdw[2] << 8;
	moffset = texdw[3] << 8;

	if (!r600_fmt_is_valid_texture(surf.format, p->family)) {
		dev_warn(p->dev, "%s:%d texture invalid format %d\n",
			 __func__, __LINE__, surf.format);
		return -EINVAL;
	}
	switch (dim) {
	case V_030000_SQ_TEX_DIM_1D:
	case V_030000_SQ_TEX_DIM_2D:
	case V_030000_SQ_TEX_DIM_CUBEMAP:
	case V_030000_SQ_TEX_DIM_1D_ARRAY:
	case V_030000_SQ_TEX_DIM_2D_ARRAY:
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_2D_MSAA:
	case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA:
		surf.nsamples = 1 << llevel;
		llevel = 0;
		depth = 1;
		break;
	case V_030000_SQ_TEX_DIM_3D:
		break;
	default:
		dev_warn(p->dev, "%s:%d texture invalid dimension %d\n",
			 __func__, __LINE__, dim);
		return -EINVAL;
	}

	r = evergreen_surface_value_conv_check(p, &surf, "texture");
	if (r) {
		return r;
	}

	/* align height */
	evergreen_surface_check(p, &surf, NULL);
	surf.nby = ALIGN(surf.nby, surf.halign);

	r = evergreen_surface_check(p, &surf, "texture");
	if (r) {
		dev_warn(p->dev, "%s:%d texture invalid 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 __func__, __LINE__, texdw[0], texdw[1], texdw[4],
			 texdw[5], texdw[6], texdw[7]);
		return r;
	}

	/* check texture size */
	if (toffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d texture bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, toffset, surf.base_align);
		return -EINVAL;
	}
	if (moffset & (surf.base_align - 1)) {
		dev_warn(p->dev, "%s:%d mipmap bo base %ld not aligned with %ld\n",
			 __func__, __LINE__, moffset, surf.base_align);
		return -EINVAL;
	}
	if (dim == SQ_TEX_DIM_3D) {
		toffset += surf.layer_size * depth;
	} else {
		toffset += surf.layer_size * mslice;
	}
	if (toffset > radeon_bo_size(texture)) {
		dev_warn(p->dev, "%s:%d texture bo too small (layer size %d, "
			 "offset %ld, max layer %d, depth %d, bo size %ld) (%d %d)\n",
			 __func__, __LINE__, surf.layer_size,
			 (unsigned long)texdw[2] << 8, mslice,
			 depth, radeon_bo_size(texture),
			 surf.nbx, surf.nby);
		return -EINVAL;
	}

	if (!mipmap) {
		if (llevel) {
			dev_warn(p->dev, "%s:%i got NULL MIP_ADDRESS relocation\n",
				 __func__, __LINE__);
			return -EINVAL;
		} else {
			return 0; /* everything's ok */
		}
	}

	/* check mipmap size */
	for (i = 1; i <= llevel; i++) {
		unsigned w, h, d;

		w = r600_mip_minify(width, i);
		h = r600_mip_minify(height, i);
		d = r600_mip_minify(depth, i);
		surf.nbx = r600_fmt_get_nblocksx(surf.format, w);
		surf.nby = r600_fmt_get_nblocksy(surf.format, h);

		switch (surf.mode) {
		case ARRAY_2D_TILED_THIN1:
			if (surf.nbx < surf.palign || surf.nby < surf.halign) {
				surf.mode = ARRAY_1D_TILED_THIN1;
			}
			/* recompute alignment */
			evergreen_surface_check(p, &surf, NULL);
			break;
		case ARRAY_LINEAR_GENERAL:
		case ARRAY_LINEAR_ALIGNED:
		case ARRAY_1D_TILED_THIN1:
			break;
		default:
			dev_warn(p->dev, "%s:%d invalid array mode %d\n",
				 __func__, __LINE__, surf.mode);
			return -EINVAL;
		}
		surf.nbx = ALIGN(surf.nbx, surf.palign);
		surf.nby = ALIGN(surf.nby, surf.halign);

		r = evergreen_surface_check(p, &surf, "mipmap");
		if (r) {
			return r;
		}

		if (dim == SQ_TEX_DIM_3D) {
			moffset += surf.layer_size * d;
		} else {
			moffset += surf.layer_size * mslice;
		}
		if (moffset > radeon_bo_size(mipmap)) {
			dev_warn(p->dev, "%s:%d mipmap [%d] bo too small (layer size %d, "
				 "offset %ld, coffset %ld, max layer %d, depth %d, "
				 "bo size %ld) level0 (%d %d %d)\n",
				 __func__, __LINE__, i, surf.layer_size,
				 (unsigned long)texdw[3] << 8, moffset, mslice,
				 d, radeon_bo_size(mipmap),
				 width, height, depth);
			dev_warn(p->dev, "%s:%d problematic surf: (%d %d) (%d %d %d %d %d %d %d)\n",
				 __func__, __LINE__, surf.nbx, surf.nby,
				 surf.mode, surf.bpe, surf.nsamples,
				 surf.bankw, surf.bankh,
				 surf.tsplit, surf.mtilea);
			return -EINVAL;
		}
	}

	return 0;
}

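/**
 * evergreen_cs_track_check() - validate all tracked state that changed
 * @p: parser structure holding parsing context
 *
 * Re-validates the streamout, color buffer and depth/stencil bindings
 * whose dirty flag is set, clearing each flag on success. Render target
 * checks are skipped entirely when SX_MISC kills all primitives.
 */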
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;
	unsigned tmp, i;
	int r;
	unsigned buffer_mask = 0;

	/* check streamout */
	if (track->streamout_dirty && track->vgt_strmout_config) {
		for (i = 0; i < 4; i++) {
			if (track->vgt_strmout_config & (1 << i)) {
				buffer_mask |= (track->vgt_strmout_buffer_config >> (i * 4)) & 0xf;
			}
		}

		for (i = 0; i < 4; i++) {
			if (buffer_mask & (1 << i)) {
				if (track->vgt_strmout_bo[i]) {
					u64 offset = (u64)track->vgt_strmout_bo_offset[i] +
							(u64)track->vgt_strmout_size[i];
					if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) {
						DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n",
							  i, offset,
							  radeon_bo_size(track->vgt_strmout_bo[i]));
						return -EINVAL;
					}
				} else {
					dev_warn(p->dev, "No buffer for streamout %d\n", i);
					return -EINVAL;
				}
			}
		}
		track->streamout_dirty = false;
	}

	if (track->sx_misc_kill_all_prims)
		return 0;

	/* check that we have a cb for each enabled target */
	if (track->cb_dirty) {
		tmp = track->cb_target_mask;
		for (i = 0; i < 8; i++) {
			if ((tmp >> (i * 4)) & 0xF) {
				/* at least one component is enabled */
				if (track->cb_color_bo[i] == NULL) {
					dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
						 __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
					return -EINVAL;
				}
				/* check cb */
				r = evergreen_cs_track_validate_cb(p, i);
				if (r) {
					return r;
				}
			}
		}
		track->cb_dirty = false;
	}

	if (track->db_dirty) {
		/* Check stencil buffer */
		if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID &&
		    G_028800_STENCIL_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_stencil(p);
			if (r)
				return r;
		}
		/* Check depth buffer */
		if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID &&
		    G_028800_Z_ENABLE(track->db_depth_control)) {
			r = evergreen_cs_track_validate_depth(p);
			if (r)
				return r;
		}
		track->db_dirty = false;
	}

	return 0;
}

/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: relocation information to return
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * This is an Evergreen(+)-specific function for parsing VLINE packets.
 * The real work is done by the r600_cs_common_vline_parse() function.
 * Here we just set up the ASIC-specific register table and call
 * the common implementation function.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	static uint32_t vline_start_end[6] = {
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_START_END + EVERGREEN_CRTC5_REGISTER_OFFSET
	};
	static uint32_t vline_status[6] = {
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET,
		EVERGREEN_VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET
	};

	return r600_cs_common_vline_parse(p, vline_start_end, vline_status);
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @p: parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe, this
 * function will test it against a list of registers needing special
 * handling.
 */
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg;
	u32 m, i, tmp, *ib;
	int r;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return 0;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return 0;
	}
	ib = p->ib.ptr;
	switch (reg) {
	/* force the following regs to 0 in an attempt to disable the out
	 * buffer; we would need to understand how it works better to
	 * perform a proper security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB, don't remove */
		/*tmp =radeon_get_ib_value(p, idx);
		ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		track->db_z_info = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] &= ~Z_ARRAY_MODE(0xf);
			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
			ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= DB_TILE_SPLIT(tile_split) |
					   DB_BANK_WIDTH(bankw) |
					   DB_BANK_HEIGHT(bankh) |
					   DB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		track->db_dirty = true;
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case R_02805C_DB_DEPTH_SLICE:
		track->db_depth_slice = radeon_get_ib_value(p, idx);
		track->db_dirty = true;
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_BASE_0:
	case VGT_STRMOUT_BUFFER_BASE_1:
	case VGT_STRMOUT_BUFFER_BASE_2:
	case VGT_STRMOUT_BUFFER_BASE_3:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16;
		track->vgt_strmout_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->vgt_strmout_bo[tmp] = reloc->robj;
		track->streamout_dirty = true;
		break;
	case VGT_STRMOUT_BUFFER_SIZE_0:
	case VGT_STRMOUT_BUFFER_SIZE_1:
	case VGT_STRMOUT_BUFFER_SIZE_2:
	case VGT_STRMOUT_BUFFER_SIZE_3:
		tmp = (reg - VGT_STRMOUT_BUFFER_SIZE_0) / 16;
		/* size in register is DWs, convert to bytes */
		track->vgt_strmout_size[tmp] = radeon_get_ib_value(p, idx) * 4;
		track->streamout_dirty = true;
		break;
	case CP_COHER_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "missing reloc for CP_COHER_BASE "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
Alex Deuchercb5fcbd2010-05-28 19:01:35 -04001367 case CB_TARGET_MASK:
1368 track->cb_target_mask = radeon_get_ib_value(p, idx);
Marek Olšák30838572012-03-19 03:09:35 +01001369 track->cb_dirty = true;
Alex Deuchercb5fcbd2010-05-28 19:01:35 -04001370 break;
1371 case CB_SHADER_MASK:
1372 track->cb_shader_mask = radeon_get_ib_value(p, idx);
Marek Olšák30838572012-03-19 03:09:35 +01001373 track->cb_dirty = true;
Alex Deuchercb5fcbd2010-05-28 19:01:35 -04001374 break;
1375 case PA_SC_AA_CONFIG:
Alex Deucherc175ca92011-03-02 20:07:37 -05001376 if (p->rdev->family >= CHIP_CAYMAN) {
1377 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1378 "0x%04X\n", reg);
1379 return -EINVAL;
1380 }
Alex Deuchercb5fcbd2010-05-28 19:01:35 -04001381 tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
1382 track->nsamples = 1 << tmp;
1383 break;
Alex Deucherc175ca92011-03-02 20:07:37 -05001384 case CAYMAN_PA_SC_AA_CONFIG:
1385 if (p->rdev->family < CHIP_CAYMAN) {
1386 dev_warn(p->dev, "bad SET_CONTEXT_REG "
1387 "0x%04X\n", reg);
1388 return -EINVAL;
1389 }
1390 tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
1391 track->nsamples = 1 << tmp;
1392 break;
Alex Deuchercb5fcbd2010-05-28 19:01:35 -04001393 case CB_COLOR0_VIEW:
1394 case CB_COLOR1_VIEW:
1395 case CB_COLOR2_VIEW:
1396 case CB_COLOR3_VIEW:
1397 case CB_COLOR4_VIEW:
1398 case CB_COLOR5_VIEW:
1399 case CB_COLOR6_VIEW:
1400 case CB_COLOR7_VIEW:
1401 tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
1402 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
Marek Olšák30838572012-03-19 03:09:35 +01001403 track->cb_dirty = true;
Alex Deuchercb5fcbd2010-05-28 19:01:35 -04001404 break;
1405 case CB_COLOR8_VIEW:
1406 case CB_COLOR9_VIEW:
1407 case CB_COLOR10_VIEW:
1408 case CB_COLOR11_VIEW:
1409 tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
1410 track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
Marek Olšák30838572012-03-19 03:09:35 +01001411 track->cb_dirty = true;
Alex Deuchercb5fcbd2010-05-28 19:01:35 -04001412 break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				dev_warn(p->dev, "bad SET_CONTEXT_REG "
					 "0x%04X\n", reg);
				return -EINVAL;
			}
			ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
		}
		track->cb_dirty = true;
		break;
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_dirty = true;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		track->cb_dirty = true;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR0_ATTRIB) / 0x3c);
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
				unsigned bankw, bankh, mtaspect, tile_split;

				evergreen_tiling_fields(reloc->lobj.tiling_flags,
							&bankw, &bankh, &mtaspect,
							&tile_split);
				ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
				ib[idx] |= CB_TILE_SPLIT(tile_split) |
					   CB_BANK_WIDTH(bankw) |
					   CB_BANK_HEIGHT(bankh) |
					   CB_MACRO_TILE_ASPECT(mtaspect);
			}
		}
		tmp = ((reg - CB_COLOR8_ATTRIB) / 0x1c) + 8;
		track->cb_color_attrib[tmp] = ib[idx];
		track->cb_dirty = true;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
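	/*
	 * Editorial note: the CB base/fmask/cmask registers take
	 * 256-byte-aligned addresses, which is presumably why relocated
	 * GPU offsets are folded in as (gpu_offset >> 8) here and in the
	 * neighbouring cases.
	 */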
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_bo[tmp] = reloc->robj;
		track->cb_dirty = true;
		break;
	case DB_HTILE_DATA_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->htile_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->htile_bo = reloc->robj;
		track->db_dirty = true;
		break;
	case DB_HTILE_SURFACE:
		/* 8x8 only */
		track->htile_surface = radeon_get_ib_value(p, idx);
		/* force 8x8 htile width and height */
		ib[idx] |= 3;
		track->db_dirty = true;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case CAYMAN_SX_SCATTER_EXPORT_BASE:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MISC:
		track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0;
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	u32 last_reg, m, i;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return false;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return true;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return true;
	}
	dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
	return false;
}

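/*
 * Editorial sketch of the bitmap lookup above, assuming "reg" is a byte
 * offset: each u32 of *_reg_safe_bm covers 32 dword registers (128 bytes),
 * so the word index is reg >> 7 and the bit index is (reg >> 2) & 31.
 * For reg == 0x28350 that is word 0x506, bit 20; a clear bit means the
 * register is safe to write without a reloc.
 */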
static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib.ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (idx_value & 0xfffffff0) +
			 ((u64)(tmp & 0xff) << 32);

		ib[idx + 0] = offset;
		ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff);
	}
	break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case CAYMAN_PACKET3_DEALLOC_STATE:
		if (p->rdev->family < CHIP_CAYMAN) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
	{
		uint64_t offset;

		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX:
	{
		uint64_t offset;
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 idx_value +
			 ((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_2:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 radeon_get_ib_value(p, idx+1) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset;
		ib[idx+2] = upper_32_bits(offset) & 0xff;

		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	}
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DISPATCH_DIRECT:
		if (pkt->count != 3) {
			DRM_ERROR("bad DISPATCH_DIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DISPATCH_INDIRECT:
		if (pkt->count != 1) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			uint64_t offset;

			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}

			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc);
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_CP_DMA:
	{
		u32 command, size, info;
		u64 offset, tmp;
		if (pkt->count != 4) {
			DRM_ERROR("bad CP DMA\n");
			return -EINVAL;
		}
		command = radeon_get_ib_value(p, idx+4);
		size = command & 0x1fffff;
		info = radeon_get_ib_value(p, idx+1);
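		/*
		 * Editorial note: the checks below treat bits [30:29] of the
		 * info dword as SRC_SEL and bits [21:20] as DST_SEL
		 * (0 = memory, 1 = GDS, and 2 = embedded DATA for the
		 * source), matching the 0x60000000 and 0x00300000 masks
		 * used throughout this case.
		 */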
		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
		    ((((info & 0x00300000) >> 20) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
		    ((((info & 0x60000000) >> 29) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
			/* non mem-to-mem copies require a dword-aligned count */
			if (size % 4) {
				DRM_ERROR("CP DMA command requires dw count alignment\n");
				return -EINVAL;
			}
		}
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			/* GDS is ok */
			if (((info & 0x60000000) >> 29) != 1) {
				DRM_ERROR("CP DMA SAS not supported\n");
				return -EINVAL;
			}
		} else {
			if (command & PACKET3_CP_DMA_CMD_SAIC) {
				DRM_ERROR("CP DMA SAIC only supported for registers\n");
				return -EINVAL;
			}
			/* src address space is memory */
			if (((info & 0x60000000) >> 29) == 0) {
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad CP DMA SRC\n");
					return -EINVAL;
				}

				tmp = radeon_get_ib_value(p, idx) +
					((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32);

				offset = reloc->lobj.gpu_offset + tmp;

				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
					dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n",
						 tmp + size, radeon_bo_size(reloc->robj));
					return -EINVAL;
				}

				ib[idx] = offset;
				ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
			} else if (((info & 0x60000000) >> 29) != 2) {
				DRM_ERROR("bad CP DMA SRC_SEL\n");
				return -EINVAL;
			}
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			/* GDS is ok */
			if (((info & 0x00300000) >> 20) != 1) {
				DRM_ERROR("CP DMA DAS not supported\n");
				return -EINVAL;
			}
		} else {
			/* dst address space is memory */
			if (command & PACKET3_CP_DMA_CMD_DAIC) {
				DRM_ERROR("CP DMA DAIC only supported for registers\n");
				return -EINVAL;
			}
			if (((info & 0x00300000) >> 20) == 0) {
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad CP DMA DST\n");
					return -EINVAL;
				}

				tmp = radeon_get_ib_value(p, idx+2) +
					((u64)(radeon_get_ib_value(p, idx+3) & 0xff) << 32);

				offset = reloc->lobj.gpu_offset + tmp;

				if ((tmp + size) > radeon_bo_size(reloc->robj)) {
					dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n",
						 tmp + size, radeon_bo_size(reloc->robj));
					return -EINVAL;
				}

				ib[idx+2] = offset;
				ib[idx+3] = upper_32_bits(offset) & 0xff;
			} else {
				DRM_ERROR("bad CP DMA DST_SEL\n");
				return -EINVAL;
			}
		}
		break;
	}
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			uint64_t offset;

			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			offset = reloc->lobj.gpu_offset +
				 (radeon_get_ib_value(p, idx+1) & 0xfffffff8) +
				 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

			ib[idx+1] = offset & 0xfffffff8;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
	{
		uint64_t offset;

		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_EVENT_WRITE_EOS:
	{
		uint64_t offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}

		offset = reloc->lobj.gpu_offset +
			 (radeon_get_ib_value(p, idx+1) & 0xfffffffc) +
			 ((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32);

		ib[idx+1] = offset & 0xfffffffc;
		ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff);
		break;
	}
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 toffset, moffset;
			u32 size, offset, mip_address, tex_dim;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
					ib[idx+1+(i*8)+1] |=
						TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
					if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
						unsigned bankw, bankh, mtaspect, tile_split;

						evergreen_tiling_fields(reloc->lobj.tiling_flags,
									&bankw, &bankh, &mtaspect,
									&tile_split);
						ib[idx+1+(i*8)+6] |= TEX_TILE_SPLIT(tile_split);
						ib[idx+1+(i*8)+7] |=
							TEX_BANK_WIDTH(bankw) |
							TEX_BANK_HEIGHT(bankh) |
							MACRO_TILE_ASPECT(mtaspect) |
							TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
					}
				}
				texture = reloc->robj;
				toffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);

				/* tex mip base */
				tex_dim = ib[idx+1+(i*8)+0] & 0x7;
				mip_address = ib[idx+1+(i*8)+3];

				if ((tex_dim == SQ_TEX_DIM_2D_MSAA || tex_dim == SQ_TEX_DIM_2D_ARRAY_MSAA) &&
				    !mip_address &&
				    !radeon_cs_packet_next_is_pkt3_nop(p)) {
					/* MIP_ADDRESS should point to FMASK for an MSAA texture.
					 * It should be 0 if FMASK is disabled. */
					moffset = 0;
					mipmap = NULL;
				} else {
					r = evergreen_cs_packet_next_reloc(p, &reloc);
					if (r) {
						DRM_ERROR("bad SET_RESOURCE (tex)\n");
						return -EINVAL;
					}
					moffset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
					mipmap = reloc->robj;
				}

				r = evergreen_cs_track_validate_texture(p, texture, mipmap, idx+1+(i*8));
				if (r)
					return r;
				ib[idx+1+(i*8)+2] += toffset;
				ib[idx+1+(i*8)+3] += moffset;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
			{
				uint64_t offset64;
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset;
				}

				offset64 = reloc->lobj.gpu_offset + offset;
				ib[idx+1+(i*8)+0] = offset64;
				ib[idx+1+(i*8)+2] = (ib[idx+1+(i*8)+2] & 0xffffff00) |
						    (upper_32_bits(offset64) & 0xff);
				break;
			}
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_STRMOUT_BUFFER_UPDATE:
		if (pkt->count != 4) {
			DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n");
			return -EINVAL;
		}
		/* Updating memory at DST_ADDRESS. */
		if (idx_value & 0x1) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		}
		/* Reading data from SRC_ADDRESS. */
		if (((idx_value >> 1) & 0x3) == 2) {
			u64 offset;
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		}
		break;
	case PACKET3_MEM_WRITE:
	{
		u64 offset;

		if (pkt->count != 3) {
			DRM_ERROR("bad MEM_WRITE (invalid count)\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad MEM_WRITE (missing reloc)\n");
			return -EINVAL;
		}
		offset = radeon_get_ib_value(p, idx+0);
		offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL;
		if (offset & 0x7) {
			DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n");
			return -EINVAL;
		}
		if ((offset + 8) > radeon_bo_size(reloc->robj)) {
			DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n",
				  offset + 8, radeon_bo_size(reloc->robj));
			return -EINVAL;
		}
		offset += reloc->lobj.gpu_offset;
		ib[idx+0] = offset;
		ib[idx+1] = upper_32_bits(offset) & 0xff;
		break;
	}
	case PACKET3_COPY_DW:
		if (pkt->count != 4) {
			DRM_ERROR("bad COPY_DW (invalid count)\n");
			return -EINVAL;
		}
		if (idx_value & 0x1) {
			u64 offset;
			/* SRC is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing src reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+1);
			offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+1] = offset;
			ib[idx+2] = upper_32_bits(offset) & 0xff;
		} else {
			/* SRC is a reg. */
			reg = radeon_get_ib_value(p, idx+1) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+1))
				return -EINVAL;
		}
		if (idx_value & 0x2) {
			u64 offset;
			/* DST is memory. */
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad COPY_DW (missing dst reloc)\n");
				return -EINVAL;
			}
			offset = radeon_get_ib_value(p, idx+3);
			offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32;
			if ((offset + 4) > radeon_bo_size(reloc->robj)) {
				DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n",
					  offset + 4, radeon_bo_size(reloc->robj));
				return -EINVAL;
			}
			offset += reloc->lobj.gpu_offset;
			ib[idx+3] = offset;
			ib[idx+4] = upper_32_bits(offset) & 0xff;
		} else {
			/* DST is a reg. */
			reg = radeon_get_ib_value(p, idx+3) << 2;
			if (!evergreen_is_safe_reg(p, reg, idx+3))
				return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

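/*
 * Editorial note, assuming the PACKET3() encoding from the radeon headers:
 * a type-3 header is laid out roughly as (3 << 30) | (count << 16) |
 * (opcode << 8); radeon_cs_packet_parse() decodes that into pkt->opcode
 * and pkt->count before evergreen_packet3_check() runs, which is why the
 * checks above only look at those two fields plus the payload dwords.
 */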
int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	u32 tmp;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		if (p->rdev->family >= CHIP_CAYMAN)
			tmp = p->rdev->config.cayman.tile_config;
		else
			tmp = p->rdev->config.evergreen.tile_config;

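		/*
		 * tile_config nibbles, as decoded by the switches below:
		 * [3:0] pipe count, [7:4] bank count, [11:8] group size,
		 * [15:12] row size.
		 */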
		switch (tmp & 0xf) {
		case 0:
			track->npipes = 1;
			break;
		case 1:
		default:
			track->npipes = 2;
			break;
		case 2:
			track->npipes = 4;
			break;
		case 3:
			track->npipes = 8;
			break;
		}

		switch ((tmp & 0xf0) >> 4) {
		case 0:
			track->nbanks = 4;
			break;
		case 1:
		default:
			track->nbanks = 8;
			break;
		case 2:
			track->nbanks = 16;
			break;
		}

		switch ((tmp & 0xf00) >> 8) {
		case 0:
			track->group_size = 256;
			break;
		case 1:
		default:
			track->group_size = 512;
			break;
		}

		switch ((tmp & 0xf000) >> 12) {
		case 0:
			track->row_size = 1;
			break;
		case 1:
		default:
			track->row_size = 2;
			break;
		case 2:
			track->row_size = 4;
			break;
		}

		p->track = track;
	}
	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	kfree(p->track);
	p->track = NULL;
	return 0;
}

/*
 * DMA
 */

#define GET_DMA_CMD(h) (((h) & 0xf0000000) >> 28)
#define GET_DMA_COUNT(h) ((h) & 0x000fffff)
#define GET_DMA_T(h) (((h) & 0x00800000) >> 23)
#define GET_DMA_NEW(h) (((h) & 0x04000000) >> 26)
#define GET_DMA_MISC(h) (((h) & 0x0700000) >> 20)

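/*
 * Editorial example of the header decode below, for an assumed header word
 * 0x44000010: GET_DMA_CMD -> 0x4, GET_DMA_COUNT -> 0x10, GET_DMA_T -> 0,
 * GET_DMA_NEW -> 1, GET_DMA_MISC -> 0.
 */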
/**
 * evergreen_dma_cs_parse() - parse the DMA IB
 * @p:		parser structure holding parsing context.
 *
 * Parses the DMA IB from the CS ioctl and updates
 * the GPU addresses based on the reloc information and
 * checks for errors. (Evergreen-Cayman)
 * Returns 0 for success and an error on failure.
 **/
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
	u32 header, cmd, count, tiled, new_cmd, misc;
	volatile u32 *ib = p->ib.ptr;
	u32 idx, idx_value;
	u64 src_offset, dst_offset, dst2_offset;
	int r;

	do {
		if (p->idx >= ib_chunk->length_dw) {
			DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
				  p->idx, ib_chunk->length_dw);
			return -EINVAL;
		}
		idx = p->idx;
		header = radeon_get_ib_value(p, idx);
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);
		new_cmd = GET_DMA_NEW(header);
		misc = GET_DMA_MISC(header);

		switch (cmd) {
		case DMA_PACKET_WRITE:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_WRITE\n");
				return -EINVAL;
			}
			if (tiled) {
				dst_offset = ib[idx+1];
				dst_offset <<= 8;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
				p->idx += count + 7;
			} else {
				dst_offset = ib[idx+1];
				dst_offset |= ((u64)(ib[idx+2] & 0xff)) << 32;

				ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
				ib[idx+2] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
				p->idx += count + 3;
			}
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n",
					 dst_offset, radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			break;
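		/*
		 * Editorial note: per the idx advances above, a tiled
		 * DMA_PACKET_WRITE occupies count + 7 dwords in the IB
		 * (header included) while a linear one occupies count + 3.
		 */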
		case DMA_PACKET_COPY:
			r = r600_dma_cs_next_reloc(p, &src_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_COPY\n");
				return -EINVAL;
			}
			if (tiled) {
				idx_value = radeon_get_ib_value(p, idx + 2);
				if (new_cmd) {
					switch (misc) {
					case 0:
						/* L2T, frame to fields */
						if (idx_value & (1 << 31)) {
							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
							return -EINVAL;
						}
						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
						if (r) {
							DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n");
							return -EINVAL;
						}
						dst_offset = ib[idx+1];
						dst_offset <<= 8;
						dst2_offset = ib[idx+2];
						dst2_offset <<= 8;
						src_offset = ib[idx+8];
						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n",
								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
							return -EINVAL;
						}
						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, frame to fields dst buffer too small (%llu %lu)\n",
								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
							return -EINVAL;
						}
						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, frame to fields dst2 buffer too small (%llu %lu)\n",
								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
							return -EINVAL;
						}
						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
						p->idx += 10;
						break;
					case 1:
						/* L2T, T2L partial */
						if (p->family < CHIP_CAYMAN) {
							DRM_ERROR("L2T, T2L Partial is cayman only !\n");
							return -EINVAL;
						}
						/* detile bit */
						if (idx_value & (1 << 31)) {
							/* tiled src, linear dst */
							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
						} else {
							/* linear src, tiled dst */
							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
						}
						p->idx += 12;
						break;
					case 3:
						/* L2T, broadcast */
						if (idx_value & (1 << 31)) {
							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
							return -EINVAL;
						}
						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
						if (r) {
							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
							return -EINVAL;
						}
						dst_offset = ib[idx+1];
						dst_offset <<= 8;
						dst2_offset = ib[idx+2];
						dst2_offset <<= 8;
						src_offset = ib[idx+8];
						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
							return -EINVAL;
						}
						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
							return -EINVAL;
						}
						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
							return -EINVAL;
						}
						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
						p->idx += 10;
						break;
					case 4:
						/* L2T, T2L */
						/* detile bit */
						if (idx_value & (1 << 31)) {
							/* tiled src, linear dst */
							src_offset = ib[idx+1];
							src_offset <<= 8;
							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

							dst_offset = ib[idx+7];
							dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
						} else {
							/* linear src, tiled dst */
							src_offset = ib[idx+7];
							src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

							dst_offset = ib[idx+1];
							dst_offset <<= 8;
							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
						}
						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
							return -EINVAL;
						}
						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
							return -EINVAL;
						}
						p->idx += 9;
						break;
					case 5:
						/* T2T partial */
						if (p->family < CHIP_CAYMAN) {
							DRM_ERROR("T2T Partial is cayman only !\n");
							return -EINVAL;
						}
						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);
						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
						p->idx += 13;
						break;
					case 7:
						/* L2T, broadcast */
						if (idx_value & (1 << 31)) {
							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
							return -EINVAL;
						}
						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
						if (r) {
							DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n");
							return -EINVAL;
						}
						dst_offset = ib[idx+1];
						dst_offset <<= 8;
						dst2_offset = ib[idx+2];
						dst2_offset <<= 8;
						src_offset = ib[idx+8];
						src_offset |= ((u64)(ib[idx+9] & 0xff)) << 32;
						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n",
								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
							return -EINVAL;
						}
						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n",
								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
							return -EINVAL;
						}
						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n",
								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
							return -EINVAL;
						}
						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset >> 8);
						ib[idx+8] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
						ib[idx+9] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
						p->idx += 10;
						break;
					default:
						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
						return -EINVAL;
					}
				} else {
					switch (misc) {
					case 0:
						/* detile bit */
						if (idx_value & (1 << 31)) {
							/* tiled src, linear dst */
							src_offset = ib[idx+1];
							src_offset <<= 8;
							ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset >> 8);

							dst_offset = ib[idx+7];
							dst_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
							ib[idx+7] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
							ib[idx+8] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
						} else {
							/* linear src, tiled dst */
							src_offset = ib[idx+7];
							src_offset |= ((u64)(ib[idx+8] & 0xff)) << 32;
							ib[idx+7] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
							ib[idx+8] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;

							dst_offset = ib[idx+1];
							dst_offset <<= 8;
							ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset >> 8);
						}
						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n",
								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
							return -EINVAL;
						}
						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
							dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n",
								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
							return -EINVAL;
						}
						p->idx += 9;
						break;
3027 default:
3028 DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
3029 return -EINVAL;
3030 }
3031 }
3032 } else {
3033 if (new_cmd) {
3034 switch (misc) {
3035 case 0:
3036 /* L2L, byte */
3037 src_offset = ib[idx+2];
3038 src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
3039 dst_offset = ib[idx+1];
3040 dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
3041 if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) {
3042 dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n",
3043 src_offset + count, radeon_bo_size(src_reloc->robj));
3044 return -EINVAL;
3045 }
3046 if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) {
3047 dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n",
3048 dst_offset + count, radeon_bo_size(dst_reloc->robj));
3049 return -EINVAL;
3050 }
3051 ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
3052 ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
3053 ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
3054 ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
3055 p->idx += 5;
3056 break;
3057 case 1:
3058 /* L2L, partial */
3059 if (p->family < CHIP_CAYMAN) {
3060 DRM_ERROR("L2L Partial is cayman only !\n");
3061 return -EINVAL;
3062 }
						ib[idx+1] += (u32)(src_reloc->lobj.gpu_offset & 0xffffffff);
						ib[idx+2] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
						ib[idx+4] += (u32)(dst_reloc->lobj.gpu_offset & 0xffffffff);
						ib[idx+5] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;

						p->idx += 9;
						break;
					case 4:
						/* L2L, dw, broadcast */
						r = r600_dma_cs_next_reloc(p, &dst2_reloc);
						if (r) {
							DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n");
							return -EINVAL;
						}
						dst_offset = ib[idx+1];
						dst_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
						dst2_offset = ib[idx+2];
						dst2_offset |= ((u64)(ib[idx+5] & 0xff)) << 32;
						src_offset = ib[idx+3];
						src_offset |= ((u64)(ib[idx+6] & 0xff)) << 32;
						if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
							dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n",
								 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
							return -EINVAL;
						}
						if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
							dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n",
								 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
							return -EINVAL;
						}
						if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) {
							dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n",
								 dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj));
							return -EINVAL;
						}
						ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
						ib[idx+2] += (u32)(dst2_reloc->lobj.gpu_offset & 0xfffffffc);
						ib[idx+3] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
						ib[idx+4] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
						ib[idx+5] += upper_32_bits(dst2_reloc->lobj.gpu_offset) & 0xff;
						ib[idx+6] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
						p->idx += 7;
						break;
					default:
						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
						return -EINVAL;
					}
				} else {
					/* L2L, dw */
					src_offset = ib[idx+2];
					src_offset |= ((u64)(ib[idx+4] & 0xff)) << 32;
					dst_offset = ib[idx+1];
					dst_offset |= ((u64)(ib[idx+3] & 0xff)) << 32;
					if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) {
						dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n",
							 src_offset + (count * 4), radeon_bo_size(src_reloc->robj));
						return -EINVAL;
					}
					if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
						dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n",
							 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
						return -EINVAL;
					}
					ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+2] += (u32)(src_reloc->lobj.gpu_offset & 0xfffffffc);
					ib[idx+3] += upper_32_bits(dst_reloc->lobj.gpu_offset) & 0xff;
					ib[idx+4] += upper_32_bits(src_reloc->lobj.gpu_offset) & 0xff;
					p->idx += 5;
				}
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			r = r600_dma_cs_next_reloc(p, &dst_reloc);
			if (r) {
				DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n");
				return -EINVAL;
			}
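			/*
			 * The fill destination is a 40-bit address: the low 32
			 * bits sit in dword 1, bits 39:32 in bits 23:16 of dword 3.
			 */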
			dst_offset = ib[idx+1];
			dst_offset |= ((u64)(ib[idx+3] & 0x00ff0000)) << 16;
			if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) {
				dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n",
					 dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj));
				return -EINVAL;
			}
			ib[idx+1] += (u32)(dst_reloc->lobj.gpu_offset & 0xfffffffc);
			ib[idx+3] += (upper_32_bits(dst_reloc->lobj.gpu_offset) << 16) & 0x00ff0000;
			p->idx += 4;
			break;
		case DMA_PACKET_NOP:
			p->idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib.length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}

/* vm parser */
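/**
 * evergreen_vm_reg_valid() - determine if a register is safe for VM IBs
 * @reg: register offset in bytes
 *
 * Context registers (0x28000 and up) only affect the submitting
 * client's own state, so they always pass; any other register must be
 * on the explicit whitelist below. Returns true if the register may be
 * written from a VM IB.
 **/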
static bool evergreen_vm_reg_valid(u32 reg)
{
	/* context regs are fine */
	if (reg >= 0x28000)
		return true;

	/* check config regs */
	switch (reg) {
	case WAIT_UNTIL:
	case GRBM_GFX_INDEX:
	case CP_STRMOUT_CNTL:
	case CP_COHER_CNTL:
	case CP_COHER_SIZE:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_COMPUTE_DIM_X:
	case VGT_COMPUTE_DIM_Y:
	case VGT_COMPUTE_DIM_Z:
	case VGT_COMPUTE_START_X:
	case VGT_COMPUTE_START_Y:
	case VGT_COMPUTE_START_Z:
	case VGT_COMPUTE_INDEX:
	case VGT_COMPUTE_THREAD_GROUP_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case PA_CL_ENHANCE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case PA_SC_ENHANCE:
	case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
	case SQ_DYN_GPR_SIMD_LOCK_EN:
	case SQ_CONFIG:
	case SQ_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
	case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
	case SQ_CONST_MEM_BASE:
	case SQ_STATIC_THREAD_MGMT_1:
	case SQ_STATIC_THREAD_MGMT_2:
	case SQ_STATIC_THREAD_MGMT_3:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
	case TA_CNTL_AUX:
	case DB_DEBUG:
	case DB_DEBUG2:
	case DB_DEBUG3:
	case DB_DEBUG4:
	case DB_WATERMARKS:
	case TD_PS_BORDER_COLOR_INDEX:
	case TD_PS_BORDER_COLOR_RED:
	case TD_PS_BORDER_COLOR_GREEN:
	case TD_PS_BORDER_COLOR_BLUE:
	case TD_PS_BORDER_COLOR_ALPHA:
	case TD_VS_BORDER_COLOR_INDEX:
	case TD_VS_BORDER_COLOR_RED:
	case TD_VS_BORDER_COLOR_GREEN:
	case TD_VS_BORDER_COLOR_BLUE:
	case TD_VS_BORDER_COLOR_ALPHA:
	case TD_GS_BORDER_COLOR_INDEX:
	case TD_GS_BORDER_COLOR_RED:
	case TD_GS_BORDER_COLOR_GREEN:
	case TD_GS_BORDER_COLOR_BLUE:
	case TD_GS_BORDER_COLOR_ALPHA:
	case TD_HS_BORDER_COLOR_INDEX:
	case TD_HS_BORDER_COLOR_RED:
	case TD_HS_BORDER_COLOR_GREEN:
	case TD_HS_BORDER_COLOR_BLUE:
	case TD_HS_BORDER_COLOR_ALPHA:
	case TD_LS_BORDER_COLOR_INDEX:
	case TD_LS_BORDER_COLOR_RED:
	case TD_LS_BORDER_COLOR_GREEN:
	case TD_LS_BORDER_COLOR_BLUE:
	case TD_LS_BORDER_COLOR_ALPHA:
	case TD_CS_BORDER_COLOR_INDEX:
	case TD_CS_BORDER_COLOR_RED:
	case TD_CS_BORDER_COLOR_GREEN:
	case TD_CS_BORDER_COLOR_BLUE:
	case TD_CS_BORDER_COLOR_ALPHA:
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
	case CAYMAN_VGT_OFFCHIP_LDS_BASE:
	case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
		return true;
	default:
		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
		return false;
	}
}

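/**
 * evergreen_vm_packet3_check() - check a type-3 packet for a VM IB
 * @rdev: radeon_device pointer
 * @ib: pointer to the IB dwords
 * @pkt: decoded packet header
 *
 * Most type-3 packets only touch per-context state and pass through
 * unmodified; packets that can write registers (COND_WRITE, COPY_DW,
 * SET_CONFIG_REG, CP_DMA) have their targets validated against
 * evergreen_vm_reg_valid(). Returns 0 for success, -EINVAL otherwise.
 **/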
static int evergreen_vm_packet3_check(struct radeon_device *rdev,
				      u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;
	u32 command, info;

	switch (pkt->opcode) {
	case PACKET3_NOP:
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_MODE_CONTROL:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_DRAW_INDEX_OFFSET:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDEX:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_BOOL_CONST:
	case PACKET3_SET_LOOP_CONST:
	case PACKET3_SET_RESOURCE:
	case PACKET3_SET_SAMPLER:
	case PACKET3_SET_CTL_CONST:
	case PACKET3_SET_RESOURCE_OFFSET:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_RESOURCE_INDIRECT:
	case CAYMAN_PACKET3_DEALLOC_STATE:
		break;
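	/*
	 * COND_WRITE and COPY_DW may write a register: when the
	 * destination-select bit in dword 1 picks "register", the packet
	 * carries the register's dword address, which must pass the
	 * whitelist.
	 */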
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!evergreen_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
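	/*
	 * CP_DMA: the info dword (idx + 1) selects the source address
	 * space in bits 30:29 and the destination space in bits 21:20
	 * (0 = memory); the command dword (idx + 4) holds the byte count
	 * in its low 21 bits plus the SAS/DAS (register space) and
	 * SAIC/DAIC (no address increment) flags tested below.
	 */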
	case PACKET3_CP_DMA:
		command = ib[idx + 4];
		info = ib[idx + 1];
		if ((((info & 0x60000000) >> 29) != 0) || /* src = GDS or DATA */
		    (((info & 0x00300000) >> 20) != 0) || /* dst = GDS */
		    ((((info & 0x00300000) >> 20) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_DAS)) || /* dst = register */
		    ((((info & 0x60000000) >> 29) == 0) &&
		     (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */
			/* non mem-to-mem copies require a dword-aligned count */
			if ((command & 0x1fffff) % 4) {
				DRM_ERROR("CP DMA command requires dw count alignment\n");
				return -EINVAL;
			}
		}
		if (command & PACKET3_CP_DMA_CMD_SAS) {
			/* src address space is register */
			if (((info & 0x60000000) >> 29) == 0) {
				start_reg = idx_value << 2;
				if (command & PACKET3_CP_DMA_CMD_SAIC) {
					reg = start_reg;
					if (!evergreen_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad SRC register\n");
						return -EINVAL;
					}
				} else {
					for (i = 0; i < (command & 0x1fffff); i++) {
						reg = start_reg + (4 * i);
						if (!evergreen_vm_reg_valid(reg)) {
							DRM_ERROR("CP DMA Bad SRC register\n");
							return -EINVAL;
						}
					}
				}
			}
		}
		if (command & PACKET3_CP_DMA_CMD_DAS) {
			/* dst address space is register */
			if (((info & 0x00300000) >> 20) == 0) {
				start_reg = ib[idx + 2];
				if (command & PACKET3_CP_DMA_CMD_DAIC) {
					reg = start_reg;
					if (!evergreen_vm_reg_valid(reg)) {
						DRM_ERROR("CP DMA Bad DST register\n");
						return -EINVAL;
					}
				} else {
					for (i = 0; i < (command & 0x1fffff); i++) {
						reg = start_reg + (4 * i);
						if (!evergreen_vm_reg_valid(reg)) {
							DRM_ERROR("CP DMA Bad DST register\n");
							return -EINVAL;
						}
					}
				}
			}
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

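/**
 * evergreen_ib_parse() - parse the GFX IB for VM
 * @rdev: radeon_device pointer
 * @ib: radeon_ib pointer
 *
 * Walks the CP packets of an IB submitted through the VM CS ioctl and
 * rejects anything that is not a recognized type-2 or type-3 packet.
 * Returns 0 for success and an error on failure.
 **/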
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.idx = idx;
		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
		pkt.one_reg_wr = 0;
		switch (pkt.type) {
		case PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case PACKET_TYPE2:
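			/* type-2 packets are single-dword padding */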
			idx += 1;
			break;
		case PACKET_TYPE3:
			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d!\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}

/**
 * evergreen_dma_ib_parse() - parse the DMA IB for VM
 * @rdev: radeon_device pointer
 * @ib: radeon_ib pointer
 *
 * Parses the DMA IB from the VM CS ioctl and
 * checks it for errors (Cayman-SI).
 * Returns 0 for success and an error on failure.
 **/
int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	u32 idx = 0;
	u32 header, cmd, count, tiled, new_cmd, misc;

	do {
		header = ib->ptr[idx];
		cmd = GET_DMA_CMD(header);
		count = GET_DMA_COUNT(header);
		tiled = GET_DMA_T(header);
		new_cmd = GET_DMA_NEW(header);
		misc = GET_DMA_MISC(header);

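		/*
		 * No relocation patching is needed under the VM, so only the
		 * per-packet dword footprints are checked; the sizes mirror
		 * those consumed by the full checker above.
		 */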
		switch (cmd) {
		case DMA_PACKET_WRITE:
			if (tiled)
				idx += count + 7;
			else
				idx += count + 3;
			break;
		case DMA_PACKET_COPY:
			if (tiled) {
				if (new_cmd) {
					switch (misc) {
					case 0:
						/* L2T, frame to fields */
						idx += 10;
						break;
					case 1:
						/* L2T, T2L partial */
						idx += 12;
						break;
					case 3:
						/* L2T, broadcast */
						idx += 10;
						break;
					case 4:
						/* L2T, T2L */
						idx += 9;
						break;
					case 5:
						/* T2T partial */
						idx += 13;
						break;
					case 7:
						/* L2T, broadcast */
						idx += 10;
						break;
					default:
						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
						return -EINVAL;
					}
				} else {
					switch (misc) {
					case 0:
						idx += 9;
						break;
					default:
						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
						return -EINVAL;
					}
				}
			} else {
				if (new_cmd) {
					switch (misc) {
					case 0:
						/* L2L, byte */
						idx += 5;
						break;
					case 1:
						/* L2L, partial */
						idx += 9;
						break;
					case 4:
						/* L2L, dw, broadcast */
						idx += 7;
						break;
					default:
						DRM_ERROR("bad DMA_PACKET_COPY misc %u\n", misc);
						return -EINVAL;
					}
				} else {
					/* L2L, dw */
					idx += 5;
				}
			}
			break;
		case DMA_PACKET_CONSTANT_FILL:
			idx += 4;
			break;
		case DMA_PACKET_NOP:
			idx += 1;
			break;
		default:
			DRM_ERROR("Unknown packet type %d at %d!\n", cmd, idx);
			return -EINVAL;
		}
	} while (idx < ib->length_dw);

	return 0;
}