/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 */

#include "genhw/genhw.h"
#include "kmd/winsys.h"
#include "buf.h"
#include "dev.h"
#include "format.h"
#include "gpu.h"
#include "img.h"
#include "mem.h"
#include "view.h"

static void surface_state_null_gen7(const struct intel_gpu *gpu,
                                    uint32_t dw[8])
{
    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 62:
     *
     * "A null surface is used in instances where an actual surface is not
     *  bound. When a write message is generated to a null surface, no
     *  actual surface is written to. When a read message (including any
     *  sampling engine message) is generated to a null surface, the result
     *  is all zeros. Note that a null surface type is allowed to be used
     *  with all messages, even if it is not specifically indicated as
     *  supported. All of the remaining fields in surface state are ignored
     *  for null surfaces, with the following exceptions:
     *
     *   * Width, Height, Depth, LOD, and Render Target View Extent fields
     *     must match the depth buffer's corresponding state for all render
     *     target surfaces, including null.
     *   * All sampling engine and data port messages support null surfaces
     *     with the above behavior, even if not mentioned as specifically
     *     supported, except for the following:
     *     * Data Port Media Block Read/Write messages.
     *   * The Surface Type of a surface used as a render target (accessed
     *     via the Data Port's Render Target Write message) must be the same
     *     as the Surface Type of all other render targets and of the depth
     *     buffer (defined in 3DSTATE_DEPTH_BUFFER), unless either the depth
     *     buffer or render targets are SURFTYPE_NULL."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 65:
     *
     * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
     *  true"
     */

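    /*
     * X-tiling (shifted into DW0 by hand below) appears to be chosen to
     * satisfy the "Tiled Surface must be true" requirement quoted above.
     */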
    dw[0] = GEN6_SURFTYPE_NULL << GEN7_SURFACE_DW0_TYPE__SHIFT |
            GEN6_FORMAT_B8G8R8A8_UNORM << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_TILING_X << 13;

    dw[1] = 0;
    dw[2] = 0;
    dw[3] = 0;
    dw[4] = 0;
    dw[5] = 0;
    dw[6] = 0;
    dw[7] = 0;
}

static void surface_state_buf_gen7(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[8])
{
    const bool typed = !icd_format_is_undef(elem_format);
    const bool structured = (!typed && struct_size > 1);
    const int elem_size = (typed) ?
        icd_format_get_size(elem_format) : 1;
    int width, height, depth, pitch;
    int surface_type, surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = (structured) ? GEN7_SURFTYPE_STRBUF : GEN6_SURFTYPE_BUFFER;

    surface_format = (typed) ?
        intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size && !structured)
        num_entries++;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 67:
     *
     * "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *  Address) specifies the base address of first element of the
     *  surface. The surface is interpreted as a simple array of that
     *  single element type. The address must be naturally-aligned to the
     *  element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *  must be 16-byte aligned)
     *
     *  For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *  the base address of the first element of the surface, computed in
     *  software by adding the surface base address to the byte offset of
     *  the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     * "For typed buffer and structured buffer surfaces, the number of
     *  entries in the buffer ranges from 1 to 2^27. For raw buffer
     *  surfaces, the number of entries in the buffer is the number of
     *  bytes which can range from 1 to 2^30."
     */
    assert(num_entries >= 1 &&
           num_entries <= 1 << ((typed || structured) ? 27 : 30));

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 69:
     *
     * "For SURFTYPE_BUFFER: The low two bits of this field (Width) must be
     *  11 if the Surface Format is RAW (the size of the buffer must be a
     *  multiple of 4 bytes)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     * "For surfaces of type SURFTYPE_BUFFER and SURFTYPE_STRBUF, this
     *  field (Surface Pitch) indicates the size of the structure."
     *
     * "For linear surfaces with Surface Type of SURFTYPE_STRBUF, the pitch
     *  must be a multiple of 4 bytes."
     */
    if (structured)
        assert(struct_size % 4 == 0);
    else if (!typed)
        assert(num_entries % 4 == 0);

    pitch = struct_size;

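    /*
     * SURFACE_STATE wants these values minus one; the entry count is then
     * split across the Width, Height, and Depth fields below.
     */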
    pitch--;
    num_entries--;
    /* bits [6:0] */
    width = (num_entries & 0x0000007f);
    /* bits [20:7] */
    height = (num_entries & 0x001fff80) >> 7;
    /* bits [30:21] */
    depth = (num_entries & 0x7fe00000) >> 21;
    /* limit to [26:21] */
    if (typed || structured)
        depth &= 0x3f;

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            pitch;

    dw[4] = 0;
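    /* MOCS: request L3 caching for accesses through this surface */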
    dw[5] = GEN7_MOCS_L3_ON << GEN7_SURFACE_DW5_MOCS__SHIFT;

    dw[6] = 0;
    dw[7] = 0;

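    /*
     * Haswell adds the SCS channel-select fields; program an identity
     * swizzle explicitly.
     */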
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}

static int img_type_to_view_type(XGL_IMAGE_TYPE type)
{
    switch (type) {
    case XGL_IMAGE_1D: return XGL_IMAGE_VIEW_1D;
    case XGL_IMAGE_2D: return XGL_IMAGE_VIEW_2D;
    case XGL_IMAGE_3D: return XGL_IMAGE_VIEW_3D;
    default: assert(!"unknown img type"); return XGL_IMAGE_VIEW_1D;
    }
}

static int view_type_to_surface_type(XGL_IMAGE_VIEW_TYPE type)
{
    switch (type) {
    case XGL_IMAGE_VIEW_1D: return GEN6_SURFTYPE_1D;
    case XGL_IMAGE_VIEW_2D: return GEN6_SURFTYPE_2D;
    case XGL_IMAGE_VIEW_3D: return GEN6_SURFTYPE_3D;
    case XGL_IMAGE_VIEW_CUBE: return GEN6_SURFTYPE_CUBE;
    default: assert(!"unknown view type"); return GEN6_SURFTYPE_NULL;
    }
}

static int winsys_tiling_to_surface_tiling(enum intel_tiling_mode tiling)
{
    switch (tiling) {
    case INTEL_TILING_NONE: return GEN6_TILING_NONE;
    case INTEL_TILING_X: return GEN6_TILING_X;
    case INTEL_TILING_Y: return GEN6_TILING_Y;
    default: assert(!"unknown tiling"); return GEN6_TILING_NONE;
    }
}

static int channel_swizzle_to_scs(XGL_CHANNEL_SWIZZLE swizzle)
{
    switch (swizzle) {
    case XGL_CHANNEL_SWIZZLE_ZERO: return GEN75_SCS_ZERO;
    case XGL_CHANNEL_SWIZZLE_ONE: return GEN75_SCS_ONE;
    case XGL_CHANNEL_SWIZZLE_R: return GEN75_SCS_RED;
    case XGL_CHANNEL_SWIZZLE_G: return GEN75_SCS_GREEN;
    case XGL_CHANNEL_SWIZZLE_B: return GEN75_SCS_BLUE;
    case XGL_CHANNEL_SWIZZLE_A: return GEN75_SCS_ALPHA;
    default: assert(!"unknown swizzle"); return GEN75_SCS_ZERO;
    }
}

static void surface_state_tex_gen7(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   XGL_CHANNEL_MAPPING swizzles,
                                   bool is_rt,
                                   uint32_t dw[8])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Ivy Bridge PRM, volume 4 part 1, page 70:
         *
         * "For SURFTYPE_CUBE: For Sampling Engine Surfaces, the range of
         *  this field is [0,340], indicating the number of cube array
         *  elements (equal to the number of underlying 2D array elements
         *  divided by 6). For other surfaces, this field must be zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    assert(first_layer < 2048 && num_layers <= 2048);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 16384 && height == 1 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 16384 && height <= 16384 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 16384 && height <= 16384 && depth <= 86);
        assert(width == height);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     * "The Base Address for linear render target surfaces and surfaces
     *  accessed with the typed surface read/write data port messages must
     *  be element-size aligned, for non-YUV surface formats, or a multiple
     *  of 2 element-sizes for YUV surface formats. Other linear surfaces
     *  have no alignment requirements (byte alignment is sufficient)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     * "For linear render target surfaces and surfaces accessed with the
     *  typed data port messages, the pitch must be a multiple of the
     *  element size for non-YUV surface formats. Pitch must be a multiple
     *  of 2 * element size for YUV surface formats. For linear surfaces
     *  with Surface Type of SURFTYPE_STRBUF, the pitch must be a multiple
     *  of 4 bytes. For other linear surfaces, the pitch can be any multiple
     *  of bytes."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 74:
     *
     * "For linear surfaces, this field (X Offset) must be zero."
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size U_ASSERT_ONLY = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling) << 13;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 63:
     *
     * "If this field (Surface Array) is enabled, the Surface Type must be
     *  SURFTYPE_1D, SURFTYPE_2D, or SURFTYPE_CUBE. If this field is
     *  disabled and Surface Type is SURFTYPE_1D, SURFTYPE_2D, or
     *  SURFTYPE_CUBE, the Depth field must be set to zero."
     *
     * For non-3D sampler surfaces, resinfo (the sampler message) always
     * returns zero for the number of layers when this field is not set.
     */
    if (surface_type != GEN6_SURFTYPE_3D) {
        if (num_layers > 1)
            dw[0] |= GEN7_SURFACE_DW0_IS_ARRAY;
        else
            assert(depth == 1);
    }

    assert(img->layout.align_i == 4 || img->layout.align_i == 8);
    assert(img->layout.align_j == 2 || img->layout.align_j == 4);

    if (img->layout.align_j == 4)
        dw[0] |= GEN7_SURFACE_DW0_VALIGN_4;

    if (img->layout.align_i == 8)
        dw[0] |= GEN7_SURFACE_DW0_HALIGN_8;

    if (img->layout.walk == INTEL_LAYOUT_WALK_LOD)
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_LOD0;
    else
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_FULL;

    if (is_rt)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt)
        dw[0] |= GEN7_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = (depth - 1) << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1);

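    /*
     * DW4 carries the starting array element and the number of elements
     * exposed by this view (minus one), hence the hand-coded shifts.
     */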
    dw[4] = first_layer << 18 |
            (num_layers - 1) << 7;

    /*
     * MSFMT_MSS means the samples are not interleaved and MSFMT_DEPTH_STENCIL
     * means the samples are interleaved. The layouts are the same when the
     * number of samples is 1.
     */
    if (img->layout.interleaved_samples && img->samples > 1) {
        assert(!is_rt);
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_DEPTH_STENCIL;
    }
    else {
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_MSS;
    }

    if (img->samples > 4)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_8;
    else if (img->samples > 2)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_4;
    else
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_1;

    dw[5] = GEN7_MOCS_L3_ON << GEN7_SURFACE_DW5_MOCS__SHIFT |
            (first_level) << GEN7_SURFACE_DW5_MIN_LOD__SHIFT |
            lod;

    dw[6] = 0;
    dw[7] = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |=
            channel_swizzle_to_scs(swizzles.r) << GEN75_SURFACE_DW7_SCS_R__SHIFT |
            channel_swizzle_to_scs(swizzles.g) << GEN75_SURFACE_DW7_SCS_G__SHIFT |
            channel_swizzle_to_scs(swizzles.b) << GEN75_SURFACE_DW7_SCS_B__SHIFT |
            channel_swizzle_to_scs(swizzles.a) << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    } else {
        assert(swizzles.r == XGL_CHANNEL_SWIZZLE_R &&
               swizzles.g == XGL_CHANNEL_SWIZZLE_G &&
               swizzles.b == XGL_CHANNEL_SWIZZLE_B &&
               swizzles.a == XGL_CHANNEL_SWIZZLE_A);
    }
}

static void surface_state_null_gen6(const struct intel_gpu *gpu,
                                    uint32_t dw[6])
{
    INTEL_GPU_ASSERT(gpu, 6, 6);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 71:
     *
     * "A null surface will be used in instances where an actual surface is
     *  not bound. When a write message is generated to a null surface, no
     *  actual surface is written to. When a read message (including any
     *  sampling engine message) is generated to a null surface, the result
     *  is all zeros. Note that a null surface type is allowed to be used
     *  with all messages, even if it is not specifically indicated as
     *  supported. All of the remaining fields in surface state are ignored
     *  for null surfaces, with the following exceptions:
     *
     *   * [DevSNB+]: Width, Height, Depth, and LOD fields must match the
     *     depth buffer's corresponding state for all render target
     *     surfaces, including null.
     *   * Surface Format must be R8G8B8A8_UNORM."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 82:
     *
     * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
     *  true"
     */

    dw[0] = GEN6_SURFTYPE_NULL << GEN6_SURFACE_DW0_TYPE__SHIFT |
            GEN6_FORMAT_B8G8R8A8_UNORM << GEN6_SURFACE_DW0_FORMAT__SHIFT;

    dw[1] = 0;
    dw[2] = 0;
    dw[3] = GEN6_TILING_X;
    dw[4] = 0;
    dw[5] = 0;
}

static void surface_state_buf_gen6(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[6])
{
    const bool typed = !icd_format_is_undef(elem_format);
    const int elem_size = icd_format_get_size(elem_format);
    int width, height, depth, pitch;
    int surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    /*
     * For SURFTYPE_BUFFER, a SURFACE_STATE specifies an element of a
     * structure in a buffer.
     */

    surface_format = (typed) ?
        intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size)
        num_entries++;

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     * "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *  Address) specifies the base address of first element of the
     *  surface. The surface is interpreted as a simple array of that
     *  single element type. The address must be naturally-aligned to the
     *  element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *  must be 16-byte aligned).
     *
     *  For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *  the base address of the first element of the surface, computed in
     *  software by adding the surface base address to the byte offset of
     *  the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 77:
     *
     * "For buffer surfaces, the number of entries in the buffer ranges
     *  from 1 to 2^27."
     */
    assert(num_entries >= 1 && num_entries <= 1 << 27);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     * "For surfaces of type SURFTYPE_BUFFER, this field (Surface Pitch)
     *  indicates the size of the structure."
     */
    pitch = struct_size;

    pitch--;
    num_entries--;
    /* bits [6:0] */
    width = (num_entries & 0x0000007f);
    /* bits [19:7] */
    height = (num_entries & 0x000fff80) >> 7;
    /* bits [26:20] */
    depth = (num_entries & 0x07f00000) >> 20;

    dw[0] = GEN6_SURFTYPE_BUFFER << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN6_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            pitch << GEN6_SURFACE_DW3_PITCH__SHIFT;

    dw[4] = 0;
    dw[5] = 0;
}

static void surface_state_tex_gen6(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[6])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 4 part 1, page 81:
         *
         * "For SURFTYPE_CUBE: [DevSNB+]: for Sampling Engine Surfaces, the
         *  range of this field (Depth) is [0,84], indicating the number of
         *  cube array elements (equal to the number of underlying 2D array
         *  elements divided by 6). For other surfaces, this field must be
         *  zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 8192 && height == 1 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 8192 && height <= 8192 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        assert(first_layer < 2048 && num_layers <= 512);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 8192 && height <= 8192 && depth <= 85);
        assert(width == height);
        assert(first_layer < 512 && num_layers <= 512);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    /* non-full array spacing is supported only on GEN7+ */
    assert(img->layout.walk != INTEL_LAYOUT_WALK_LOD);
    /* non-interleaved samples are supported only on GEN7+ */
    if (img->samples > 1)
        assert(img->layout.interleaved_samples);

    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     * "Linear render target surface base addresses must be element-size
     *  aligned, for non-YUV surface formats, or a multiple of 2
     *  element-sizes for YUV surface formats. Other linear surfaces have
     *  no alignment requirements (byte alignment is sufficient.)"
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     * "For linear render target surfaces, the pitch must be a multiple
     *  of the element size for non-YUV surface formats. Pitch must be a
     *  multiple of 2 * element size for YUV surface formats."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 86:
     *
     * "For linear surfaces, this field (X Offset) must be zero"
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size U_ASSERT_ONLY = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_SURFACE_DW0_MIPLAYOUT_BELOW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt) {
        dw[0] |= 1 << 9 |
                 GEN6_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;
    }

    if (is_rt)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN6_SURFACE_DW2_WIDTH__SHIFT |
            lod << GEN6_SURFACE_DW2_MIP_COUNT_LOD__SHIFT;

    dw[3] = (depth - 1) << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1) << GEN6_SURFACE_DW3_PITCH__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling);

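    /*
     * DW4 packs the minimum LOD, the starting array element, the number of
     * array elements (minus one), and the multisample count.
     */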
    dw[4] = first_level << GEN6_SURFACE_DW4_MIN_LOD__SHIFT |
            first_layer << 17 |
            (num_layers - 1) << 8 |
            ((img->samples > 1) ? GEN6_SURFACE_DW4_MULTISAMPLECOUNT_4 :
                                  GEN6_SURFACE_DW4_MULTISAMPLECOUNT_1);

    dw[5] = 0;

    assert(img->layout.align_j == 2 || img->layout.align_j == 4);
    if (img->layout.align_j == 4)
        dw[5] |= GEN6_SURFACE_DW5_VALIGN_4;
}

struct ds_surface_info {
    int surface_type;
    int format;

    struct {
        unsigned stride;
        unsigned offset;
    } zs, stencil, hiz;

    unsigned width, height, depth;
    unsigned lod, first_layer, num_layers;
};

static void
ds_init_info_null(const struct intel_gpu *gpu,
                  struct ds_surface_info *info)
{
    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    memset(info, 0, sizeof(*info));

    info->surface_type = GEN6_SURFTYPE_NULL;
    info->format = GEN6_ZFORMAT_D32_FLOAT;
    info->width = 1;
    info->height = 1;
    info->depth = 1;
    info->num_layers = 1;
}

static void
ds_init_info(const struct intel_gpu *gpu,
             const struct intel_img *img,
             XGL_FORMAT format, unsigned level,
             unsigned first_layer, unsigned num_layers,
             struct ds_surface_info *info)
{
    bool separate_stencil;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    memset(info, 0, sizeof(*info));

    info->surface_type =
        view_type_to_surface_type(img_type_to_view_type(img->type));

    if (info->surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 325-326:
         *
         * "For Other Surfaces (Cube Surfaces):
         *  This field (Minimum Array Element) is ignored."
         *
         * "For Other Surfaces (Cube Surfaces):
         *  This field (Render Target View Extent) is ignored."
         *
         * As such, we cannot set first_layer and num_layers on cube surfaces.
         * To work around that, treat it as a 2D surface.
         */
        info->surface_type = GEN6_SURFTYPE_2D;
    }

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        separate_stencil = true;
    }
    else {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 317:
         *
         * "This field (Separate Stencil Buffer Enable) must be set to the
         *  same value (enabled or disabled) as Hierarchical Depth Buffer
         *  Enable."
         */
        separate_stencil = intel_img_can_enable_hiz(img, level);
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 317:
     *
     * "If this field (Hierarchical Depth Buffer Enable) is enabled, the
     *  Surface Format of the depth buffer cannot be
     *  D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT. Use of stencil
     *  requires the separate stencil buffer."
     *
     * From the Ironlake PRM, volume 2 part 1, page 330:
     *
     * "If this field (Separate Stencil Buffer Enable) is disabled, the
     *  Surface Format of the depth buffer cannot be D24_UNORM_X8_UINT."
     *
     * There is no similar restriction for GEN6. But when D24_UNORM_X8_UINT
     * is indeed used, the depth values output by the fragment shaders will
     * be different when read back.
     *
     * As for GEN7+, separate_stencil is always true.
     */
    switch (format) {
    case XGL_FMT_D16_UNORM:
        info->format = GEN6_ZFORMAT_D16_UNORM;
        break;
    case XGL_FMT_D32_SFLOAT:
        info->format = GEN6_ZFORMAT_D32_FLOAT;
        break;
    case XGL_FMT_D32_SFLOAT_S8_UINT:
        info->format = (separate_stencil) ?
            GEN6_ZFORMAT_D32_FLOAT :
            GEN6_ZFORMAT_D32_FLOAT_S8X24_UINT;
        break;
    case XGL_FMT_S8_UINT:
        if (separate_stencil) {
            info->format = GEN6_ZFORMAT_D32_FLOAT;
            break;
        }
        /* fall through */
    default:
        assert(!"unsupported depth/stencil format");
        ds_init_info_null(gpu, info);
        return;
        break;
    }

    if (format != XGL_FMT_S8_UINT)
        info->zs.stride = img->layout.bo_stride;

    if (img->s8_layout) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 329:
         *
         * "The pitch must be set to 2x the value computed based on width,
         *  as the stencil buffer is stored with two rows interleaved."
         *
         * According to the classic driver, we need to do the same for GEN7+
         * even though the Ivy Bridge PRM does not say anything about it.
         */
        info->stencil.stride = img->s8_layout->bo_stride * 2;

        if (intel_gpu_gen(gpu) == INTEL_GEN(6)) {
            unsigned x, y;

            assert(img->s8_layout->walk == INTEL_LAYOUT_WALK_LOD);

            /* offset to the level */
            intel_layout_get_slice_pos(img->s8_layout, level, 0, &x, &y);
            intel_layout_pos_to_mem(img->s8_layout, x, y, &x, &y);
            info->stencil.offset = intel_layout_mem_to_raw(img->s8_layout, x, y);
        }
    } else if (format == XGL_FMT_S8_UINT) {
        info->stencil.stride = img->layout.bo_stride * 2;
    }

    if (intel_img_can_enable_hiz(img, level)) {
        info->hiz.stride = img->layout.aux_stride;

        /* offset to the level */
        if (intel_gpu_gen(gpu) == INTEL_GEN(6))
            info->hiz.offset = img->layout.aux_offsets[level];
    }

    info->width = img->layout.width0;
    info->height = img->layout.height0;
    info->depth = (img->type == XGL_IMAGE_3D) ?
        img->depth : num_layers;

    info->lod = level;
    info->first_layer = first_layer;
    info->num_layers = num_layers;
}

static void ds_view_init(struct intel_ds_view *view,
                         const struct intel_gpu *gpu,
                         const struct intel_img *img,
                         XGL_FORMAT format, unsigned level,
                         unsigned first_layer, unsigned num_layers)
{
    const int max_2d_size U_ASSERT_ONLY =
        (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16384 : 8192;
    const int max_array_size U_ASSERT_ONLY =
        (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 2048 : 512;
    struct ds_surface_info info;
    uint32_t dw1, dw2, dw3, dw4, dw5, dw6;
    uint32_t *dw;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    if (img) {
        ds_init_info(gpu, img, format, level, first_layer, num_layers, &info);
    }
    else {
        ds_init_info_null(gpu, &info);
    }

    switch (info.surface_type) {
    case GEN6_SURFTYPE_NULL:
        break;
    case GEN6_SURFTYPE_1D:
        assert(info.width <= max_2d_size && info.height == 1 &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_2D:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_3D:
        assert(info.width <= 2048 && info.height <= 2048 && info.depth <= 2048);
        assert(info.first_layer < 2048 && info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth == 1);
        assert(info.first_layer == 0 && info.num_layers == 1);
        assert(info.width == info.height);
        break;
    default:
        assert(!"unexpected depth surface type");
        break;
    }

    dw1 = info.surface_type << 29 |
          info.format << 18;

    if (info.zs.stride) {
        /* required for GEN6+ */
        assert(info.zs.stride > 0 && info.zs.stride < 128 * 1024 &&
               info.zs.stride % 128 == 0);
        assert(info.width <= info.zs.stride);

        dw1 |= (info.zs.stride - 1);
    }

    dw2 = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        if (info.zs.stride)
            dw1 |= 1 << 28;

        if (info.stencil.stride)
            dw1 |= 1 << 27;

        if (info.hiz.stride)
            dw1 |= 1 << 22;

        dw3 = (info.height - 1) << 18 |
              (info.width - 1) << 4 |
              info.lod;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10 |
              GEN7_MOCS_L3_ON;

        dw5 = 0;

        dw6 = (info.num_layers - 1) << 21;
    }
    else {
        /* always Y-tiled */
        dw1 |= 1 << 27 |
               1 << 26;

        if (info.hiz.stride) {
            dw1 |= 1 << 22 |
                   1 << 21;
        }

        dw3 = (info.height - 1) << 19 |
              (info.width - 1) << 6 |
              info.lod << 2 |
              GEN6_DEPTH_DW3_MIPLAYOUT_BELOW;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10 |
              (info.num_layers - 1) << 1;

        dw5 = 0;

        dw6 = 0;
    }

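    /*
     * view->cmd holds the depth buffer dwords in dw[0..5], the separate
     * stencil buffer in dw[6..7], and the HiZ buffer in dw[8..9]; they are
     * presumably copied out when the corresponding 3DSTATE_* commands are
     * emitted.
     */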
    STATIC_ASSERT(ARRAY_SIZE(view->cmd) >= 10);
    dw = view->cmd;

    dw[0] = dw1;
    dw[1] = dw2;
    dw[2] = dw3;
    dw[3] = dw4;
    dw[4] = dw5;
    dw[5] = dw6;

    /* separate stencil */
    if (info.stencil.stride) {
        assert(info.stencil.stride > 0 && info.stencil.stride < 128 * 1024 &&
               info.stencil.stride % 128 == 0);

        dw[6] = info.stencil.stride - 1;
        dw[7] = img->s8_offset;

        if (intel_gpu_gen(gpu) >= INTEL_GEN(7))
            dw[6] |= GEN7_MOCS_L3_ON << GEN6_STENCIL_DW1_MOCS__SHIFT;
        if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5))
            dw[6] |= GEN75_STENCIL_DW1_STENCIL_BUFFER_ENABLE;
    }
    else {
        dw[6] = 0;
        dw[7] = 0;
    }

    /* hiz */
    if (info.hiz.stride) {
        dw[8] = info.hiz.stride - 1;
        dw[9] = img->aux_offset;

        if (intel_gpu_gen(gpu) >= INTEL_GEN(7))
            dw[8] |= GEN7_MOCS_L3_ON << GEN6_HIZ_DW1_MOCS__SHIFT;
    }
    else {
        dw[8] = 0;
        dw[9] = 0;
    }

    view->has_stencil = info.stencil.stride;
    view->has_hiz = info.hiz.stride;
}

void intel_null_view_init(struct intel_null_view *view,
                          struct intel_dev *dev)
{
    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_null_gen7(dev->gpu, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_null_gen6(dev->gpu, view->cmd);
        view->cmd_len = 6;
    }
}

static void buf_view_destroy(struct intel_obj *obj)
{
    struct intel_buf_view *view = intel_buf_view_from_obj(obj);

    intel_buf_view_destroy(view);
}

XGL_RESULT intel_buf_view_create(struct intel_dev *dev,
                                 const XGL_BUFFER_VIEW_CREATE_INFO *info,
                                 struct intel_buf_view **view_ret)
{
    struct intel_buf *buf = intel_buf(info->buffer);
    const bool will_write = (buf->usage &
            (XGL_BUFFER_USAGE_SHADER_ACCESS_WRITE_BIT |
             XGL_BUFFER_USAGE_SHADER_ACCESS_ATOMIC_BIT));
    XGL_FORMAT format;
    XGL_GPU_SIZE stride;
    uint32_t *cmd;
    struct intel_buf_view *view;
    int i;

    view = (struct intel_buf_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_BUFFER_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = buf_view_destroy;

    view->buf = buf;

    /*
     * The compiler expects uniform buffers to have pitch of
     * 4 for fragment shaders, but 16 for other stages. The format
     * must be XGL_FMT_R32G32B32A32_SFLOAT.
     */
    if (info->viewType == XGL_BUFFER_VIEW_RAW) {
        format = XGL_FMT_R32G32B32A32_SFLOAT;
        stride = 16;
    } else {
        format = info->format;
        stride = info->stride;
    }
    cmd = view->cmd;

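    /*
     * Build the SURFACE_STATE twice for RAW views: view->cmd uses the
     * 16-byte pitch expected by most stages and view->fs_cmd uses the
     * 4-byte pitch expected for fragment shaders. Non-RAW views simply
     * get a copy in fs_cmd.
     */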
    for (i = 0; i < 2; i++) {
        if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
            surface_state_buf_gen7(dev->gpu, info->offset,
                    info->range, stride, format,
                    will_write, will_write, cmd);
            view->cmd_len = 8;
        } else {
            surface_state_buf_gen6(dev->gpu, info->offset,
                    info->range, stride, format,
                    will_write, will_write, cmd);
            view->cmd_len = 6;
        }

        /* switch to view->fs_cmd */
        if (info->viewType == XGL_BUFFER_VIEW_RAW) {
            cmd = view->fs_cmd;
            stride = 4;
        } else {
            memcpy(view->fs_cmd, view->cmd, sizeof(uint32_t) * view->cmd_len);
            break;
        }
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_buf_view_destroy(struct intel_buf_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void img_view_destroy(struct intel_obj *obj)
{
    struct intel_img_view *view = intel_img_view_from_obj(obj);

    intel_img_view_destroy(view);
}

XGL_RESULT intel_img_view_create(struct intel_dev *dev,
                                 const XGL_IMAGE_VIEW_CREATE_INFO *info,
                                 struct intel_img_view **view_ret)
{
    struct intel_img *img = intel_img(info->image);
    struct intel_img_view *view;
    uint32_t mip_levels, array_size;
    XGL_CHANNEL_MAPPING state_swizzles;

    if (info->subresourceRange.baseMipLevel >= img->mip_levels ||
        info->subresourceRange.baseArraySlice >= img->array_size ||
        !info->subresourceRange.mipLevels ||
        !info->subresourceRange.arraySize)
        return XGL_ERROR_INVALID_VALUE;

    mip_levels = info->subresourceRange.mipLevels;
    if (mip_levels > img->mip_levels - info->subresourceRange.baseMipLevel)
        mip_levels = img->mip_levels - info->subresourceRange.baseMipLevel;

    array_size = info->subresourceRange.arraySize;
    if (array_size > img->array_size - info->subresourceRange.baseArraySlice)
        array_size = img->array_size - info->subresourceRange.baseArraySlice;

    view = (struct intel_img_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_IMAGE_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = img_view_destroy;

    view->img = img;
    view->min_lod = info->minLod;

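    /*
     * Only Gen7.5 (Haswell) surface states can apply the requested channel
     * mapping; on older parts it would have to be handled in the shader,
     * which this driver does not do (hence the warning below).
     */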
    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7.5)) {
        state_swizzles = info->channels;
        view->shader_swizzles.r = XGL_CHANNEL_SWIZZLE_R;
        view->shader_swizzles.g = XGL_CHANNEL_SWIZZLE_G;
        view->shader_swizzles.b = XGL_CHANNEL_SWIZZLE_B;
        view->shader_swizzles.a = XGL_CHANNEL_SWIZZLE_A;
    } else {
        state_swizzles.r = XGL_CHANNEL_SWIZZLE_R;
        state_swizzles.g = XGL_CHANNEL_SWIZZLE_G;
        state_swizzles.b = XGL_CHANNEL_SWIZZLE_B;
        state_swizzles.a = XGL_CHANNEL_SWIZZLE_A;
        view->shader_swizzles = info->channels;
    }

    /* shader_swizzles is ignored by the compiler */
    if (view->shader_swizzles.r != XGL_CHANNEL_SWIZZLE_R ||
        view->shader_swizzles.g != XGL_CHANNEL_SWIZZLE_G ||
        view->shader_swizzles.b != XGL_CHANNEL_SWIZZLE_B ||
        view->shader_swizzles.a != XGL_CHANNEL_SWIZZLE_A) {
        intel_dev_log(dev, XGL_DBG_MSG_WARNING,
                XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
                "image data swizzling is ignored");
    }

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_tex_gen7(dev->gpu, img, info->viewType, info->format,
                info->subresourceRange.baseMipLevel, mip_levels,
                info->subresourceRange.baseArraySlice, array_size,
                state_swizzles, false, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_tex_gen6(dev->gpu, img, info->viewType, info->format,
                info->subresourceRange.baseMipLevel, mip_levels,
                info->subresourceRange.baseArraySlice, array_size,
                false, view->cmd);
        view->cmd_len = 6;
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_img_view_destroy(struct intel_img_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void rt_view_destroy(struct intel_obj *obj)
{
    struct intel_rt_view *view = intel_rt_view_from_obj(obj);

    intel_rt_view_destroy(view);
}

XGL_RESULT intel_rt_view_create(struct intel_dev *dev,
                                const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
                                struct intel_rt_view **view_ret)
{
    static const XGL_CHANNEL_MAPPING identity_channel_mapping = {
        .r = XGL_CHANNEL_SWIZZLE_R,
        .g = XGL_CHANNEL_SWIZZLE_G,
        .b = XGL_CHANNEL_SWIZZLE_B,
        .a = XGL_CHANNEL_SWIZZLE_A,
    };
    struct intel_img *img = intel_img(info->image);
    struct intel_rt_view *view;

    view = (struct intel_rt_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_COLOR_TARGET_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = rt_view_destroy;

    view->img = img;

    view->array_size = info->arraySize;

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_tex_gen7(dev->gpu, img,
                img_type_to_view_type(img->type),
                info->format, info->mipLevel, 1,
                info->baseArraySlice, info->arraySize,
                identity_channel_mapping, true, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_tex_gen6(dev->gpu, img,
                img_type_to_view_type(img->type),
                info->format, info->mipLevel, 1,
                info->baseArraySlice, info->arraySize,
                true, view->cmd);
        view->cmd_len = 6;
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_rt_view_destroy(struct intel_rt_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void ds_view_destroy(struct intel_obj *obj)
{
    struct intel_ds_view *view = intel_ds_view_from_obj(obj);

    intel_ds_view_destroy(view);
}

XGL_RESULT intel_ds_view_create(struct intel_dev *dev,
                                const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
                                struct intel_ds_view **view_ret)
{
    struct intel_img *img = intel_img(info->image);
    struct intel_ds_view *view;

    view = (struct intel_ds_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = ds_view_destroy;

    view->img = img;

    view->array_size = info->arraySize;

    ds_view_init(view, dev->gpu, img, img->layout.format, info->mipLevel,
            info->baseArraySlice, info->arraySize);

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_ds_view_destroy(struct intel_ds_view *view)
{
    intel_base_destroy(&view->obj.base);
}

ICD_EXPORT XGL_RESULT XGLAPI xglCreateBufferView(
    XGL_DEVICE                                  device,
    const XGL_BUFFER_VIEW_CREATE_INFO*          pCreateInfo,
    XGL_BUFFER_VIEW*                            pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_buf_view_create(dev, pCreateInfo,
            (struct intel_buf_view **) pView);
}

ICD_EXPORT XGL_RESULT XGLAPI xglCreateImageView(
    XGL_DEVICE                                  device,
    const XGL_IMAGE_VIEW_CREATE_INFO*           pCreateInfo,
    XGL_IMAGE_VIEW*                             pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_img_view_create(dev, pCreateInfo,
            (struct intel_img_view **) pView);
}

ICD_EXPORT XGL_RESULT XGLAPI xglCreateColorAttachmentView(
    XGL_DEVICE                                  device,
    const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
    XGL_COLOR_ATTACHMENT_VIEW*                  pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_rt_view_create(dev, pCreateInfo,
            (struct intel_rt_view **) pView);
}

ICD_EXPORT XGL_RESULT XGLAPI xglCreateDepthStencilView(
    XGL_DEVICE                                  device,
    const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO*   pCreateInfo,
    XGL_DEPTH_STENCIL_VIEW*                     pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_ds_view_create(dev, pCreateInfo,
            (struct intel_ds_view **) pView);
}