/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "genhw/genhw.h"
#include "kmd/winsys.h"
#include "buf.h"
#include "dev.h"
#include "format.h"
#include "gpu.h"
#include "img.h"
#include "mem.h"
#include "view.h"

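/*
 * Write a SURFACE_STATE for a null surface on Gen7/Gen7.5 (8 dwords).
 */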
static void surface_state_null_gen7(const struct intel_gpu *gpu,
                                    uint32_t dw[8])
{
    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 62:
     *
     * "A null surface is used in instances where an actual surface is not
     *  bound. When a write message is generated to a null surface, no
     *  actual surface is written to. When a read message (including any
     *  sampling engine message) is generated to a null surface, the result
     *  is all zeros. Note that a null surface type is allowed to be used
     *  with all messages, even if it is not specificially indicated as
     *  supported. All of the remaining fields in surface state are ignored
     *  for null surfaces, with the following exceptions:
     *
     *   * Width, Height, Depth, LOD, and Render Target View Extent fields
     *     must match the depth buffer's corresponding state for all render
     *     target surfaces, including null.
     *   * All sampling engine and data port messages support null surfaces
     *     with the above behavior, even if not mentioned as specifically
     *     supported, except for the following:
     *      * Data Port Media Block Read/Write messages.
     *   * The Surface Type of a surface used as a render target (accessed
     *     via the Data Port's Render Target Write message) must be the same
     *     as the Surface Type of all other render targets and of the depth
     *     buffer (defined in 3DSTATE_DEPTH_BUFFER), unless either the depth
     *     buffer or render targets are SURFTYPE_NULL."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 65:
     *
     * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
     *  true"
     */

    dw[0] = GEN6_SURFTYPE_NULL << GEN7_SURFACE_DW0_TYPE__SHIFT |
            GEN6_FORMAT_B8G8R8A8_UNORM << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_TILING_X << 13;

    dw[1] = 0;
    dw[2] = 0;
    dw[3] = 0;
    dw[4] = 0;
    dw[5] = 0;
    dw[6] = 0;
    dw[7] = 0;
}

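/*
 * Write a SURFACE_STATE for a buffer view on Gen7/Gen7.5.  The buffer is
 * treated as typed when elem_format is defined, as structured when it is
 * raw with struct_size > 1, and as raw otherwise; the entry count is split
 * across the Width/Height/Depth fields.
 */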
static void surface_state_buf_gen7(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[8])
{
    const bool typed = !icd_format_is_undef(elem_format);
    const bool structured = (!typed && struct_size > 1);
    const int elem_size = (typed) ?
        icd_format_get_size(elem_format) : 1;
    int width, height, depth, pitch;
    int surface_type, surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = (structured) ? GEN7_SURFTYPE_STRBUF : GEN6_SURFTYPE_BUFFER;

    surface_format = (typed) ?
        intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size && !structured)
        num_entries++;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 67:
     *
     * "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *  Address) specifies the base address of first element of the
     *  surface. The surface is interpreted as a simple array of that
     *  single element type. The address must be naturally-aligned to the
     *  element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *  must be 16-byte aligned)
     *
     *  For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *  the base address of the first element of the surface, computed in
     *  software by adding the surface base address to the byte offset of
     *  the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     * "For typed buffer and structured buffer surfaces, the number of
     *  entries in the buffer ranges from 1 to 2^27. For raw buffer
     *  surfaces, the number of entries in the buffer is the number of
     *  bytes which can range from 1 to 2^30."
     */
    assert(num_entries >= 1 &&
           num_entries <= 1 << ((typed || structured) ? 27 : 30));

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 69:
     *
     * "For SURFTYPE_BUFFER: The low two bits of this field (Width) must be
     *  11 if the Surface Format is RAW (the size of the buffer must be a
     *  multiple of 4 bytes)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     * "For surfaces of type SURFTYPE_BUFFER and SURFTYPE_STRBUF, this
     *  field (Surface Pitch) indicates the size of the structure."
     *
     * "For linear surfaces with Surface Type of SURFTYPE_STRBUF, the pitch
     *  must be a multiple of 4 bytes."
     */
    if (structured)
        assert(struct_size % 4 == 0);
    else if (!typed)
        assert(num_entries % 4 == 0);

    pitch = struct_size;

    pitch--;
    num_entries--;
    /* bits [6:0] */
    width = (num_entries & 0x0000007f);
    /* bits [20:7] */
    height = (num_entries & 0x001fff80) >> 7;
    /* bits [30:21] */
    depth = (num_entries & 0x7fe00000) >> 21;
    /* limit to [26:21] */
    if (typed || structured)
        depth &= 0x3f;

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            pitch;

    dw[4] = 0;
    dw[5] = 0;

    dw[6] = 0;
    dw[7] = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}

203static int img_type_to_view_type(XGL_IMAGE_VIEW_TYPE type)
204{
205 switch (type) {
206 case XGL_IMAGE_1D: return XGL_IMAGE_VIEW_1D;
207 case XGL_IMAGE_2D: return XGL_IMAGE_VIEW_2D;
208 case XGL_IMAGE_3D: return XGL_IMAGE_VIEW_3D;
209 default: assert(!"unknown img type"); return XGL_IMAGE_VIEW_1D;
210 }
211}
212
static int view_type_to_surface_type(XGL_IMAGE_VIEW_TYPE type)
{
    switch (type) {
    case XGL_IMAGE_VIEW_1D:   return GEN6_SURFTYPE_1D;
    case XGL_IMAGE_VIEW_2D:   return GEN6_SURFTYPE_2D;
    case XGL_IMAGE_VIEW_3D:   return GEN6_SURFTYPE_3D;
    case XGL_IMAGE_VIEW_CUBE: return GEN6_SURFTYPE_CUBE;
    default: assert(!"unknown view type"); return GEN6_SURFTYPE_NULL;
    }
}

static int winsys_tiling_to_surface_tiling(enum intel_tiling_mode tiling)
{
    switch (tiling) {
    case INTEL_TILING_NONE: return GEN6_TILING_NONE;
    case INTEL_TILING_X:    return GEN6_TILING_X;
    case INTEL_TILING_Y:    return GEN6_TILING_Y;
    default: assert(!"unknown tiling"); return GEN6_TILING_NONE;
    }
}

static int channel_swizzle_to_scs(XGL_CHANNEL_SWIZZLE swizzle)
{
    switch (swizzle) {
    case XGL_CHANNEL_SWIZZLE_ZERO: return GEN75_SCS_ZERO;
    case XGL_CHANNEL_SWIZZLE_ONE:  return GEN75_SCS_ONE;
    case XGL_CHANNEL_SWIZZLE_R:    return GEN75_SCS_RED;
    case XGL_CHANNEL_SWIZZLE_G:    return GEN75_SCS_GREEN;
    case XGL_CHANNEL_SWIZZLE_B:    return GEN75_SCS_BLUE;
    case XGL_CHANNEL_SWIZZLE_A:    return GEN75_SCS_ALPHA;
    default: assert(!"unknown swizzle"); return GEN75_SCS_ZERO;
    }
}

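/*
 * Write a SURFACE_STATE for an image view on Gen7/Gen7.5.  With is_rt set,
 * the state describes a single-level render target; otherwise it describes
 * a sampler view covering num_levels mip levels starting at first_level.
 */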
static void surface_state_tex_gen7(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   XGL_CHANNEL_MAPPING swizzles,
                                   bool is_rt,
                                   uint32_t dw[8])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Ivy Bridge PRM, volume 4 part 1, page 70:
         *
         * "For SURFTYPE_CUBE: For Sampling Engine Surfaces, the range of
         *  this field is [0,340], indicating the number of cube array
         *  elements (equal to the number of underlying 2D array elements
         *  divided by 6). For other surfaces, this field must be zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    assert(first_layer < 2048 && num_layers <= 2048);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 16384 && height == 1 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 16384 && height <= 16384 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 16384 && height <= 16384 && depth <= 86);
        assert(width == height);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     * "The Base Address for linear render target surfaces and surfaces
     *  accessed with the typed surface read/write data port messages must
     *  be element-size aligned, for non-YUV surface formats, or a multiple
     *  of 2 element-sizes for YUV surface formats. Other linear surfaces
     *  have no alignment requirements (byte alignment is sufficient)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     * "For linear render target surfaces and surfaces accessed with the
     *  typed data port messages, the pitch must be a multiple of the
     *  element size for non-YUV surface formats. Pitch must be a multiple
     *  of 2 * element size for YUV surface formats. For linear surfaces
     *  with Surface Type of SURFTYPE_STRBUF, the pitch must be a multiple
     *  of 4 bytes. For other linear surfaces, the pitch can be any multiple
     *  of bytes."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 74:
     *
     * "For linear surfaces, this field (X Offset) must be zero."
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling) << 13;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 63:
     *
     * "If this field (Surface Array) is enabled, the Surface Type must be
     *  SURFTYPE_1D, SURFTYPE_2D, or SURFTYPE_CUBE. If this field is
     *  disabled and Surface Type is SURFTYPE_1D, SURFTYPE_2D, or
     *  SURFTYPE_CUBE, the Depth field must be set to zero."
     *
     * For non-3D sampler surfaces, resinfo (the sampler message) always
     * returns zero for the number of layers when this field is not set.
     */
    if (surface_type != GEN6_SURFTYPE_3D) {
        if (num_layers > 1)
            dw[0] |= GEN7_SURFACE_DW0_IS_ARRAY;
        else
            assert(depth == 1);
    }

    assert(img->layout.align_i == 4 || img->layout.align_i == 8);
    assert(img->layout.align_j == 2 || img->layout.align_j == 4);

    if (img->layout.align_j == 4)
        dw[0] |= GEN7_SURFACE_DW0_VALIGN_4;

    if (img->layout.align_i == 8)
        dw[0] |= GEN7_SURFACE_DW0_HALIGN_8;

    if (img->layout.walk == INTEL_LAYOUT_WALK_LOD)
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_LOD0;
    else
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_FULL;

    if (is_rt)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt)
        dw[0] |= GEN7_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = (depth - 1) << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1);

    dw[4] = first_layer << 18 |
            (num_layers - 1) << 7;

    /*
     * MSFMT_MSS means the samples are not interleaved and MSFMT_DEPTH_STENCIL
     * means the samples are interleaved. The layouts are the same when the
     * number of samples is 1.
     */
    if (img->layout.interleaved_samples && img->samples > 1) {
        assert(!is_rt);
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_DEPTH_STENCIL;
    }
    else {
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_MSS;
    }

    if (img->samples > 4)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_8;
    else if (img->samples > 2)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_4;
    else
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_1;

    dw[5] = (first_level) << GEN7_SURFACE_DW5_MIN_LOD__SHIFT |
            lod;

    dw[6] = 0;
    dw[7] = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |=
            channel_swizzle_to_scs(swizzles.r) << GEN75_SURFACE_DW7_SCS_R__SHIFT |
            channel_swizzle_to_scs(swizzles.g) << GEN75_SURFACE_DW7_SCS_G__SHIFT |
            channel_swizzle_to_scs(swizzles.b) << GEN75_SURFACE_DW7_SCS_B__SHIFT |
            channel_swizzle_to_scs(swizzles.a) << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    } else {
        assert(swizzles.r == XGL_CHANNEL_SWIZZLE_R &&
               swizzles.g == XGL_CHANNEL_SWIZZLE_G &&
               swizzles.b == XGL_CHANNEL_SWIZZLE_B &&
               swizzles.a == XGL_CHANNEL_SWIZZLE_A);
    }
}

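/*
 * Write a SURFACE_STATE for a null surface on Gen6 (6 dwords).
 */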
static void surface_state_null_gen6(const struct intel_gpu *gpu,
                                    uint32_t dw[6])
{
    INTEL_GPU_ASSERT(gpu, 6, 6);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 71:
     *
     * "A null surface will be used in instances where an actual surface is
     *  not bound. When a write message is generated to a null surface, no
     *  actual surface is written to. When a read message (including any
     *  sampling engine message) is generated to a null surface, the result
     *  is all zeros. Note that a null surface type is allowed to be used
     *  with all messages, even if it is not specificially indicated as
     *  supported. All of the remaining fields in surface state are ignored
     *  for null surfaces, with the following exceptions:
     *
     *   * [DevSNB+]: Width, Height, Depth, and LOD fields must match the
     *     depth buffer's corresponding state for all render target
     *     surfaces, including null.
     *   * Surface Format must be R8G8B8A8_UNORM."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 82:
     *
     * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
     *  true"
     */

    dw[0] = GEN6_SURFTYPE_NULL << GEN6_SURFACE_DW0_TYPE__SHIFT |
            GEN6_FORMAT_B8G8R8A8_UNORM << GEN6_SURFACE_DW0_FORMAT__SHIFT;

    dw[1] = 0;
    dw[2] = 0;
    dw[3] = GEN6_TILING_X;
    dw[4] = 0;
    dw[5] = 0;
}

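/*
 * Write a SURFACE_STATE for a buffer view on Gen6.
 */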
static void surface_state_buf_gen6(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[6])
{
    const bool typed = !icd_format_is_undef(elem_format);
    const int elem_size = icd_format_get_size(elem_format);
    int width, height, depth, pitch;
    int surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    /*
     * For SURFTYPE_BUFFER, a SURFACE_STATE specifies an element of a
     * structure in a buffer.
     */

    surface_format = (typed) ?
        intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size)
        num_entries++;

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     * "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *  Address) specifies the base address of first element of the
     *  surface. The surface is interpreted as a simple array of that
     *  single element type. The address must be naturally-aligned to the
     *  element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *  must be 16-byte aligned).
     *
     *  For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *  the base address of the first element of the surface, computed in
     *  software by adding the surface base address to the byte offset of
     *  the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 77:
     *
     * "For buffer surfaces, the number of entries in the buffer ranges
     *  from 1 to 2^27."
     */
    assert(num_entries >= 1 && num_entries <= 1 << 27);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     * "For surfaces of type SURFTYPE_BUFFER, this field (Surface Pitch)
     *  indicates the size of the structure."
     */
    pitch = struct_size;

    pitch--;
    num_entries--;
    /* bits [6:0] */
    width = (num_entries & 0x0000007f);
    /* bits [19:7] */
    height = (num_entries & 0x000fff80) >> 7;
    /* bits [26:20] */
    depth = (num_entries & 0x07f00000) >> 20;

    dw[0] = GEN6_SURFTYPE_BUFFER << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN6_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            pitch << GEN6_SURFACE_DW3_PITCH__SHIFT;

    dw[4] = 0;
    dw[5] = 0;
}

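/*
 * Write a SURFACE_STATE for an image view or render target on Gen6.
 */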
static void surface_state_tex_gen6(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[6])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 4 part 1, page 81:
         *
         * "For SURFTYPE_CUBE: [DevSNB+]: for Sampling Engine Surfaces, the
         *  range of this field (Depth) is [0,84], indicating the number of
         *  cube array elements (equal to the number of underlying 2D array
         *  elements divided by 6). For other surfaces, this field must be
         *  zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 8192 && height == 1 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 8192 && height <= 8192 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        assert(first_layer < 2048 && num_layers <= 512);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 8192 && height <= 8192 && depth <= 85);
        assert(width == height);
        assert(first_layer < 512 && num_layers <= 512);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    /* non-full array spacing is supported only on GEN7+ */
    assert(img->layout.walk != INTEL_LAYOUT_WALK_LOD);
    /* non-interleaved samples are supported only on GEN7+ */
    if (img->samples > 1)
        assert(img->layout.interleaved_samples);

    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     * "Linear render target surface base addresses must be element-size
     *  aligned, for non-YUV surface formats, or a multiple of 2
     *  element-sizes for YUV surface formats. Other linear surfaces have
     *  no alignment requirements (byte alignment is sufficient.)"
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     * "For linear render target surfaces, the pitch must be a multiple
     *  of the element size for non-YUV surface formats. Pitch must be a
     *  multiple of 2 * element size for YUV surface formats."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 86:
     *
     * "For linear surfaces, this field (X Offset) must be zero"
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_SURFACE_DW0_MIPLAYOUT_BELOW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt) {
        dw[0] |= 1 << 9 |
                 GEN6_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;
    }

    if (is_rt)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN6_SURFACE_DW2_WIDTH__SHIFT |
            lod << GEN6_SURFACE_DW2_MIP_COUNT_LOD__SHIFT;

    dw[3] = (depth - 1) << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1) << GEN6_SURFACE_DW3_PITCH__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling);

    dw[4] = first_level << GEN6_SURFACE_DW4_MIN_LOD__SHIFT |
            first_layer << 17 |
            (num_layers - 1) << 8 |
            ((img->samples > 1) ? GEN6_SURFACE_DW4_MULTISAMPLECOUNT_4 :
                                  GEN6_SURFACE_DW4_MULTISAMPLECOUNT_1);

    dw[5] = 0;

    assert(img->layout.align_j == 2 || img->layout.align_j == 4);
    if (img->layout.align_j == 4)
        dw[5] |= GEN6_SURFACE_DW5_VALIGN_4;
}

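/*
 * Collected depth/stencil surface parameters: strides and offsets of the
 * depth (zs), separate stencil, and HiZ buffers, plus the surface
 * dimensions and the subrange being viewed.
 */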
struct ds_surface_info {
    int surface_type;
    int format;

    struct {
        unsigned stride;
        unsigned offset;
    } zs, stencil, hiz;

    unsigned width, height, depth;
    unsigned lod, first_layer, num_layers;
};

static void
ds_init_info_null(const struct intel_gpu *gpu,
                  struct ds_surface_info *info)
{
    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    memset(info, 0, sizeof(*info));

    info->surface_type = GEN6_SURFTYPE_NULL;
    info->format = GEN6_ZFORMAT_D32_FLOAT;
    info->width = 1;
    info->height = 1;
    info->depth = 1;
    info->num_layers = 1;
}

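/*
 * Derive ds_surface_info from an image and a depth/stencil format.  Falls
 * back to a null depth/stencil surface when the format is unsupported.
 */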
static void
ds_init_info(const struct intel_gpu *gpu,
             const struct intel_img *img,
             XGL_FORMAT format, unsigned level,
             unsigned first_layer, unsigned num_layers,
             struct ds_surface_info *info)
{
    bool separate_stencil;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    memset(info, 0, sizeof(*info));

    info->surface_type =
        view_type_to_surface_type(img_type_to_view_type(img->type));

    if (info->surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 325-326:
         *
         * "For Other Surfaces (Cube Surfaces):
         *  This field (Minimum Array Element) is ignored."
         *
         * "For Other Surfaces (Cube Surfaces):
         *  This field (Render Target View Extent) is ignored."
         *
         * As such, we cannot set first_layer and num_layers on cube surfaces.
         * To work around that, treat it as a 2D surface.
         */
        info->surface_type = GEN6_SURFTYPE_2D;
    }

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        separate_stencil = true;
    }
    else {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 317:
         *
         * "This field (Separate Stencil Buffer Enable) must be set to the
         *  same value (enabled or disabled) as Hierarchical Depth Buffer
         *  Enable."
         */
        separate_stencil = img->aux_offset;
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 317:
     *
     * "If this field (Hierarchical Depth Buffer Enable) is enabled, the
     *  Surface Format of the depth buffer cannot be
     *  D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT. Use of stencil
     *  requires the separate stencil buffer."
     *
     * From the Ironlake PRM, volume 2 part 1, page 330:
     *
     * "If this field (Separate Stencil Buffer Enable) is disabled, the
     *  Surface Format of the depth buffer cannot be D24_UNORM_X8_UINT."
     *
     * There is no similar restriction for GEN6. But when D24_UNORM_X8_UINT
     * is indeed used, the depth values output by the fragment shaders will
     * be different when read back.
     *
     * As for GEN7+, separate_stencil is always true.
     */
    switch (format) {
    case XGL_FMT_D16_UNORM:
        info->format = GEN6_ZFORMAT_D16_UNORM;
        break;
    case XGL_FMT_D32_SFLOAT:
        info->format = GEN6_ZFORMAT_D32_FLOAT;
        break;
    case XGL_FMT_D32_SFLOAT_S8_UINT:
        info->format = (separate_stencil) ?
            GEN6_ZFORMAT_D32_FLOAT :
            GEN6_ZFORMAT_D32_FLOAT_S8X24_UINT;
        break;
    case XGL_FMT_S8_UINT:
        if (separate_stencil) {
            info->format = GEN6_ZFORMAT_D32_FLOAT;
            break;
        }
        /* fall through */
    default:
        assert(!"unsupported depth/stencil format");
        ds_init_info_null(gpu, info);
        return;
    }

    if (format != XGL_FMT_S8_UINT)
        info->zs.stride = img->layout.bo_stride;

    if (img->s8_layout) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 329:
         *
         * "The pitch must be set to 2x the value computed based on width,
         *  as the stencil buffer is stored with two rows interleaved."
         *
         * According to the classic driver, we need to do the same for GEN7+
         * even though the Ivy Bridge PRM does not say anything about it.
         */
        info->stencil.stride = img->s8_layout->bo_stride * 2;

        if (intel_gpu_gen(gpu) == INTEL_GEN(6)) {
            unsigned x, y;

            assert(img->s8_layout->walk == INTEL_LAYOUT_WALK_LOD);

            /* offset to the level */
            intel_layout_get_slice_pos(img->s8_layout, level, 0, &x, &y);
            intel_layout_pos_to_mem(img->s8_layout, x, y, &x, &y);
            info->stencil.offset = intel_layout_mem_to_raw(img->s8_layout, x, y);
        }
    } else if (format == XGL_FMT_S8_UINT) {
        info->stencil.stride = img->layout.bo_stride * 2;
    }

    if (img->aux_offset) {
        info->hiz.stride = img->layout.aux_stride;

        /* offset to the level */
        if (intel_gpu_gen(gpu) == INTEL_GEN(6))
            info->hiz.offset = img->layout.aux_offsets[level];
    }

    info->width = img->layout.width0;
    info->height = img->layout.height0;
    info->depth = (img->type == XGL_IMAGE_3D) ?
        img->depth : num_layers;

    info->lod = level;
    info->first_layer = first_layer;
    info->num_layers = num_layers;
}

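/*
 * Build the command dwords of a depth/stencil view: depth buffer state in
 * dw[0..5], separate stencil state in dw[6..7], and HiZ state in dw[8..9].
 */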
static void ds_view_init(struct intel_ds_view *view,
                         const struct intel_gpu *gpu,
                         const struct intel_img *img,
                         XGL_FORMAT format, unsigned level,
                         unsigned first_layer, unsigned num_layers)
{
    const int max_2d_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16384 : 8192;
    const int max_array_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 2048 : 512;
    struct ds_surface_info info;
    uint32_t dw1, dw2, dw3, dw4, dw5, dw6;
    uint32_t *dw;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    if (img) {
        ds_init_info(gpu, img, format, level, first_layer, num_layers, &info);
    }
    else {
        ds_init_info_null(gpu, &info);
    }

    switch (info.surface_type) {
    case GEN6_SURFTYPE_NULL:
        break;
    case GEN6_SURFTYPE_1D:
        assert(info.width <= max_2d_size && info.height == 1 &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_2D:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_3D:
        assert(info.width <= 2048 && info.height <= 2048 && info.depth <= 2048);
        assert(info.first_layer < 2048 && info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth == 1);
        assert(info.first_layer == 0 && info.num_layers == 1);
        assert(info.width == info.height);
        break;
    default:
        assert(!"unexpected depth surface type");
        break;
    }

    dw1 = info.surface_type << 29 |
          info.format << 18;

    if (info.zs.stride) {
        /* required for GEN6+ */
        assert(info.zs.stride > 0 && info.zs.stride < 128 * 1024 &&
               info.zs.stride % 128 == 0);
        assert(info.width <= info.zs.stride);

        dw1 |= (info.zs.stride - 1);
    }

    dw2 = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        if (info.zs.stride)
            dw1 |= 1 << 28;

        if (info.stencil.stride)
            dw1 |= 1 << 27;

        if (info.hiz.stride)
            dw1 |= 1 << 22;

        dw3 = (info.height - 1) << 18 |
              (info.width - 1) << 4 |
              info.lod;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10;

        dw5 = 0;

        dw6 = (info.num_layers - 1) << 21;
    }
    else {
        /* always Y-tiled */
        dw1 |= 1 << 27 |
               1 << 26;

        if (info.hiz.stride) {
            dw1 |= 1 << 22 |
                   1 << 21;
        }

        dw3 = (info.height - 1) << 19 |
              (info.width - 1) << 6 |
              info.lod << 2 |
              GEN6_DEPTH_DW3_MIPLAYOUT_BELOW;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10 |
              (info.num_layers - 1) << 1;

        dw5 = 0;

        dw6 = 0;
    }

    STATIC_ASSERT(ARRAY_SIZE(view->cmd) >= 10);
    dw = view->cmd;

    dw[0] = dw1;
    dw[1] = dw2;
    dw[2] = dw3;
    dw[3] = dw4;
    dw[4] = dw5;
    dw[5] = dw6;

    /* separate stencil */
    if (info.stencil.stride) {
        assert(info.stencil.stride > 0 && info.stencil.stride < 128 * 1024 &&
               info.stencil.stride % 128 == 0);

        dw[6] = info.stencil.stride - 1;
        dw[7] = img->s8_offset;

        if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5))
            dw[6] |= GEN75_STENCIL_DW1_STENCIL_BUFFER_ENABLE;
    }
    else {
        dw[6] = 0;
        dw[7] = 0;
    }

    /* hiz */
    if (info.hiz.stride) {
        dw[8] = info.hiz.stride - 1;
        dw[9] = img->aux_offset;
    }
    else {
        dw[8] = 0;
        dw[9] = 0;
    }
}

void intel_null_view_init(struct intel_null_view *view,
                          struct intel_dev *dev)
{
    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_null_gen7(dev->gpu, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_null_gen6(dev->gpu, view->cmd);
        view->cmd_len = 6;
    }
}

static void buf_view_destroy(struct intel_obj *obj)
{
    struct intel_buf_view *view = intel_buf_view_from_obj(obj);

    intel_buf_view_destroy(view);
}

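/*
 * Create a buffer view.  For XGL_BUFFER_VIEW_RAW views, two surface states
 * are built: view->cmd with a 16-byte pitch for most shader stages and
 * view->fs_cmd with a 4-byte pitch for fragment shaders, matching what the
 * compiler expects for uniform buffers.
 */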
XGL_RESULT intel_buf_view_create(struct intel_dev *dev,
                                 const XGL_BUFFER_VIEW_CREATE_INFO *info,
                                 struct intel_buf_view **view_ret)
{
    struct intel_buf *buf = intel_buf(info->buffer);
    const bool will_write = (buf->usage &
            (XGL_BUFFER_USAGE_SHADER_ACCESS_WRITE_BIT |
             XGL_BUFFER_USAGE_SHADER_ACCESS_ATOMIC_BIT));
    XGL_FORMAT format;
    XGL_GPU_SIZE stride;
    uint32_t *cmd;
    struct intel_buf_view *view;
    int i;

    view = (struct intel_buf_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_BUFFER_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = buf_view_destroy;

    view->buf = buf;

    /*
     * The compiler expects uniform buffers to have a pitch of 4 for
     * fragment shaders, but 16 for other stages.  The format must be
     * XGL_FMT_R32G32B32A32_SFLOAT.
     */
    if (info->viewType == XGL_BUFFER_VIEW_RAW) {
        format = XGL_FMT_R32G32B32A32_SFLOAT;
        stride = 16;
    } else {
        format = info->format;
        stride = info->stride;
    }
    cmd = view->cmd;

    for (i = 0; i < 2; i++) {
        if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
            surface_state_buf_gen7(dev->gpu, info->offset,
                    info->range, stride, format,
                    will_write, will_write, cmd);
            view->cmd_len = 8;
        } else {
            surface_state_buf_gen6(dev->gpu, info->offset,
                    info->range, stride, format,
                    will_write, will_write, cmd);
            view->cmd_len = 6;
        }

        /* switch to view->fs_cmd */
        if (info->viewType == XGL_BUFFER_VIEW_RAW) {
            cmd = view->fs_cmd;
            stride = 4;
        } else {
            memcpy(view->fs_cmd, view->cmd, sizeof(uint32_t) * view->cmd_len);
            break;
        }
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_buf_view_destroy(struct intel_buf_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void img_view_destroy(struct intel_obj *obj)
{
    struct intel_img_view *view = intel_img_view_from_obj(obj);

    intel_img_view_destroy(view);
}

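/*
 * Create an image view.  The requested subresource range is clamped to the
 * image's mip level and array slice counts; channel swizzles go into
 * SURFACE_STATE on Gen7.5+ and are otherwise left to the shader, where
 * they are currently ignored.
 */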
XGL_RESULT intel_img_view_create(struct intel_dev *dev,
                                 const XGL_IMAGE_VIEW_CREATE_INFO *info,
                                 struct intel_img_view **view_ret)
{
    struct intel_img *img = intel_img(info->image);
    struct intel_img_view *view;
    uint32_t mip_levels, array_size;
    XGL_CHANNEL_MAPPING state_swizzles;

    if (info->subresourceRange.baseMipLevel >= img->mip_levels ||
        info->subresourceRange.baseArraySlice >= img->array_size ||
        !info->subresourceRange.mipLevels ||
        !info->subresourceRange.arraySize)
        return XGL_ERROR_INVALID_VALUE;

    mip_levels = info->subresourceRange.mipLevels;
    if (mip_levels > img->mip_levels - info->subresourceRange.baseMipLevel)
        mip_levels = img->mip_levels - info->subresourceRange.baseMipLevel;

    array_size = info->subresourceRange.arraySize;
    if (array_size > img->array_size - info->subresourceRange.baseArraySlice)
        array_size = img->array_size - info->subresourceRange.baseArraySlice;

    view = (struct intel_img_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_IMAGE_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = img_view_destroy;

    view->img = img;
    view->min_lod = info->minLod;

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7.5)) {
        state_swizzles = info->channels;
        view->shader_swizzles.r = XGL_CHANNEL_SWIZZLE_R;
        view->shader_swizzles.g = XGL_CHANNEL_SWIZZLE_G;
        view->shader_swizzles.b = XGL_CHANNEL_SWIZZLE_B;
        view->shader_swizzles.a = XGL_CHANNEL_SWIZZLE_A;
    } else {
        state_swizzles.r = XGL_CHANNEL_SWIZZLE_R;
        state_swizzles.g = XGL_CHANNEL_SWIZZLE_G;
        state_swizzles.b = XGL_CHANNEL_SWIZZLE_B;
        state_swizzles.a = XGL_CHANNEL_SWIZZLE_A;
        view->shader_swizzles = info->channels;
    }

    /* shader_swizzles is ignored by the compiler */
    if (view->shader_swizzles.r != XGL_CHANNEL_SWIZZLE_R ||
        view->shader_swizzles.g != XGL_CHANNEL_SWIZZLE_G ||
        view->shader_swizzles.b != XGL_CHANNEL_SWIZZLE_B ||
        view->shader_swizzles.a != XGL_CHANNEL_SWIZZLE_A) {
        intel_dev_log(dev, XGL_DBG_MSG_WARNING,
                XGL_VALIDATION_LEVEL_0, XGL_NULL_HANDLE, 0, 0,
                "image data swizzling is ignored");
    }

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_tex_gen7(dev->gpu, img, info->viewType, info->format,
                info->subresourceRange.baseMipLevel, mip_levels,
                info->subresourceRange.baseArraySlice, array_size,
                state_swizzles, false, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_tex_gen6(dev->gpu, img, info->viewType, info->format,
                info->subresourceRange.baseMipLevel, mip_levels,
                info->subresourceRange.baseArraySlice, array_size,
                false, view->cmd);
        view->cmd_len = 6;
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_img_view_destroy(struct intel_img_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void rt_view_destroy(struct intel_obj *obj)
{
    struct intel_rt_view *view = intel_rt_view_from_obj(obj);

    intel_rt_view_destroy(view);
}

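/*
 * Create a color attachment view.  A single mip level is selected and the
 * channel mapping is always identity.
 */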
XGL_RESULT intel_rt_view_create(struct intel_dev *dev,
                                const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
                                struct intel_rt_view **view_ret)
{
    static const XGL_CHANNEL_MAPPING identity_channel_mapping = {
        .r = XGL_CHANNEL_SWIZZLE_R,
        .g = XGL_CHANNEL_SWIZZLE_G,
        .b = XGL_CHANNEL_SWIZZLE_B,
        .a = XGL_CHANNEL_SWIZZLE_A,
    };
    struct intel_img *img = intel_img(info->image);
    struct intel_rt_view *view;

    view = (struct intel_rt_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_COLOR_TARGET_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = rt_view_destroy;

    view->img = img;

    view->array_size = info->arraySize;

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_tex_gen7(dev->gpu, img,
                img_type_to_view_type(img->type),
                info->format, info->mipLevel, 1,
                info->baseArraySlice, info->arraySize,
                identity_channel_mapping, true, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_tex_gen6(dev->gpu, img,
                img_type_to_view_type(img->type),
                info->format, info->mipLevel, 1,
                info->baseArraySlice, info->arraySize,
                true, view->cmd);
        view->cmd_len = 6;
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_rt_view_destroy(struct intel_rt_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void ds_view_destroy(struct intel_obj *obj)
{
    struct intel_ds_view *view = intel_ds_view_from_obj(obj);

    intel_ds_view_destroy(view);
}

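/*
 * Create a depth/stencil view using the image's own layout format.
 */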
XGL_RESULT intel_ds_view_create(struct intel_dev *dev,
                                const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
                                struct intel_ds_view **view_ret)
{
    struct intel_img *img = intel_img(info->image);
    struct intel_ds_view *view;

    view = (struct intel_ds_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = ds_view_destroy;

    view->img = img;

    view->array_size = info->arraySize;

    ds_view_init(view, dev->gpu, img, img->layout.format, info->mipLevel,
            info->baseArraySlice, info->arraySize);

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_ds_view_destroy(struct intel_ds_view *view)
{
    intel_base_destroy(&view->obj.base);
}

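/* XGL ICD entry points */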
ICD_EXPORT XGL_RESULT XGLAPI xglCreateBufferView(
    XGL_DEVICE device,
    const XGL_BUFFER_VIEW_CREATE_INFO* pCreateInfo,
    XGL_BUFFER_VIEW* pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_buf_view_create(dev, pCreateInfo,
            (struct intel_buf_view **) pView);
}

ICD_EXPORT XGL_RESULT XGLAPI xglCreateImageView(
    XGL_DEVICE device,
    const XGL_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
    XGL_IMAGE_VIEW* pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_img_view_create(dev, pCreateInfo,
            (struct intel_img_view **) pView);
}

ICD_EXPORT XGL_RESULT XGLAPI xglCreateColorAttachmentView(
    XGL_DEVICE device,
    const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
    XGL_COLOR_ATTACHMENT_VIEW* pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_rt_view_create(dev, pCreateInfo,
            (struct intel_rt_view **) pView);
}

ICD_EXPORT XGL_RESULT XGLAPI xglCreateDepthStencilView(
    XGL_DEVICE device,
    const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
    XGL_DEPTH_STENCIL_VIEW* pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_ds_view_create(dev, pCreateInfo,
            (struct intel_ds_view **) pView);
}