/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */
27
Chia-I Wu9269d1c2014-08-16 12:47:47 +080028#include "genhw/genhw.h"
29#include "kmd/winsys.h"
Chia-I Wu5a323262014-08-11 10:31:53 +080030#include "dev.h"
Chia-I Wu9269d1c2014-08-16 12:47:47 +080031#include "format.h"
Chia-I Wu5a323262014-08-11 10:31:53 +080032#include "gpu.h"
33#include "img.h"
34#include "mem.h"
35#include "view.h"
36
Chia-I Wu06bed192014-08-20 13:57:18 +080037static void surface_state_null_gen7(const struct intel_gpu *gpu,
38 uint32_t dw[8])
Chia-I Wu9269d1c2014-08-16 12:47:47 +080039{
40 INTEL_GPU_ASSERT(gpu, 7, 7.5);
41
42 /*
43 * From the Ivy Bridge PRM, volume 4 part 1, page 62:
44 *
45 * "A null surface is used in instances where an actual surface is not
46 * bound. When a write message is generated to a null surface, no
47 * actual surface is written to. When a read message (including any
48 * sampling engine message) is generated to a null surface, the result
49 * is all zeros. Note that a null surface type is allowed to be used
50 * with all messages, even if it is not specificially indicated as
51 * supported. All of the remaining fields in surface state are ignored
52 * for null surfaces, with the following exceptions:
53 *
54 * * Width, Height, Depth, LOD, and Render Target View Extent fields
55 * must match the depth buffer's corresponding state for all render
56 * target surfaces, including null.
57 * * All sampling engine and data port messages support null surfaces
58 * with the above behavior, even if not mentioned as specifically
59 * supported, except for the following:
60 * * Data Port Media Block Read/Write messages.
61 * * The Surface Type of a surface used as a render target (accessed
62 * via the Data Port's Render Target Write message) must be the same
63 * as the Surface Type of all other render targets and of the depth
64 * buffer (defined in 3DSTATE_DEPTH_BUFFER), unless either the depth
65 * buffer or render targets are SURFTYPE_NULL."
66 *
67 * From the Ivy Bridge PRM, volume 4 part 1, page 65:
68 *
69 * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
70 * true"
71 */
72
73 dw[0] = GEN6_SURFTYPE_NULL << GEN7_SURFACE_DW0_TYPE__SHIFT |
74 GEN6_FORMAT_B8G8R8A8_UNORM << GEN7_SURFACE_DW0_FORMAT__SHIFT |
75 GEN6_TILING_X << 13;
76
77 dw[1] = 0;
78 dw[2] = 0;
79 dw[3] = 0;
80 dw[4] = 0;
81 dw[5] = 0;
82 dw[6] = 0;
83 dw[7] = 0;
84}
85
/*
 * Emit a GEN7/GEN7.5 SURFACE_STATE for a buffer view into dw[0..7].
 *
 * The buffer is described as an array of elements:
 *  - typed (elem_format defined): SURFTYPE_BUFFER with a real surface format
 *  - untyped, struct_size > 1:    SURFTYPE_STRBUF (structured buffer), RAW
 *  - untyped, struct_size == 1:   SURFTYPE_BUFFER, RAW (byte-addressed)
 *
 * offset/size are in bytes; render_cache_rw enables render-cache read/write
 * for the surface.
 */
static void surface_state_buf_gen7(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[8])
{
    const bool typed = !icd_format_is_undef(elem_format);
    const bool structured = (!typed && struct_size > 1);
    /* an untyped (RAW) element is one byte */
    const int elem_size = (typed) ?
        icd_format_get_size(elem_format) : 1;
    int width, height, depth, pitch;
    int surface_type, surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = (structured) ? GEN7_SURFTYPE_STRBUF : GEN6_SURFTYPE_BUFFER;

    surface_format = (typed) ?
        intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size && !structured)
        num_entries++;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 67:
     *
     *     "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *      Address) specifies the base address of first element of the
     *      surface. The surface is interpreted as a simple array of that
     *      single element type. The address must be naturally-aligned to the
     *      element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *      must be 16-byte aligned)
     *
     *      For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *      the base address of the first element of the surface, computed in
     *      software by adding the surface base address to the byte offset of
     *      the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     *     "For typed buffer and structured buffer surfaces, the number of
     *      entries in the buffer ranges from 1 to 2^27. For raw buffer
     *      surfaces, the number of entries in the buffer is the number of
     *      bytes which can range from 1 to 2^30."
     */
    assert(num_entries >= 1 &&
           num_entries <= 1 << ((typed || structured) ? 27 : 30));

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 69:
     *
     *     "For SURFTYPE_BUFFER: The low two bits of this field (Width) must be
     *      11 if the Surface Format is RAW (the size of the buffer must be a
     *      multiple of 4 bytes)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     *     "For surfaces of type SURFTYPE_BUFFER and SURFTYPE_STRBUF, this
     *      field (Surface Pitch) indicates the size of the structure."
     *
     *     "For linear surfaces with Surface Type of SURFTYPE_STRBUF, the pitch
     *      must be a multiple of 4 bytes."
     */
    if (structured)
        assert(struct_size % 4 == 0);
    else if (!typed)
        assert(num_entries % 4 == 0);

    pitch = struct_size;

    /* Width/Height/Depth encode (num_entries - 1); Pitch encodes (pitch - 1) */
    pitch--;
    num_entries--;
    /* bits [6:0] */
    width  = (num_entries & 0x0000007f);
    /* bits [20:7] */
    height = (num_entries & 0x001fff80) >> 7;
    /* bits [30:21] */
    depth  = (num_entries & 0x7fe00000) >> 21;
    /* limit to [26:21] */
    if (typed || structured)
        depth &= 0x3f;

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            pitch;

    dw[4] = 0;
    dw[5] = 0;

    dw[6] = 0;
    dw[7] = 0;

    /* GEN7.5+ adds a shader channel select; program the identity swizzle */
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}
201
202static int img_type_to_view_type(XGL_IMAGE_VIEW_TYPE type)
203{
204 switch (type) {
205 case XGL_IMAGE_1D: return XGL_IMAGE_VIEW_1D;
206 case XGL_IMAGE_2D: return XGL_IMAGE_VIEW_2D;
207 case XGL_IMAGE_3D: return XGL_IMAGE_VIEW_3D;
208 default: assert(!"unknown img type"); return XGL_IMAGE_VIEW_1D;
209 }
210}
211
212static int view_type_to_surface_type(XGL_IMAGE_VIEW_TYPE type)
213{
214 switch (type) {
215 case XGL_IMAGE_VIEW_1D: return GEN6_SURFTYPE_1D;
216 case XGL_IMAGE_VIEW_2D: return GEN6_SURFTYPE_2D;
217 case XGL_IMAGE_VIEW_3D: return GEN6_SURFTYPE_3D;
218 case XGL_IMAGE_VIEW_CUBE: return GEN6_SURFTYPE_CUBE;
219 default: assert(!"unknown view type"); return GEN6_SURFTYPE_NULL;
220 }
221}
222
223static int winsys_tiling_to_surface_tiling(enum intel_tiling_mode tiling)
224{
225 switch (tiling) {
226 case INTEL_TILING_NONE: return GEN6_TILING_NONE;
227 case INTEL_TILING_X: return GEN6_TILING_X;
228 case INTEL_TILING_Y: return GEN6_TILING_Y;
229 default: assert(!"unknown tiling"); return GEN6_TILING_NONE;
230 }
231}
232
/*
 * Emit a GEN7/GEN7.5 SURFACE_STATE for an image view into dw[8].
 *
 * [first_level, first_level + num_levels) and
 * [first_layer, first_layer + num_layers) select the mip levels and array
 * layers visible through the view.  When is_rt is true the view is a render
 * target (exactly one level) rather than a sampler view.
 */
static void surface_state_tex_gen7(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[8])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    /* base-level dimensions; LOD fields select the view's levels */
    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Ivy Bridge PRM, volume 4 part 1, page 70:
         *
         *     "For SURFTYPE_CUBE:For Sampling Engine Surfaces, the range of
         *      this field is [0,340], indicating the number of cube array
         *      elements (equal to the number of underlying 2D array elements
         *      divided by 6). For other surfaces, this field must be zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size against the per-surface-type hardware limits */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    assert(first_layer < 2048 && num_layers <= 2048);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 16384 && height == 1 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 16384 && height <= 16384 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 16384 && height <= 16384 && depth <= 86);
        assert(width == height);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    /*
     * For render targets LOD selects the single level; for sampler views
     * LOD holds the mip count minus one (MIN_LOD in DW5 selects the base).
     */
    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     *     "The Base Address for linear render target surfaces and surfaces
     *      accessed with the typed surface read/write data port messages must
     *      be element-size aligned, for non-YUV surface formats, or a multiple
     *      of 2 element-sizes for YUV surface formats. Other linear surfaces
     *      have no alignment requirements (byte alignment is sufficient)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     *     "For linear render target surfaces and surfaces accessed with the
     *      typed data port messages, the pitch must be a multiple of the
     *      element size for non-YUV surface formats. Pitch must be a multiple
     *      of 2 * element size for YUV surface formats. For linear surfaces
     *      with Surface Type of SURFTYPE_STRBUF, the pitch must be a multiple
     *      of 4 bytes.For other linear surfaces, the pitch can be any multiple
     *      of bytes."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 74:
     *
     *     "For linear surfaces, this field (X Offset) must be zero."
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling) << 13;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 63:
     *
     *     "If this field (Surface Array) is enabled, the Surface Type must be
     *      SURFTYPE_1D, SURFTYPE_2D, or SURFTYPE_CUBE. If this field is
     *      disabled and Surface Type is SURFTYPE_1D, SURFTYPE_2D, or
     *      SURFTYPE_CUBE, the Depth field must be set to zero."
     *
     * For non-3D sampler surfaces, resinfo (the sampler message) always
     * returns zero for the number of layers when this field is not set.
     */
    if (surface_type != GEN6_SURFTYPE_3D) {
        if (num_layers > 1)
            dw[0] |= GEN7_SURFACE_DW0_IS_ARRAY;
        else
            assert(depth == 1);
    }

    /* only these horizontal/vertical sub-image alignments exist on GEN7 */
    assert(img->layout.align_i == 4 || img->layout.align_i == 8);
    assert(img->layout.align_j == 2 || img->layout.align_j == 4);

    if (img->layout.align_j == 4)
        dw[0] |= GEN7_SURFACE_DW0_VALIGN_4;

    if (img->layout.align_i == 8)
        dw[0] |= GEN7_SURFACE_DW0_HALIGN_8;

    /* array spacing: LOD0-only vs. full mip chain per layer */
    if (img->layout.walk == INTEL_LAYOUT_WALK_LOD)
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_LOD0;
    else
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_FULL;

    if (is_rt)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt)
        dw[0] |= GEN7_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = (depth - 1) << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1);

    /* Minimum Array Element (bit 18) and Render Target View Extent (bit 7) */
    dw[4] = first_layer << 18 |
            (num_layers - 1) << 7;

    /*
     * MSFMT_MSS means the samples are not interleaved and MSFMT_DEPTH_STENCIL
     * means the samples are interleaved. The layouts are the same when the
     * number of samples is 1.
     */
    if (img->layout.interleaved_samples && img->samples > 1) {
        assert(!is_rt);
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_DEPTH_STENCIL;
    }
    else {
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_MSS;
    }

    if (img->samples > 4)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_8;
    else if (img->samples > 2)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_4;
    else
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_1;

    dw[5] = (first_level) << GEN7_SURFACE_DW5_MIN_LOD__SHIFT |
            lod;

    dw[6] = 0;
    dw[7] = 0;

    /* GEN7.5+ adds a shader channel select; program the identity swizzle */
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}
432
Chia-I Wu06bed192014-08-20 13:57:18 +0800433static void surface_state_null_gen6(const struct intel_gpu *gpu,
434 uint32_t dw[6])
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800435{
436 INTEL_GPU_ASSERT(gpu, 6, 6);
437
438 /*
439 * From the Sandy Bridge PRM, volume 4 part 1, page 71:
440 *
441 * "A null surface will be used in instances where an actual surface is
442 * not bound. When a write message is generated to a null surface, no
443 * actual surface is written to. When a read message (including any
444 * sampling engine message) is generated to a null surface, the result
445 * is all zeros. Note that a null surface type is allowed to be used
446 * with all messages, even if it is not specificially indicated as
447 * supported. All of the remaining fields in surface state are ignored
448 * for null surfaces, with the following exceptions:
449 *
450 * * [DevSNB+]: Width, Height, Depth, and LOD fields must match the
451 * depth buffer's corresponding state for all render target
452 * surfaces, including null.
453 * * Surface Format must be R8G8B8A8_UNORM."
454 *
455 * From the Sandy Bridge PRM, volume 4 part 1, page 82:
456 *
457 * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
458 * true"
459 */
460
461 dw[0] = GEN6_SURFTYPE_NULL << GEN6_SURFACE_DW0_TYPE__SHIFT |
462 GEN6_FORMAT_B8G8R8A8_UNORM << GEN6_SURFACE_DW0_FORMAT__SHIFT;
463
464 dw[1] = 0;
465 dw[2] = 0;
466 dw[3] = GEN6_TILING_X;
467 dw[4] = 0;
468 dw[5] = 0;
469}
470
Chia-I Wu06bed192014-08-20 13:57:18 +0800471static void surface_state_buf_gen6(const struct intel_gpu *gpu,
472 unsigned offset, unsigned size,
473 unsigned struct_size,
474 XGL_FORMAT elem_format,
475 bool is_rt, bool render_cache_rw,
476 uint32_t dw[6])
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800477{
Chia-I Wu4619ed22014-10-08 12:24:37 +0800478 const bool typed = !icd_format_is_undef(elem_format);
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800479 const int elem_size = icd_format_get_size(elem_format);
480 int width, height, depth, pitch;
481 int surface_format, num_entries;
482
483 INTEL_GPU_ASSERT(gpu, 6, 6);
484
485 /*
486 * For SURFTYPE_BUFFER, a SURFACE_STATE specifies an element of a
487 * structure in a buffer.
488 */
489
Chia-I Wu4619ed22014-10-08 12:24:37 +0800490 surface_format = (typed) ?
491 intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800492
493 num_entries = size / struct_size;
494 /* see if there is enough space to fit another element */
495 if (size % struct_size >= elem_size)
496 num_entries++;
497
498 /*
499 * From the Sandy Bridge PRM, volume 4 part 1, page 76:
500 *
501 * "For SURFTYPE_BUFFER render targets, this field (Surface Base
502 * Address) specifies the base address of first element of the
503 * surface. The surface is interpreted as a simple array of that
504 * single element type. The address must be naturally-aligned to the
505 * element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
506 * must be 16-byte aligned).
507 *
508 * For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
509 * the base address of the first element of the surface, computed in
510 * software by adding the surface base address to the byte offset of
511 * the element in the buffer."
512 */
513 if (is_rt)
514 assert(offset % elem_size == 0);
515
516 /*
517 * From the Sandy Bridge PRM, volume 4 part 1, page 77:
518 *
519 * "For buffer surfaces, the number of entries in the buffer ranges
520 * from 1 to 2^27."
521 */
522 assert(num_entries >= 1 && num_entries <= 1 << 27);
523
524 /*
525 * From the Sandy Bridge PRM, volume 4 part 1, page 81:
526 *
527 * "For surfaces of type SURFTYPE_BUFFER, this field (Surface Pitch)
528 * indicates the size of the structure."
529 */
530 pitch = struct_size;
531
532 pitch--;
533 num_entries--;
534 /* bits [6:0] */
535 width = (num_entries & 0x0000007f);
536 /* bits [19:7] */
537 height = (num_entries & 0x000fff80) >> 7;
538 /* bits [26:20] */
539 depth = (num_entries & 0x07f00000) >> 20;
540
541 dw[0] = GEN6_SURFTYPE_BUFFER << GEN6_SURFACE_DW0_TYPE__SHIFT |
542 surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT;
543 if (render_cache_rw)
544 dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;
545
546 dw[1] = offset;
547
548 dw[2] = height << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
549 width << GEN6_SURFACE_DW2_WIDTH__SHIFT;
550
551 dw[3] = depth << GEN6_SURFACE_DW3_DEPTH__SHIFT |
552 pitch << GEN6_SURFACE_DW3_PITCH__SHIFT;
553
554 dw[4] = 0;
555 dw[5] = 0;
556}
557
/*
 * Emit a GEN6 SURFACE_STATE for an image view into dw[6].
 *
 * [first_level, first_level + num_levels) and
 * [first_layer, first_layer + num_layers) select the mip levels and array
 * layers visible through the view.  When is_rt is true the view is a render
 * target (exactly one level) rather than a sampler view.
 */
static void surface_state_tex_gen6(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[6])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    /* base-level dimensions; LOD fields select the view's levels */
    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 4 part 1, page 81:
         *
         *     "For SURFTYPE_CUBE: [DevSNB+]: for Sampling Engine Surfaces, the
         *      range of this field (Depth) is [0,84], indicating the number of
         *      cube array elements (equal to the number of underlying 2D array
         *      elements divided by 6). For other surfaces, this field must be
         *      zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size against the per-surface-type hardware limits */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 8192 && height == 1 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 8192 && height <= 8192 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        assert(first_layer < 2048 && num_layers <= 512);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 8192 && height <= 8192 && depth <= 85);
        assert(width == height);
        assert(first_layer < 512 && num_layers <= 512);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    /* non-full array spacing is supported only on GEN7+ */
    assert(img->layout.walk != INTEL_LAYOUT_WALK_LOD);
    /* non-interleaved samples are supported only on GEN7+ */
    if (img->samples > 1)
        assert(img->layout.interleaved_samples);

    /*
     * For render targets LOD selects the single level; for sampler views
     * LOD holds the mip count minus one (MIN_LOD in DW4 selects the base).
     */
    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     *     "Linear render target surface base addresses must be element-size
     *      aligned, for non-YUV surface formats, or a multiple of 2
     *      element-sizes for YUV surface formats. Other linear surfaces have
     *      no alignment requirements (byte alignment is sufficient.)"
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     *     "For linear render target surfaces, the pitch must be a multiple
     *      of the element size for non-YUV surface formats. Pitch must be a
     *      multiple of 2 * element size for YUV surface formats."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 86:
     *
     *     "For linear surfaces, this field (X Offset) must be zero"
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_SURFACE_DW0_MIPLAYOUT_BELOW;

    /* bit 9 is Cube Map Corner Mode; enable all cube faces for sampling */
    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt) {
        dw[0] |= 1 << 9 |
                 GEN6_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;
    }

    if (is_rt)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN6_SURFACE_DW2_WIDTH__SHIFT |
            lod << GEN6_SURFACE_DW2_MIP_COUNT_LOD__SHIFT;

    dw[3] = (depth - 1) << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1) << GEN6_SURFACE_DW3_PITCH__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling);

    /* Minimum Array Element (bit 17) and Render Target View Extent (bit 8) */
    dw[4] = first_level << GEN6_SURFACE_DW4_MIN_LOD__SHIFT |
            first_layer << 17 |
            (num_layers - 1) << 8 |
            ((img->samples > 1) ? GEN6_SURFACE_DW4_MULTISAMPLECOUNT_4 :
                                  GEN6_SURFACE_DW4_MULTISAMPLECOUNT_1);

    dw[5] = 0;

    /* only these vertical sub-image alignments exist on GEN6 */
    assert(img->layout.align_j == 2 || img->layout.align_j == 4);
    if (img->layout.align_j == 4)
        dw[5] |= GEN6_SURFACE_DW5_VALIGN_4;
}
710
/*
 * Decoded parameters for the 3DSTATE_DEPTH_BUFFER / STENCIL_BUFFER /
 * HIER_DEPTH_BUFFER commands, filled in by ds_init_info() or
 * ds_init_info_null() and consumed by ds_view_init().
 */
struct ds_surface_info {
    int surface_type;           /* GEN6_SURFTYPE_* */
    int format;                 /* GEN6_ZFORMAT_* */

    /* per-buffer stride/offset for depth (zs), stencil, and hiz */
    struct {
        unsigned stride;
        unsigned offset;
    } zs, stencil, hiz;

    unsigned width, height, depth;
    unsigned lod, first_layer, num_layers;
};
723
724static void
725ds_init_info_null(const struct intel_gpu *gpu,
726 struct ds_surface_info *info)
727{
728 INTEL_GPU_ASSERT(gpu, 6, 7.5);
729
730 memset(info, 0, sizeof(*info));
731
732 info->surface_type = GEN6_SURFTYPE_NULL;
733 info->format = GEN6_ZFORMAT_D32_FLOAT;
734 info->width = 1;
735 info->height = 1;
736 info->depth = 1;
737 info->num_layers = 1;
738}
739
/*
 * Decode (img, format, level, layers) into a ds_surface_info describing the
 * depth, stencil, and hiz buffers of a depth/stencil view.  Falls back to a
 * null surface (via ds_init_info_null()) on an unsupported format.
 */
static void
ds_init_info(const struct intel_gpu *gpu,
             const struct intel_img *img,
             XGL_FORMAT format, unsigned level,
             unsigned first_layer, unsigned num_layers,
             struct ds_surface_info *info)
{
    bool separate_stencil;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    memset(info, 0, sizeof(*info));

    info->surface_type =
        view_type_to_surface_type(img_type_to_view_type(img->type));

    if (info->surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 325-326:
         *
         *     "For Other Surfaces (Cube Surfaces):
         *      This field (Minimum Array Element) is ignored."
         *
         *     "For Other Surfaces (Cube Surfaces):
         *      This field (Render Target View Extent) is ignored."
         *
         * As such, we cannot set first_layer and num_layers on cube surfaces.
         * To work around that, treat it as a 2D surface.
         */
        info->surface_type = GEN6_SURFTYPE_2D;
    }

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        separate_stencil = true;
    }
    else {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 317:
         *
         *     "This field (Separate Stencil Buffer Enable) must be set to the
         *      same value (enabled or disabled) as Hierarchical Depth Buffer
         *      Enable."
         *
         * so on GEN6, separate stencil is used iff hiz (aux_offset) is.
         */
        separate_stencil = img->aux_offset;
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 317:
     *
     *     "If this field (Hierarchical Depth Buffer Enable) is enabled, the
     *      Surface Format of the depth buffer cannot be
     *      D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT. Use of stencil
     *      requires the separate stencil buffer."
     *
     * From the Ironlake PRM, volume 2 part 1, page 330:
     *
     *     "If this field (Separate Stencil Buffer Enable) is disabled, the
     *      Surface Format of the depth buffer cannot be D24_UNORM_X8_UINT."
     *
     * There is no similar restriction for GEN6.  But when D24_UNORM_X8_UINT
     * is indeed used, the depth values output by the fragment shaders will
     * be different when read back.
     *
     * As for GEN7+, separate_stencil is always true.
     */
    switch (format.channelFormat) {
    case XGL_CH_FMT_R16:
        info->format = GEN6_ZFORMAT_D16_UNORM;
        break;
    case XGL_CH_FMT_R32:
        info->format = GEN6_ZFORMAT_D32_FLOAT;
        break;
    case XGL_CH_FMT_R32G8:
        info->format = (separate_stencil) ?
            GEN6_ZFORMAT_D32_FLOAT :
            GEN6_ZFORMAT_D32_FLOAT_S8X24_UINT;
        break;
    case XGL_CH_FMT_R8:
        /* stencil-only; usable only when stencil is a separate buffer */
        if (separate_stencil) {
            info->format = GEN6_ZFORMAT_D32_FLOAT;
            break;
        }
        /* fall through */
    default:
        assert(!"unsupported depth/stencil format");
        ds_init_info_null(gpu, info);
        return;
        break;
    }

    /* R8 is stencil-only: no depth buffer */
    if (format.channelFormat != XGL_CH_FMT_R8)
        info->zs.stride = img->layout.bo_stride;

    if (img->s8_layout) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 329:
         *
         *     "The pitch must be set to 2x the value computed based on width,
         *      as the stencil buffer is stored with two rows interleaved."
         *
         * According to the classic driver, we need to do the same for GEN7+
         * even though the Ivy Bridge PRM does not say anything about it.
         */
        info->stencil.stride = img->s8_layout->bo_stride * 2;

        if (intel_gpu_gen(gpu) == INTEL_GEN(6)) {
            unsigned x, y;

            assert(img->s8_layout->walk == INTEL_LAYOUT_WALK_LOD);

            /* offset to the level */
            intel_layout_get_slice_pos(img->s8_layout, level, 0, &x, &y);
            intel_layout_pos_to_mem(img->s8_layout, x, y, &x, &y);
            info->stencil.offset = intel_layout_mem_to_raw(img->s8_layout, x, y);
        }
    } else if (format.channelFormat == XGL_CH_FMT_R8) {
        /* stencil-only image without a separate s8 layout; same 2x rule */
        info->stencil.stride = img->layout.bo_stride * 2;
    }

    if (img->aux_offset) {
        info->hiz.stride = img->layout.aux_stride;

        /* offset to the level */
        if (intel_gpu_gen(gpu) == INTEL_GEN(6))
            info->hiz.offset = img->layout.aux_offsets[level];
    }


    info->width = img->layout.width0;
    info->height = img->layout.height0;
    info->depth = (img->type == XGL_IMAGE_3D) ?
        img->depth : num_layers;

    info->lod = level;
    info->first_layer = first_layer;
    info->num_layers = num_layers;
}
877
/*
 * Initialize \p view for \p img, packing the hardware depth-buffer,
 * separate-stencil, and HiZ state that the command builder emits later:
 *
 *   view->cmd[0..5]  3DSTATE_DEPTH_BUFFER body (DW layout differs per gen)
 *   view->cmd[6..7]  separate stencil stride/offset (zeros when absent)
 *   view->cmd[8..9]  HiZ stride/offset (zeros when absent)
 *
 * When \p img is NULL, a null depth/stencil surface is described instead.
 */
static void ds_view_init(struct intel_ds_view *view,
                         const struct intel_gpu *gpu,
                         const struct intel_img *img,
                         XGL_FORMAT format, unsigned level,
                         unsigned first_layer, unsigned num_layers)
{
    /* surface dimension and array-size limits grew between GEN6 and GEN7 */
    const int max_2d_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16384 : 8192;
    const int max_array_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 2048 : 512;
    struct ds_surface_info info;
    uint32_t dw1, dw2, dw3, dw4, dw5, dw6;
    uint32_t *dw;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    if (img) {
        ds_init_info(gpu, img, format, level, first_layer, num_layers, &info);
    }
    else {
        ds_init_info_null(gpu, &info);
    }

    /* sanity-check the resolved surface against the per-gen limits */
    switch (info.surface_type) {
    case GEN6_SURFTYPE_NULL:
        break;
    case GEN6_SURFTYPE_1D:
        assert(info.width <= max_2d_size && info.height == 1 &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_2D:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_3D:
        assert(info.width <= 2048 && info.height <= 2048 && info.depth <= 2048);
        assert(info.first_layer < 2048 && info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth == 1);
        assert(info.first_layer == 0 && info.num_layers == 1);
        assert(info.width == info.height);
        break;
    default:
        assert(!"unexpected depth surface type");
        break;
    }

    /* DW1: surface type in 31:29, depth format in 20:18 */
    dw1 = info.surface_type << 29 |
          info.format << 18;

    if (info.zs.stride) {
        /* required for GEN6+ */
        assert(info.zs.stride > 0 && info.zs.stride < 128 * 1024 &&
               info.zs.stride % 128 == 0);
        assert(info.width <= info.zs.stride);

        /* low bits of DW1 hold (pitch - 1) */
        dw1 |= (info.zs.stride - 1);
    }

    dw2 = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        /* per-buffer bits in DW1, set only when the buffer exists
         * (presumably the depth/stencil/HiZ enables — see the PRM) */
        if (info.zs.stride)
            dw1 |= 1 << 28;

        if (info.stencil.stride)
            dw1 |= 1 << 27;

        if (info.hiz.stride)
            dw1 |= 1 << 22;

        dw3 = (info.height - 1) << 18 |
              (info.width - 1) << 4 |
              info.lod;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10;

        dw5 = 0;

        dw6 = (info.num_layers - 1) << 21;
    }
    else {
        /* always Y-tiled */
        dw1 |= 1 << 27 |
               1 << 26;

        if (info.hiz.stride) {
            dw1 |= 1 << 22 |
                   1 << 21;
        }

        /* GEN6 uses different bit positions and a mip-layout field in DW3 */
        dw3 = (info.height - 1) << 19 |
              (info.width - 1) << 6 |
              info.lod << 2 |
              GEN6_DEPTH_DW3_MIPLAYOUT_BELOW;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10 |
              (info.num_layers - 1) << 1;

        dw5 = 0;

        dw6 = 0;
    }

    STATIC_ASSERT(ARRAY_SIZE(view->cmd) >= 10);
    dw = view->cmd;

    dw[0] = dw1;
    dw[1] = dw2;
    dw[2] = dw3;
    dw[3] = dw4;
    dw[4] = dw5;
    dw[5] = dw6;

    /* separate stencil */
    if (info.stencil.stride) {
        assert(info.stencil.stride > 0 && info.stencil.stride < 128 * 1024 &&
               info.stencil.stride % 128 == 0);

        dw[6] = info.stencil.stride - 1;
        dw[7] = img->s8_offset;

        /* Haswell adds an explicit stencil-buffer-enable bit */
        if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5))
            dw[6] |= GEN75_STENCIL_DW1_STENCIL_BUFFER_ENABLE;
    }
    else {
        dw[6] = 0;
        dw[7] = 0;
    }

    /* hiz */
    if (info.hiz.stride) {
        dw[8] = info.hiz.stride - 1;
        dw[9] = img->aux_offset;
    }
    else {
        dw[8] = 0;
        dw[9] = 0;
    }
}
1024
Chia-I Wu5a323262014-08-11 10:31:53 +08001025void intel_null_view_init(struct intel_null_view *view,
1026 struct intel_dev *dev)
1027{
Chia-I Wucd83cf12014-08-23 17:26:08 +08001028 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001029 surface_state_null_gen7(dev->gpu, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001030 view->cmd_len = 8;
1031 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001032 surface_state_null_gen6(dev->gpu, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001033 view->cmd_len = 6;
1034 }
Chia-I Wu5a323262014-08-11 10:31:53 +08001035}
1036
1037void intel_mem_view_init(struct intel_mem_view *view,
1038 struct intel_dev *dev,
1039 const XGL_MEMORY_VIEW_ATTACH_INFO *info)
1040{
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001041 bool will_write;
1042
1043 switch (info->state) {
1044 case XGL_MEMORY_STATE_GRAPHICS_SHADER_WRITE_ONLY:
1045 case XGL_MEMORY_STATE_GRAPHICS_SHADER_READ_WRITE:
1046 case XGL_MEMORY_STATE_COMPUTE_SHADER_WRITE_ONLY:
1047 case XGL_MEMORY_STATE_COMPUTE_SHADER_READ_WRITE:
1048 will_write = true;
1049 break;
1050 default:
1051 will_write = false;
1052 break;
1053 }
Chia-I Wu5a323262014-08-11 10:31:53 +08001054
1055 view->mem = intel_mem(info->mem);
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001056
1057 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001058 surface_state_buf_gen7(dev->gpu, info->offset,
1059 info->range, info->stride, info->format,
1060 will_write, will_write, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001061 view->cmd_len = 8;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001062 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001063 surface_state_buf_gen6(dev->gpu, info->offset,
1064 info->range, info->stride, info->format,
1065 will_write, will_write, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001066 view->cmd_len = 6;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001067 }
Chia-I Wu5a323262014-08-11 10:31:53 +08001068}
1069
/* trampoline from the generic object destructor to the typed one */
static void img_view_destroy(struct intel_obj *obj)
{
    intel_img_view_destroy(intel_img_view_from_obj(obj));
}
1076
1077XGL_RESULT intel_img_view_create(struct intel_dev *dev,
1078 const XGL_IMAGE_VIEW_CREATE_INFO *info,
1079 struct intel_img_view **view_ret)
1080{
1081 struct intel_img *img = intel_img(info->image);
1082 struct intel_img_view *view;
Chia-I Wuaa759372014-10-18 12:47:35 +08001083 XGL_UINT mip_levels, array_size;
1084
1085 if (info->subresourceRange.baseMipLevel >= img->mip_levels ||
1086 info->subresourceRange.baseArraySlice >= img->array_size ||
1087 !info->subresourceRange.mipLevels ||
1088 !info->subresourceRange.arraySize)
1089 return XGL_ERROR_INVALID_VALUE;
1090
1091 mip_levels = info->subresourceRange.mipLevels;
1092 if (mip_levels > img->mip_levels - info->subresourceRange.baseMipLevel)
1093 mip_levels = img->mip_levels - info->subresourceRange.baseMipLevel;
1094
1095 array_size = info->subresourceRange.arraySize;
1096 if (array_size > img->array_size - info->subresourceRange.baseArraySlice)
1097 array_size = img->array_size - info->subresourceRange.baseArraySlice;
Chia-I Wu5a323262014-08-11 10:31:53 +08001098
1099 view = (struct intel_img_view *) intel_base_create(dev, sizeof(*view),
1100 dev->base.dbg, XGL_DBG_OBJECT_IMAGE_VIEW, info, 0);
1101 if (!view)
1102 return XGL_ERROR_OUT_OF_MEMORY;
1103
1104 view->obj.destroy = img_view_destroy;
1105
1106 view->img = img;
1107 view->swizzles = info->channels;
1108 view->min_lod = info->minLod;
1109
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001110 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001111 surface_state_tex_gen7(dev->gpu, img, info->viewType, info->format,
Chia-I Wuaa759372014-10-18 12:47:35 +08001112 info->subresourceRange.baseMipLevel, mip_levels,
1113 info->subresourceRange.baseArraySlice, array_size,
1114 false, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001115 view->cmd_len = 8;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001116 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001117 surface_state_tex_gen6(dev->gpu, img, info->viewType, info->format,
Chia-I Wuaa759372014-10-18 12:47:35 +08001118 info->subresourceRange.baseMipLevel, mip_levels,
1119 info->subresourceRange.baseArraySlice, array_size,
1120 false, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001121 view->cmd_len = 6;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001122 }
1123
Chia-I Wu5a323262014-08-11 10:31:53 +08001124 *view_ret = view;
1125
1126 return XGL_SUCCESS;
1127}
1128
/* Destroy an image view created by intel_img_view_create(). */
void intel_img_view_destroy(struct intel_img_view *view)
{
    intel_base_destroy(&view->obj.base);
}
1133
/* trampoline from the generic object destructor to the typed one */
static void rt_view_destroy(struct intel_obj *obj)
{
    intel_rt_view_destroy(intel_rt_view_from_obj(obj));
}
1140
1141XGL_RESULT intel_rt_view_create(struct intel_dev *dev,
1142 const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
1143 struct intel_rt_view **view_ret)
1144{
1145 struct intel_img *img = intel_img(info->image);
1146 struct intel_rt_view *view;
1147
1148 view = (struct intel_rt_view *) intel_base_create(dev, sizeof(*view),
1149 dev->base.dbg, XGL_DBG_OBJECT_COLOR_TARGET_VIEW, info, 0);
1150 if (!view)
1151 return XGL_ERROR_OUT_OF_MEMORY;
1152
1153 view->obj.destroy = rt_view_destroy;
1154
1155 view->img = img;
1156
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001157 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001158 surface_state_tex_gen7(dev->gpu, img,
1159 img_type_to_view_type(img->type),
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001160 info->format, info->mipLevel, 1,
1161 info->baseArraySlice, info->arraySize,
1162 true, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001163 view->cmd_len = 8;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001164 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001165 surface_state_tex_gen6(dev->gpu, img,
1166 img_type_to_view_type(img->type),
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001167 info->format, info->mipLevel, 1,
1168 info->baseArraySlice, info->arraySize,
1169 true, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001170 view->cmd_len = 6;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001171 }
1172
Chia-I Wu5a323262014-08-11 10:31:53 +08001173 *view_ret = view;
1174
1175 return XGL_SUCCESS;
1176}
1177
/* Destroy a render-target view created by intel_rt_view_create(). */
void intel_rt_view_destroy(struct intel_rt_view *view)
{
    intel_base_destroy(&view->obj.base);
}
1182
/* trampoline from the generic object destructor to the typed one */
static void ds_view_destroy(struct intel_obj *obj)
{
    intel_ds_view_destroy(intel_ds_view_from_obj(obj));
}
1189
1190XGL_RESULT intel_ds_view_create(struct intel_dev *dev,
1191 const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
1192 struct intel_ds_view **view_ret)
1193{
1194 struct intel_img *img = intel_img(info->image);
1195 struct intel_ds_view *view;
1196
1197 view = (struct intel_ds_view *) intel_base_create(dev, sizeof(*view),
1198 dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_VIEW, info, 0);
1199 if (!view)
1200 return XGL_ERROR_OUT_OF_MEMORY;
1201
1202 view->obj.destroy = ds_view_destroy;
1203
1204 view->img = img;
1205
Chia-I Wu06bed192014-08-20 13:57:18 +08001206 ds_view_init(view, dev->gpu, img, img->layout.format, info->mipLevel,
1207 info->baseArraySlice, info->arraySize);
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001208
Chia-I Wu5a323262014-08-11 10:31:53 +08001209 *view_ret = view;
1210
1211 return XGL_SUCCESS;
1212}
1213
/* Destroy a depth/stencil view created by intel_ds_view_create(). */
void intel_ds_view_destroy(struct intel_ds_view *view)
{
    intel_base_destroy(&view->obj.base);
}
1218
1219XGL_RESULT XGLAPI intelCreateImageView(
1220 XGL_DEVICE device,
1221 const XGL_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
1222 XGL_IMAGE_VIEW* pView)
1223{
1224 struct intel_dev *dev = intel_dev(device);
1225
1226 return intel_img_view_create(dev, pCreateInfo,
1227 (struct intel_img_view **) pView);
1228}
1229
1230XGL_RESULT XGLAPI intelCreateColorAttachmentView(
1231 XGL_DEVICE device,
1232 const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
1233 XGL_COLOR_ATTACHMENT_VIEW* pView)
1234{
1235 struct intel_dev *dev = intel_dev(device);
1236
1237 return intel_rt_view_create(dev, pCreateInfo,
1238 (struct intel_rt_view **) pView);
1239}
1240
1241XGL_RESULT XGLAPI intelCreateDepthStencilView(
1242 XGL_DEVICE device,
1243 const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
1244 XGL_DEPTH_STENCIL_VIEW* pView)
1245{
1246 struct intel_dev *dev = intel_dev(device);
1247
1248 return intel_ds_view_create(dev, pCreateInfo,
1249 (struct intel_ds_view **) pView);
1250}