/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 */

#include "genhw/genhw.h"
#include "kmd/winsys.h"
#include "dev.h"
#include "format.h"
#include "gpu.h"
#include "img.h"
#include "mem.h"
#include "view.h"

static void surface_state_null_gen7(const struct intel_gpu *gpu,
                                    uint32_t dw[8])
{
    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 62:
     *
     * "A null surface is used in instances where an actual surface is not
     *  bound. When a write message is generated to a null surface, no
     *  actual surface is written to. When a read message (including any
     *  sampling engine message) is generated to a null surface, the result
     *  is all zeros. Note that a null surface type is allowed to be used
     *  with all messages, even if it is not specificially indicated as
     *  supported. All of the remaining fields in surface state are ignored
     *  for null surfaces, with the following exceptions:
     *
     *    * Width, Height, Depth, LOD, and Render Target View Extent fields
     *      must match the depth buffer's corresponding state for all render
     *      target surfaces, including null.
     *    * All sampling engine and data port messages support null surfaces
     *      with the above behavior, even if not mentioned as specifically
     *      supported, except for the following:
     *      * Data Port Media Block Read/Write messages.
     *    * The Surface Type of a surface used as a render target (accessed
     *      via the Data Port's Render Target Write message) must be the same
     *      as the Surface Type of all other render targets and of the depth
     *      buffer (defined in 3DSTATE_DEPTH_BUFFER), unless either the depth
     *      buffer or render targets are SURFTYPE_NULL."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 65:
     *
     * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
     *  true"
     */

    dw[0] = GEN6_SURFTYPE_NULL << GEN7_SURFACE_DW0_TYPE__SHIFT |
            GEN6_FORMAT_B8G8R8A8_UNORM << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_TILING_X << 13;

    dw[1] = 0;
    dw[2] = 0;
    dw[3] = 0;
    dw[4] = 0;
    dw[5] = 0;
    dw[6] = 0;
    dw[7] = 0;
}

static void surface_state_buf_gen7(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[8])
{
    const bool typed = !icd_format_is_undef(elem_format);
    const bool structured = (!typed && struct_size > 1);
    const int elem_size = (typed) ?
        icd_format_get_size(elem_format) : 1;
    int width, height, depth, pitch;
    int surface_type, surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = (structured) ? GEN7_SURFTYPE_STRBUF : GEN6_SURFTYPE_BUFFER;

    surface_format = (typed) ?
        intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size && !structured)
        num_entries++;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 67:
     *
     * "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *  Address) specifies the base address of first element of the
     *  surface. The surface is interpreted as a simple array of that
     *  single element type. The address must be naturally-aligned to the
     *  element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *  must be 16-byte aligned)
     *
     *  For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *  the base address of the first element of the surface, computed in
     *  software by adding the surface base address to the byte offset of
     *  the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     * "For typed buffer and structured buffer surfaces, the number of
     *  entries in the buffer ranges from 1 to 2^27. For raw buffer
     *  surfaces, the number of entries in the buffer is the number of
     *  bytes which can range from 1 to 2^30."
     */
    assert(num_entries >= 1 &&
           num_entries <= 1 << ((typed || structured) ? 27 : 30));

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 69:
     *
     * "For SURFTYPE_BUFFER: The low two bits of this field (Width) must be
     *  11 if the Surface Format is RAW (the size of the buffer must be a
     *  multiple of 4 bytes)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     * "For surfaces of type SURFTYPE_BUFFER and SURFTYPE_STRBUF, this
     *  field (Surface Pitch) indicates the size of the structure."
     *
     * "For linear surfaces with Surface Type of SURFTYPE_STRBUF, the pitch
     *  must be a multiple of 4 bytes."
     */
    if (structured)
        assert(struct_size % 4 == 0);
    else if (!typed)
        assert(num_entries % 4 == 0);

    pitch = struct_size;

    pitch--;
    num_entries--;
    /* bits [6:0] */
    width = (num_entries & 0x0000007f);
    /* bits [20:7] */
    height = (num_entries & 0x001fff80) >> 7;
    /* bits [30:21] */
    depth = (num_entries & 0x7fe00000) >> 21;
    /* limit to [26:21] */
    if (typed || structured)
        depth &= 0x3f;
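    /*
     * Illustrative example (not from the PRM): the entry count is stored
     * minus one, split across the Width, Height, and Depth fields.  A raw
     * buffer of 1000 bytes has num_entries = 1000; after the decrement,
     * 999 = 0x3e7 encodes as Width = 0x67 (bits [6:0]), Height = 0x7
     * (bits [20:7]), and Depth = 0 (bits [30:21]).
     */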

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            pitch;

    dw[4] = 0;
    dw[5] = 0;

    dw[6] = 0;
    dw[7] = 0;

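    /*
     * GEN7.5 (Haswell) adds per-channel Surface Channel Select fields in
     * DW7; program an identity swizzle (R, G, B, A) here.
     */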
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}

static int img_type_to_view_type(XGL_IMAGE_VIEW_TYPE type)
{
    switch (type) {
    case XGL_IMAGE_1D: return XGL_IMAGE_VIEW_1D;
    case XGL_IMAGE_2D: return XGL_IMAGE_VIEW_2D;
    case XGL_IMAGE_3D: return XGL_IMAGE_VIEW_3D;
    default: assert(!"unknown img type"); return XGL_IMAGE_VIEW_1D;
    }
}

static int view_type_to_surface_type(XGL_IMAGE_VIEW_TYPE type)
{
    switch (type) {
    case XGL_IMAGE_VIEW_1D: return GEN6_SURFTYPE_1D;
    case XGL_IMAGE_VIEW_2D: return GEN6_SURFTYPE_2D;
    case XGL_IMAGE_VIEW_3D: return GEN6_SURFTYPE_3D;
    case XGL_IMAGE_VIEW_CUBE: return GEN6_SURFTYPE_CUBE;
    default: assert(!"unknown view type"); return GEN6_SURFTYPE_NULL;
    }
}

static int winsys_tiling_to_surface_tiling(enum intel_tiling_mode tiling)
{
    switch (tiling) {
    case INTEL_TILING_NONE: return GEN6_TILING_NONE;
    case INTEL_TILING_X: return GEN6_TILING_X;
    case INTEL_TILING_Y: return GEN6_TILING_Y;
    default: assert(!"unknown tiling"); return GEN6_TILING_NONE;
    }
}

static void surface_state_tex_gen7(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[8])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Ivy Bridge PRM, volume 4 part 1, page 70:
         *
         * "For SURFTYPE_CUBE:For Sampling Engine Surfaces, the range of
         *  this field is [0,340], indicating the number of cube array
         *  elements (equal to the number of underlying 2D array elements
         *  divided by 6). For other surfaces, this field must be zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    assert(first_layer < 2048 && num_layers <= 2048);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 16384 && height == 1 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 16384 && height <= 16384 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 16384 && height <= 16384 && depth <= 86);
        assert(width == height);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

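    /*
     * The LOD field is interpreted differently for render targets and for
     * sampling: a render target surface points at the single level being
     * rendered, while a sampled surface advertises the number of levels
     * minus one (with Surface Min LOD selecting the base level below).
     */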
    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     * "The Base Address for linear render target surfaces and surfaces
     *  accessed with the typed surface read/write data port messages must
     *  be element-size aligned, for non-YUV surface formats, or a multiple
     *  of 2 element-sizes for YUV surface formats. Other linear surfaces
     *  have no alignment requirements (byte alignment is sufficient)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     * "For linear render target surfaces and surfaces accessed with the
     *  typed data port messages, the pitch must be a multiple of the
     *  element size for non-YUV surface formats. Pitch must be a multiple
     *  of 2 * element size for YUV surface formats. For linear surfaces
     *  with Surface Type of SURFTYPE_STRBUF, the pitch must be a multiple
     *  of 4 bytes.For other linear surfaces, the pitch can be any multiple
     *  of bytes."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 74:
     *
     * "For linear surfaces, this field (X Offset) must be zero."
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling) << 13;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 63:
     *
     * "If this field (Surface Array) is enabled, the Surface Type must be
     *  SURFTYPE_1D, SURFTYPE_2D, or SURFTYPE_CUBE. If this field is
     *  disabled and Surface Type is SURFTYPE_1D, SURFTYPE_2D, or
     *  SURFTYPE_CUBE, the Depth field must be set to zero."
     *
     * For non-3D sampler surfaces, resinfo (the sampler message) always
     * returns zero for the number of layers when this field is not set.
     */
    if (surface_type != GEN6_SURFTYPE_3D) {
        if (num_layers > 1)
            dw[0] |= GEN7_SURFACE_DW0_IS_ARRAY;
        else
            assert(depth == 1);
    }

    assert(img->layout.align_i == 4 || img->layout.align_i == 8);
    assert(img->layout.align_j == 2 || img->layout.align_j == 4);

    if (img->layout.align_j == 4)
        dw[0] |= GEN7_SURFACE_DW0_VALIGN_4;

    if (img->layout.align_i == 8)
        dw[0] |= GEN7_SURFACE_DW0_HALIGN_8;

    if (img->layout.walk == INTEL_LAYOUT_WALK_LOD)
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_LOD0;
    else
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_FULL;

    if (is_rt)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt)
        dw[0] |= GEN7_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = (depth - 1) << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1);

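    /*
     * The raw shifts below are presumably the Minimum Array Element
     * (bit 18) and Render Target View Extent (bit 7) fields of DW4, for
     * which no named macros are used here.
     */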
    dw[4] = first_layer << 18 |
            (num_layers - 1) << 7;

    /*
     * MSFMT_MSS means the samples are not interleaved and MSFMT_DEPTH_STENCIL
     * means the samples are interleaved. The layouts are the same when the
     * number of samples is 1.
     */
    if (img->layout.interleaved_samples && img->samples > 1) {
        assert(!is_rt);
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_DEPTH_STENCIL;
    }
    else {
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_MSS;
    }

    if (img->samples > 4)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_8;
    else if (img->samples > 2)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_4;
    else
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_1;

    dw[5] = (first_level) << GEN7_SURFACE_DW5_MIN_LOD__SHIFT |
            lod;

    dw[6] = 0;
    dw[7] = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}

static void surface_state_null_gen6(const struct intel_gpu *gpu,
                                    uint32_t dw[6])
{
    INTEL_GPU_ASSERT(gpu, 6, 6);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 71:
     *
     * "A null surface will be used in instances where an actual surface is
     *  not bound. When a write message is generated to a null surface, no
     *  actual surface is written to. When a read message (including any
     *  sampling engine message) is generated to a null surface, the result
     *  is all zeros. Note that a null surface type is allowed to be used
     *  with all messages, even if it is not specificially indicated as
     *  supported. All of the remaining fields in surface state are ignored
     *  for null surfaces, with the following exceptions:
     *
     *    * [DevSNB+]: Width, Height, Depth, and LOD fields must match the
     *      depth buffer's corresponding state for all render target
     *      surfaces, including null.
     *    * Surface Format must be R8G8B8A8_UNORM."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 82:
     *
     * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
     *  true"
     */

    dw[0] = GEN6_SURFTYPE_NULL << GEN6_SURFACE_DW0_TYPE__SHIFT |
            GEN6_FORMAT_B8G8R8A8_UNORM << GEN6_SURFACE_DW0_FORMAT__SHIFT;

    dw[1] = 0;
    dw[2] = 0;
    dw[3] = GEN6_TILING_X;
    dw[4] = 0;
    dw[5] = 0;
}

static void surface_state_buf_gen6(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[6])
{
    const int elem_size = icd_format_get_size(elem_format);
    int width, height, depth, pitch;
    int surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    /*
     * For SURFTYPE_BUFFER, a SURFACE_STATE specifies an element of a
     * structure in a buffer.
     */

    surface_format = intel_format_translate_color(gpu, elem_format);

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size)
        num_entries++;

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     * "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *  Address) specifies the base address of first element of the
     *  surface. The surface is interpreted as a simple array of that
     *  single element type. The address must be naturally-aligned to the
     *  element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *  must be 16-byte aligned).
     *
     *  For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *  the base address of the first element of the surface, computed in
     *  software by adding the surface base address to the byte offset of
     *  the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 77:
     *
     * "For buffer surfaces, the number of entries in the buffer ranges
     *  from 1 to 2^27."
     */
    assert(num_entries >= 1 && num_entries <= 1 << 27);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     * "For surfaces of type SURFTYPE_BUFFER, this field (Surface Pitch)
     *  indicates the size of the structure."
     */
    pitch = struct_size;

    pitch--;
    num_entries--;
    /* bits [6:0] */
    width = (num_entries & 0x0000007f);
    /* bits [19:7] */
    height = (num_entries & 0x000fff80) >> 7;
    /* bits [26:20] */
    depth = (num_entries & 0x07f00000) >> 20;

    dw[0] = GEN6_SURFTYPE_BUFFER << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN6_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            pitch << GEN6_SURFACE_DW3_PITCH__SHIFT;

    dw[4] = 0;
    dw[5] = 0;
}

static void surface_state_tex_gen6(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[6])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 4 part 1, page 81:
         *
         * "For SURFTYPE_CUBE: [DevSNB+]: for Sampling Engine Surfaces, the
         *  range of this field (Depth) is [0,84], indicating the number of
         *  cube array elements (equal to the number of underlying 2D array
         *  elements divided by 6). For other surfaces, this field must be
         *  zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 8192 && height == 1 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 8192 && height <= 8192 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        assert(first_layer < 2048 && num_layers <= 512);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 8192 && height <= 8192 && depth <= 85);
        assert(width == height);
        assert(first_layer < 512 && num_layers <= 512);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    /* non-full array spacing is supported only on GEN7+ */
    assert(img->layout.walk != INTEL_LAYOUT_WALK_LOD);
    /* non-interleaved samples are supported only on GEN7+ */
    if (img->samples > 1)
        assert(img->layout.interleaved_samples);

    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     * "Linear render target surface base addresses must be element-size
     *  aligned, for non-YUV surface formats, or a multiple of 2
     *  element-sizes for YUV surface formats. Other linear surfaces have
     *  no alignment requirements (byte alignment is sufficient.)"
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     * "For linear render target surfaces, the pitch must be a multiple
     *  of the element size for non-YUV surface formats. Pitch must be a
     *  multiple of 2 * element size for YUV surface formats."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 86:
     *
     * "For linear surfaces, this field (X Offset) must be zero"
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_SURFACE_DW0_MIPLAYOUT_BELOW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt) {
        dw[0] |= 1 << 9 |
                 GEN6_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;
    }

    if (is_rt)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = 0;

    dw[2] = (height - 1) << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN6_SURFACE_DW2_WIDTH__SHIFT |
            lod << GEN6_SURFACE_DW2_MIP_COUNT_LOD__SHIFT;

    dw[3] = (depth - 1) << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1) << GEN6_SURFACE_DW3_PITCH__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling);

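    /*
     * The raw shifts below are presumably the Minimum Array Element
     * (bit 17) and Render Target View Extent (bit 8) fields of DW4, for
     * which no named macros are used here.
     */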
    dw[4] = first_level << GEN6_SURFACE_DW4_MIN_LOD__SHIFT |
            first_layer << 17 |
            (num_layers - 1) << 8 |
            ((img->samples > 1) ? GEN6_SURFACE_DW4_MULTISAMPLECOUNT_4 :
                                  GEN6_SURFACE_DW4_MULTISAMPLECOUNT_1);

    dw[5] = 0;

    assert(img->layout.align_j == 2 || img->layout.align_j == 4);
    if (img->layout.align_j == 4)
        dw[5] |= GEN6_SURFACE_DW5_VALIGN_4;
}

struct ds_surface_info {
    int surface_type;
    int format;

    struct {
        unsigned stride;
        unsigned offset;
    } zs, stencil, hiz;

    unsigned width, height, depth;
    unsigned lod, first_layer, num_layers;
};

static void
ds_init_info_null(const struct intel_gpu *gpu,
                  struct ds_surface_info *info)
{
    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    memset(info, 0, sizeof(*info));

    info->surface_type = GEN6_SURFTYPE_NULL;
    info->format = GEN6_ZFORMAT_D32_FLOAT;
    info->width = 1;
    info->height = 1;
    info->depth = 1;
    info->num_layers = 1;
}

static void
ds_init_info(const struct intel_gpu *gpu,
             const struct intel_img *img,
             XGL_FORMAT format, unsigned level,
             unsigned first_layer, unsigned num_layers,
             struct ds_surface_info *info)
{
    bool separate_stencil;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    memset(info, 0, sizeof(*info));

    info->surface_type =
        view_type_to_surface_type(img_type_to_view_type(img->type));

    if (info->surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 325-326:
         *
         * "For Other Surfaces (Cube Surfaces):
         *  This field (Minimum Array Element) is ignored."
         *
         * "For Other Surfaces (Cube Surfaces):
         *  This field (Render Target View Extent) is ignored."
         *
         * As such, we cannot set first_layer and num_layers on cube surfaces.
         * To work around that, treat it as a 2D surface.
         */
        info->surface_type = GEN6_SURFTYPE_2D;
    }

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        separate_stencil = true;
    }
    else {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 317:
         *
         * "This field (Separate Stencil Buffer Enable) must be set to the
         *  same value (enabled or disabled) as Hierarchical Depth Buffer
         *  Enable."
         */
        separate_stencil = img->aux_offset;
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 317:
     *
     * "If this field (Hierarchical Depth Buffer Enable) is enabled, the
     *  Surface Format of the depth buffer cannot be
     *  D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT. Use of stencil
     *  requires the separate stencil buffer."
     *
     * From the Ironlake PRM, volume 2 part 1, page 330:
     *
     * "If this field (Separate Stencil Buffer Enable) is disabled, the
     *  Surface Format of the depth buffer cannot be D24_UNORM_X8_UINT."
     *
     * There is no similar restriction for GEN6. But when D24_UNORM_X8_UINT
     * is indeed used, the depth values output by the fragment shaders will
     * be different when read back.
     *
     * As for GEN7+, separate_stencil is always true.
     */
    switch (format.channelFormat) {
    case XGL_CH_FMT_R16:
        info->format = GEN6_ZFORMAT_D16_UNORM;
        break;
    case XGL_CH_FMT_R32:
        info->format = GEN6_ZFORMAT_D32_FLOAT;
        break;
    case XGL_CH_FMT_R32G8:
        info->format = (separate_stencil) ?
            GEN6_ZFORMAT_D32_FLOAT :
            GEN6_ZFORMAT_D32_FLOAT_S8X24_UINT;
        break;
    case XGL_CH_FMT_R8:
        if (separate_stencil) {
            info->format = GEN6_ZFORMAT_D32_FLOAT;
            break;
        }
        /* fall through */
    default:
        assert(!"unsupported depth/stencil format");
        ds_init_info_null(gpu, info);
        return;
        break;
    }

    if (format.channelFormat != XGL_CH_FMT_R8)
        info->zs.stride = img->layout.bo_stride;

    if (img->s8_layout) {
        /*
         * From the Sandy Bridge PRM, volume 2 part 1, page 329:
         *
         * "The pitch must be set to 2x the value computed based on width,
         *  as the stencil buffer is stored with two rows interleaved."
         *
         * According to the classic driver, we need to do the same for GEN7+
         * even though the Ivy Bridge PRM does not say anything about it.
         */
        info->stencil.stride = img->s8_layout->bo_stride * 2;

        if (intel_gpu_gen(gpu) == INTEL_GEN(6)) {
            unsigned x, y;

            assert(img->s8_layout->walk == INTEL_LAYOUT_WALK_LOD);

            /* offset to the level */
            intel_layout_get_slice_pos(img->s8_layout, level, 0, &x, &y);
            intel_layout_pos_to_mem(img->s8_layout, x, y, &x, &y);
            info->stencil.offset = intel_layout_mem_to_raw(img->s8_layout, x, y);
        }
    } else if (format.channelFormat == XGL_CH_FMT_R8) {
        info->stencil.stride = img->layout.bo_stride * 2;
    }

    if (img->aux_offset) {
        info->hiz.stride = img->layout.aux_stride;

        /* offset to the level */
        if (intel_gpu_gen(gpu) == INTEL_GEN(6))
            info->hiz.offset = img->layout.aux_offsets[level];
    }

    info->width = img->layout.width0;
    info->height = img->layout.height0;
    info->depth = (img->type == XGL_IMAGE_3D) ?
        img->depth : num_layers;

    info->lod = level;
    info->first_layer = first_layer;
    info->num_layers = num_layers;
}

static void ds_view_init(struct intel_ds_view *view,
                         const struct intel_gpu *gpu,
                         const struct intel_img *img,
                         XGL_FORMAT format, unsigned level,
                         unsigned first_layer, unsigned num_layers)
{
    const int max_2d_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16384 : 8192;
    const int max_array_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 2048 : 512;
    struct ds_surface_info info;
    uint32_t dw1, dw2, dw3, dw4, dw5, dw6;
    uint32_t *dw;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    if (img) {
        ds_init_info(gpu, img, format, level, first_layer, num_layers, &info);
    }
    else {
        ds_init_info_null(gpu, &info);
    }

    switch (info.surface_type) {
    case GEN6_SURFTYPE_NULL:
        break;
    case GEN6_SURFTYPE_1D:
        assert(info.width <= max_2d_size && info.height == 1 &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_2D:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_3D:
        assert(info.width <= 2048 && info.height <= 2048 && info.depth <= 2048);
        assert(info.first_layer < 2048 && info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth == 1);
        assert(info.first_layer == 0 && info.num_layers == 1);
        assert(info.width == info.height);
        break;
    default:
        assert(!"unexpected depth surface type");
        break;
    }

    dw1 = info.surface_type << 29 |
          info.format << 18;

    if (info.zs.stride) {
        /* required for GEN6+ */
        assert(info.zs.stride > 0 && info.zs.stride < 128 * 1024 &&
               info.zs.stride % 128 == 0);
        assert(info.width <= info.zs.stride);

        dw1 |= (info.zs.stride - 1);
    }

    dw2 = 0;

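    /*
     * dw1..dw6 follow the 3DSTATE_DEPTH_BUFFER layout.  On GEN7+ the bits
     * set below are presumably Depth Write Enable (bit 28), Stencil Write
     * Enable (bit 27), and Hierarchical Depth Buffer Enable (bit 22); on
     * GEN6 they are the Tiled Surface and Tile Walk bits plus the HiZ and
     * separate stencil enables.
     */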
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        if (info.zs.stride)
            dw1 |= 1 << 28;

        if (info.stencil.stride)
            dw1 |= 1 << 27;

        if (info.hiz.stride)
            dw1 |= 1 << 22;

        dw3 = (info.height - 1) << 18 |
              (info.width - 1) << 4 |
              info.lod;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10;

        dw5 = 0;

        dw6 = (info.num_layers - 1) << 21;
    }
    else {
        /* always Y-tiled */
        dw1 |= 1 << 27 |
               1 << 26;

        if (info.hiz.stride) {
            dw1 |= 1 << 22 |
                   1 << 21;
        }

        dw3 = (info.height - 1) << 19 |
              (info.width - 1) << 6 |
              info.lod << 2 |
              GEN6_DEPTH_DW3_MIPLAYOUT_BELOW;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10 |
              (info.num_layers - 1) << 1;

        dw5 = 0;

        dw6 = 0;
    }

    STATIC_ASSERT(ARRAY_SIZE(view->cmd) >= 10);
    dw = view->cmd;

    dw[0] = dw1;
    dw[1] = dw2;
    dw[2] = dw3;
    dw[3] = dw4;
    dw[4] = dw5;
    dw[5] = dw6;

    /* separate stencil */
    if (info.stencil.stride) {
        assert(info.stencil.stride > 0 && info.stencil.stride < 128 * 1024 &&
               info.stencil.stride % 128 == 0);

        dw[6] = info.stencil.stride - 1;
        dw[7] = img->s8_offset;

        if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5))
            dw[6] |= GEN75_STENCIL_DW1_STENCIL_BUFFER_ENABLE;
    }
    else {
        dw[6] = 0;
        dw[7] = 0;
    }

    /* hiz */
    if (info.hiz.stride) {
        dw[8] = info.hiz.stride - 1;
        dw[9] = img->aux_offset;
    }
    else {
        dw[8] = 0;
        dw[9] = 0;
    }
}

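/*
 * The view->cmd_len values set in the functions below reflect the size of
 * SURFACE_STATE: 8 DWords on GEN7+ and 6 DWords on GEN6.
 */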
void intel_null_view_init(struct intel_null_view *view,
                          struct intel_dev *dev)
{
    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_null_gen7(dev->gpu, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_null_gen6(dev->gpu, view->cmd);
        view->cmd_len = 6;
    }
}

void intel_mem_view_init(struct intel_mem_view *view,
                         struct intel_dev *dev,
                         const XGL_MEMORY_VIEW_ATTACH_INFO *info)
{
    bool will_write;

    switch (info->state) {
    case XGL_MEMORY_STATE_GRAPHICS_SHADER_WRITE_ONLY:
    case XGL_MEMORY_STATE_GRAPHICS_SHADER_READ_WRITE:
    case XGL_MEMORY_STATE_COMPUTE_SHADER_WRITE_ONLY:
    case XGL_MEMORY_STATE_COMPUTE_SHADER_READ_WRITE:
        will_write = true;
        break;
    default:
        will_write = false;
        break;
    }

    view->mem = intel_mem(info->mem);

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_buf_gen7(dev->gpu, info->offset,
                               info->range, info->stride, info->format,
                               will_write, will_write, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_buf_gen6(dev->gpu, info->offset,
                               info->range, info->stride, info->format,
                               will_write, will_write, view->cmd);
        view->cmd_len = 6;
    }
}

static void img_view_destroy(struct intel_obj *obj)
{
    struct intel_img_view *view = intel_img_view_from_obj(obj);

    intel_img_view_destroy(view);
}

XGL_RESULT intel_img_view_create(struct intel_dev *dev,
                                 const XGL_IMAGE_VIEW_CREATE_INFO *info,
                                 struct intel_img_view **view_ret)
{
    struct intel_img *img = intel_img(info->image);
    struct intel_img_view *view;

    view = (struct intel_img_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_IMAGE_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = img_view_destroy;

    view->img = img;
    view->swizzles = info->channels;
    view->min_lod = info->minLod;

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_tex_gen7(dev->gpu, img, info->viewType, info->format,
                               info->subresourceRange.baseMipLevel,
                               info->subresourceRange.mipLevels,
                               info->subresourceRange.baseArraySlice,
                               info->subresourceRange.arraySize, false, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_tex_gen6(dev->gpu, img, info->viewType, info->format,
                               info->subresourceRange.baseMipLevel,
                               info->subresourceRange.mipLevels,
                               info->subresourceRange.baseArraySlice,
                               info->subresourceRange.arraySize, false, view->cmd);
        view->cmd_len = 6;
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_img_view_destroy(struct intel_img_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void rt_view_destroy(struct intel_obj *obj)
{
    struct intel_rt_view *view = intel_rt_view_from_obj(obj);

    intel_rt_view_destroy(view);
}

XGL_RESULT intel_rt_view_create(struct intel_dev *dev,
                                const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
                                struct intel_rt_view **view_ret)
{
    struct intel_img *img = intel_img(info->image);
    struct intel_rt_view *view;

    view = (struct intel_rt_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_COLOR_TARGET_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = rt_view_destroy;

    view->img = img;

    if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
        surface_state_tex_gen7(dev->gpu, img,
                               img_type_to_view_type(img->type),
                               info->format, info->mipLevel, 1,
                               info->baseArraySlice, info->arraySize,
                               true, view->cmd);
        view->cmd_len = 8;
    } else {
        surface_state_tex_gen6(dev->gpu, img,
                               img_type_to_view_type(img->type),
                               info->format, info->mipLevel, 1,
                               info->baseArraySlice, info->arraySize,
                               true, view->cmd);
        view->cmd_len = 6;
    }

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_rt_view_destroy(struct intel_rt_view *view)
{
    intel_base_destroy(&view->obj.base);
}

static void ds_view_destroy(struct intel_obj *obj)
{
    struct intel_ds_view *view = intel_ds_view_from_obj(obj);

    intel_ds_view_destroy(view);
}

XGL_RESULT intel_ds_view_create(struct intel_dev *dev,
                                const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
                                struct intel_ds_view **view_ret)
{
    struct intel_img *img = intel_img(info->image);
    struct intel_ds_view *view;

    view = (struct intel_ds_view *) intel_base_create(dev, sizeof(*view),
            dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_VIEW, info, 0);
    if (!view)
        return XGL_ERROR_OUT_OF_MEMORY;

    view->obj.destroy = ds_view_destroy;

    view->img = img;

    ds_view_init(view, dev->gpu, img, img->layout.format, info->mipLevel,
                 info->baseArraySlice, info->arraySize);

    *view_ret = view;

    return XGL_SUCCESS;
}

void intel_ds_view_destroy(struct intel_ds_view *view)
{
    intel_base_destroy(&view->obj.base);
}

XGL_RESULT XGLAPI intelCreateImageView(
    XGL_DEVICE                                  device,
    const XGL_IMAGE_VIEW_CREATE_INFO*           pCreateInfo,
    XGL_IMAGE_VIEW*                             pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_img_view_create(dev, pCreateInfo,
            (struct intel_img_view **) pView);
}

XGL_RESULT XGLAPI intelCreateColorAttachmentView(
    XGL_DEVICE                                  device,
    const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
    XGL_COLOR_ATTACHMENT_VIEW*                  pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_rt_view_create(dev, pCreateInfo,
            (struct intel_rt_view **) pView);
}

XGL_RESULT XGLAPI intelCreateDepthStencilView(
    XGL_DEVICE                                  device,
    const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO*   pCreateInfo,
    XGL_DEPTH_STENCIL_VIEW*                     pView)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_ds_view_create(dev, pCreateInfo,
            (struct intel_ds_view **) pView);
}