blob: da8f2310ce755b905a17bdcd460de02801bb0277 [file] [log] [blame]
Chia-I Wu5a323262014-08-11 10:31:53 +08001/*
2 * XGL
3 *
4 * Copyright (C) 2014 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
Chia-I Wu9269d1c2014-08-16 12:47:47 +080025#include "genhw/genhw.h"
26#include "kmd/winsys.h"
Chia-I Wu5a323262014-08-11 10:31:53 +080027#include "dev.h"
Chia-I Wu9269d1c2014-08-16 12:47:47 +080028#include "format.h"
Chia-I Wu5a323262014-08-11 10:31:53 +080029#include "gpu.h"
30#include "img.h"
31#include "mem.h"
32#include "view.h"
33
Chia-I Wu06bed192014-08-20 13:57:18 +080034static void surface_state_null_gen7(const struct intel_gpu *gpu,
35 uint32_t dw[8])
Chia-I Wu9269d1c2014-08-16 12:47:47 +080036{
37 INTEL_GPU_ASSERT(gpu, 7, 7.5);
38
39 /*
40 * From the Ivy Bridge PRM, volume 4 part 1, page 62:
41 *
42 * "A null surface is used in instances where an actual surface is not
43 * bound. When a write message is generated to a null surface, no
44 * actual surface is written to. When a read message (including any
45 * sampling engine message) is generated to a null surface, the result
46 * is all zeros. Note that a null surface type is allowed to be used
47 * with all messages, even if it is not specificially indicated as
48 * supported. All of the remaining fields in surface state are ignored
49 * for null surfaces, with the following exceptions:
50 *
51 * * Width, Height, Depth, LOD, and Render Target View Extent fields
52 * must match the depth buffer's corresponding state for all render
53 * target surfaces, including null.
54 * * All sampling engine and data port messages support null surfaces
55 * with the above behavior, even if not mentioned as specifically
56 * supported, except for the following:
57 * * Data Port Media Block Read/Write messages.
58 * * The Surface Type of a surface used as a render target (accessed
59 * via the Data Port's Render Target Write message) must be the same
60 * as the Surface Type of all other render targets and of the depth
61 * buffer (defined in 3DSTATE_DEPTH_BUFFER), unless either the depth
62 * buffer or render targets are SURFTYPE_NULL."
63 *
64 * From the Ivy Bridge PRM, volume 4 part 1, page 65:
65 *
66 * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
67 * true"
68 */
69
70 dw[0] = GEN6_SURFTYPE_NULL << GEN7_SURFACE_DW0_TYPE__SHIFT |
71 GEN6_FORMAT_B8G8R8A8_UNORM << GEN7_SURFACE_DW0_FORMAT__SHIFT |
72 GEN6_TILING_X << 13;
73
74 dw[1] = 0;
75 dw[2] = 0;
76 dw[3] = 0;
77 dw[4] = 0;
78 dw[5] = 0;
79 dw[6] = 0;
80 dw[7] = 0;
81}
82
/*
 * Build a GEN7/GEN7.5 SURFACE_STATE for a buffer view into dw[0..8].
 *
 * The view starts at byte "offset" into the bound memory and covers
 * "size" bytes, interpreted as an array of "struct_size"-byte entries.
 * When elem_format is defined the buffer is typed (SURFTYPE_BUFFER with
 * a real format); when it is undefined and struct_size > 1 the buffer
 * is structured (SURFTYPE_STRBUF); otherwise it is a raw byte buffer.
 * is_rt marks use as a render target; render_cache_rw sets the Render
 * Cache Read Write Mode bit.
 */
static void surface_state_buf_gen7(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[8])
{
    const bool typed = !icd_format_is_undef(elem_format);
    const bool structured = (!typed && struct_size > 1);
    /* raw/structured buffers are counted in bytes, hence elem_size 1 */
    const int elem_size = (typed) ?
        icd_format_get_size(elem_format) : 1;
    int width, height, depth, pitch;
    int surface_type, surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = (structured) ? GEN7_SURFTYPE_STRBUF : GEN6_SURFTYPE_BUFFER;

    surface_format = (typed) ?
        intel_format_translate_color(gpu, elem_format) : GEN6_FORMAT_RAW;

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size && !structured)
        num_entries++;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 67:
     *
     *     "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *      Address) specifies the base address of first element of the
     *      surface. The surface is interpreted as a simple array of that
     *      single element type. The address must be naturally-aligned to the
     *      element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *      must be 16-byte aligned)
     *
     *      For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *      the base address of the first element of the surface, computed in
     *      software by adding the surface base address to the byte offset of
     *      the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     *     "For typed buffer and structured buffer surfaces, the number of
     *      entries in the buffer ranges from 1 to 2^27. For raw buffer
     *      surfaces, the number of entries in the buffer is the number of
     *      bytes which can range from 1 to 2^30."
     */
    assert(num_entries >= 1 &&
           num_entries <= 1 << ((typed || structured) ? 27 : 30));

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 69:
     *
     *     "For SURFTYPE_BUFFER: The low two bits of this field (Width) must be
     *      11 if the Surface Format is RAW (the size of the buffer must be a
     *      multiple of 4 bytes)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     *     "For surfaces of type SURFTYPE_BUFFER and SURFTYPE_STRBUF, this
     *      field (Surface Pitch) indicates the size of the structure."
     *
     *     "For linear surfaces with Surface Type of SURFTYPE_STRBUF, the pitch
     *      must be a multiple of 4 bytes."
     */
    if (structured)
        assert(struct_size % 4 == 0);
    else if (!typed)
        assert(num_entries % 4 == 0);

    pitch = struct_size;

    /* the hardware fields hold (value - 1) */
    pitch--;
    num_entries--;
    /* bits [6:0] */
    width  = (num_entries & 0x0000007f);
    /* bits [20:7] */
    height = (num_entries & 0x001fff80) >> 7;
    /* bits [30:21] */
    depth  = (num_entries & 0x7fe00000) >> 21;
    /* typed/structured buffers max out at 2^27 entries: limit to [26:21] */
    if (typed || structured)
        depth &= 0x3f;

    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            pitch;

    dw[4] = 0;
    dw[5] = 0;

    dw[6] = 0;
    dw[7] = 0;

    /* Haswell adds a shader channel select; use identity swizzle */
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}
198
199static int img_type_to_view_type(XGL_IMAGE_VIEW_TYPE type)
200{
201 switch (type) {
202 case XGL_IMAGE_1D: return XGL_IMAGE_VIEW_1D;
203 case XGL_IMAGE_2D: return XGL_IMAGE_VIEW_2D;
204 case XGL_IMAGE_3D: return XGL_IMAGE_VIEW_3D;
205 default: assert(!"unknown img type"); return XGL_IMAGE_VIEW_1D;
206 }
207}
208
209static int view_type_to_surface_type(XGL_IMAGE_VIEW_TYPE type)
210{
211 switch (type) {
212 case XGL_IMAGE_VIEW_1D: return GEN6_SURFTYPE_1D;
213 case XGL_IMAGE_VIEW_2D: return GEN6_SURFTYPE_2D;
214 case XGL_IMAGE_VIEW_3D: return GEN6_SURFTYPE_3D;
215 case XGL_IMAGE_VIEW_CUBE: return GEN6_SURFTYPE_CUBE;
216 default: assert(!"unknown view type"); return GEN6_SURFTYPE_NULL;
217 }
218}
219
220static int winsys_tiling_to_surface_tiling(enum intel_tiling_mode tiling)
221{
222 switch (tiling) {
223 case INTEL_TILING_NONE: return GEN6_TILING_NONE;
224 case INTEL_TILING_X: return GEN6_TILING_X;
225 case INTEL_TILING_Y: return GEN6_TILING_Y;
226 default: assert(!"unknown tiling"); return GEN6_TILING_NONE;
227 }
228}
229
/*
 * Build a GEN7/GEN7.5 SURFACE_STATE for a texture (image) view into
 * dw[0..8].
 *
 * The view selects [first_level, first_level + num_levels) mip levels and
 * [first_layer, first_layer + num_layers) array layers of img, reformatted
 * as "format".  When is_rt is set the surface is used as a render target:
 * only a single level is allowed and cube surfaces are demoted to 2D.
 */
static void surface_state_tex_gen7(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[8])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 7, 7.5);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    /* for non-3D views, Depth carries the number of array layers */
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Ivy Bridge PRM, volume 4 part 1, page 70:
         *
         *     "For SURFTYPE_CUBE:For Sampling Engine Surfaces, the range of
         *      this field is [0,340], indicating the number of cube array
         *      elements (equal to the number of underlying 2D array elements
         *      divided by 6). For other surfaces, this field must be zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size against the GEN7 per-surface-type limits */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    assert(first_layer < 2048 && num_layers <= 2048);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 16384 && height == 1 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 16384 && height <= 16384 && depth <= 2048);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 16384 && height <= 16384 && depth <= 86);
        assert(width == height);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    /*
     * For render targets the LOD field selects the level being rendered;
     * for sampling it holds the number of levels after MIN_LOD.
     */
    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 68:
     *
     *     "The Base Address for linear render target surfaces and surfaces
     *      accessed with the typed surface read/write data port messages must
     *      be element-size aligned, for non-YUV surface formats, or a multiple
     *      of 2 element-sizes for YUV surface formats. Other linear surfaces
     *      have no alignment requirements (byte alignment is sufficient)."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 70:
     *
     *     "For linear render target surfaces and surfaces accessed with the
     *      typed data port messages, the pitch must be a multiple of the
     *      element size for non-YUV surface formats. Pitch must be a multiple
     *      of 2 * element size for YUV surface formats. For linear surfaces
     *      with Surface Type of SURFTYPE_STRBUF, the pitch must be a multiple
     *      of 4 bytes.For other linear surfaces, the pitch can be any multiple
     *      of bytes."
     *
     * From the Ivy Bridge PRM, volume 4 part 1, page 74:
     *
     *     "For linear surfaces, this field (X Offset) must be zero."
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    /* bit 13 is Tiled Surface / Tile Walk */
    dw[0] = surface_type << GEN7_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN7_SURFACE_DW0_FORMAT__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling) << 13;

    /*
     * From the Ivy Bridge PRM, volume 4 part 1, page 63:
     *
     *     "If this field (Surface Array) is enabled, the Surface Type must be
     *      SURFTYPE_1D, SURFTYPE_2D, or SURFTYPE_CUBE. If this field is
     *      disabled and Surface Type is SURFTYPE_1D, SURFTYPE_2D, or
     *      SURFTYPE_CUBE, the Depth field must be set to zero."
     *
     * For non-3D sampler surfaces, resinfo (the sampler message) always
     * returns zero for the number of layers when this field is not set.
     */
    if (surface_type != GEN6_SURFTYPE_3D) {
        if (num_layers > 1)
            dw[0] |= GEN7_SURFACE_DW0_IS_ARRAY;
        else
            assert(depth == 1);
    }

    assert(img->layout.align_i == 4 || img->layout.align_i == 8);
    assert(img->layout.align_j == 2 || img->layout.align_j == 4);

    if (img->layout.align_j == 4)
        dw[0] |= GEN7_SURFACE_DW0_VALIGN_4;

    if (img->layout.align_i == 8)
        dw[0] |= GEN7_SURFACE_DW0_HALIGN_8;

    /* array spacing must match how the miptree was laid out */
    if (img->layout.walk == INTEL_LAYOUT_WALK_LOD)
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_LOD0;
    else
        dw[0] |= GEN7_SURFACE_DW0_ARYSPC_FULL;

    if (is_rt)
        dw[0] |= GEN7_SURFACE_DW0_RENDER_CACHE_RW;

    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt)
        dw[0] |= GEN7_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;

    /* surface base address is relocated in by the caller; leave zero */
    dw[1] = 0;

    dw[2] = (height - 1) << GEN7_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN7_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = (depth - 1) << GEN7_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1);

    /* Minimum Array Element (bit 18) and Render Target View Extent (bit 7) */
    dw[4] = first_layer << 18 |
            (num_layers - 1) << 7;

    /*
     * MSFMT_MSS means the samples are not interleaved and MSFMT_DEPTH_STENCIL
     * means the samples are interleaved. The layouts are the same when the
     * number of samples is 1.
     */
    if (img->layout.interleaved_samples && img->samples > 1) {
        assert(!is_rt);
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_DEPTH_STENCIL;
    }
    else {
        dw[4] |= GEN7_SURFACE_DW4_MSFMT_MSS;
    }

    if (img->samples > 4)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_8;
    else if (img->samples > 2)
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_4;
    else
        dw[4] |= GEN7_SURFACE_DW4_MULTISAMPLECOUNT_1;

    dw[5] = (first_level) << GEN7_SURFACE_DW5_MIN_LOD__SHIFT |
            lod;

    dw[6] = 0;
    dw[7] = 0;

    /* Haswell adds a shader channel select; use identity swizzle */
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5)) {
        dw[7] |= GEN75_SCS_RED << GEN75_SURFACE_DW7_SCS_R__SHIFT |
                 GEN75_SCS_GREEN << GEN75_SURFACE_DW7_SCS_G__SHIFT |
                 GEN75_SCS_BLUE << GEN75_SURFACE_DW7_SCS_B__SHIFT |
                 GEN75_SCS_ALPHA << GEN75_SURFACE_DW7_SCS_A__SHIFT;
    }
}
429
Chia-I Wu06bed192014-08-20 13:57:18 +0800430static void surface_state_null_gen6(const struct intel_gpu *gpu,
431 uint32_t dw[6])
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800432{
433 INTEL_GPU_ASSERT(gpu, 6, 6);
434
435 /*
436 * From the Sandy Bridge PRM, volume 4 part 1, page 71:
437 *
438 * "A null surface will be used in instances where an actual surface is
439 * not bound. When a write message is generated to a null surface, no
440 * actual surface is written to. When a read message (including any
441 * sampling engine message) is generated to a null surface, the result
442 * is all zeros. Note that a null surface type is allowed to be used
443 * with all messages, even if it is not specificially indicated as
444 * supported. All of the remaining fields in surface state are ignored
445 * for null surfaces, with the following exceptions:
446 *
447 * * [DevSNB+]: Width, Height, Depth, and LOD fields must match the
448 * depth buffer's corresponding state for all render target
449 * surfaces, including null.
450 * * Surface Format must be R8G8B8A8_UNORM."
451 *
452 * From the Sandy Bridge PRM, volume 4 part 1, page 82:
453 *
454 * "If Surface Type is SURFTYPE_NULL, this field (Tiled Surface) must be
455 * true"
456 */
457
458 dw[0] = GEN6_SURFTYPE_NULL << GEN6_SURFACE_DW0_TYPE__SHIFT |
459 GEN6_FORMAT_B8G8R8A8_UNORM << GEN6_SURFACE_DW0_FORMAT__SHIFT;
460
461 dw[1] = 0;
462 dw[2] = 0;
463 dw[3] = GEN6_TILING_X;
464 dw[4] = 0;
465 dw[5] = 0;
466}
467
/*
 * Build a GEN6 SURFACE_STATE for a buffer view into dw[0..6].
 *
 * The view starts at byte "offset" into the bound memory and covers
 * "size" bytes, interpreted as an array of "struct_size"-byte entries of
 * elem_format.  Unlike GEN7, GEN6 has no raw or structured buffer types,
 * so elem_format must always be a real format.  is_rt marks use as a
 * render target; render_cache_rw sets the Render Cache Read Write Mode
 * bit.
 */
static void surface_state_buf_gen6(const struct intel_gpu *gpu,
                                   unsigned offset, unsigned size,
                                   unsigned struct_size,
                                   XGL_FORMAT elem_format,
                                   bool is_rt, bool render_cache_rw,
                                   uint32_t dw[6])
{
    const int elem_size = icd_format_get_size(elem_format);
    int width, height, depth, pitch;
    int surface_format, num_entries;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    /*
     * For SURFTYPE_BUFFER, a SURFACE_STATE specifies an element of a
     * structure in a buffer.
     */

    surface_format = intel_format_translate_color(gpu, elem_format);

    num_entries = size / struct_size;
    /* see if there is enough space to fit another element */
    if (size % struct_size >= elem_size)
        num_entries++;

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     *     "For SURFTYPE_BUFFER render targets, this field (Surface Base
     *      Address) specifies the base address of first element of the
     *      surface. The surface is interpreted as a simple array of that
     *      single element type. The address must be naturally-aligned to the
     *      element size (e.g., a buffer containing R32G32B32A32_FLOAT elements
     *      must be 16-byte aligned).
     *
     *      For SURFTYPE_BUFFER non-rendertarget surfaces, this field specifies
     *      the base address of the first element of the surface, computed in
     *      software by adding the surface base address to the byte offset of
     *      the element in the buffer."
     */
    if (is_rt)
        assert(offset % elem_size == 0);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 77:
     *
     *     "For buffer surfaces, the number of entries in the buffer ranges
     *      from 1 to 2^27."
     */
    assert(num_entries >= 1 && num_entries <= 1 << 27);

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     *     "For surfaces of type SURFTYPE_BUFFER, this field (Surface Pitch)
     *      indicates the size of the structure."
     */
    pitch = struct_size;

    /* the hardware fields hold (value - 1) */
    pitch--;
    num_entries--;
    /* bits [6:0] */
    width = (num_entries & 0x0000007f);
    /* bits [19:7] */
    height = (num_entries & 0x000fff80) >> 7;
    /* bits [26:20] */
    depth = (num_entries & 0x07f00000) >> 20;

    dw[0] = GEN6_SURFTYPE_BUFFER << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT;
    if (render_cache_rw)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    dw[1] = offset;

    dw[2] = height << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            width << GEN6_SURFACE_DW2_WIDTH__SHIFT;

    dw[3] = depth << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            pitch << GEN6_SURFACE_DW3_PITCH__SHIFT;

    dw[4] = 0;
    dw[5] = 0;
}
552
/*
 * Build a GEN6 SURFACE_STATE for a texture (image) view into dw[0..6].
 *
 * The view selects [first_level, first_level + num_levels) mip levels and
 * [first_layer, first_layer + num_layers) array layers of img, reformatted
 * as "format".  When is_rt is set the surface is used as a render target:
 * only a single level is allowed and cube surfaces are demoted to 2D.
 */
static void surface_state_tex_gen6(const struct intel_gpu *gpu,
                                   const struct intel_img *img,
                                   XGL_IMAGE_VIEW_TYPE type,
                                   XGL_FORMAT format,
                                   unsigned first_level,
                                   unsigned num_levels,
                                   unsigned first_layer,
                                   unsigned num_layers,
                                   bool is_rt,
                                   uint32_t dw[6])
{
    int surface_type, surface_format;
    int width, height, depth, pitch, lod;

    INTEL_GPU_ASSERT(gpu, 6, 6);

    surface_type = view_type_to_surface_type(type);
    assert(surface_type != GEN6_SURFTYPE_BUFFER);

    surface_format = intel_format_translate_color(gpu, format);
    assert(surface_format >= 0);

    width = img->layout.width0;
    height = img->layout.height0;
    /* for non-3D views, Depth carries the number of array layers */
    depth = (type == XGL_IMAGE_VIEW_3D) ?
        img->depth : num_layers;
    pitch = img->layout.bo_stride;

    if (surface_type == GEN6_SURFTYPE_CUBE) {
        /*
         * From the Sandy Bridge PRM, volume 4 part 1, page 81:
         *
         *     "For SURFTYPE_CUBE: [DevSNB+]: for Sampling Engine Surfaces, the
         *      range of this field (Depth) is [0,84], indicating the number of
         *      cube array elements (equal to the number of underlying 2D array
         *      elements divided by 6). For other surfaces, this field must be
         *      zero."
         *
         * When is_rt is true, we treat the texture as a 2D one to avoid the
         * restriction.
         */
        if (is_rt) {
            surface_type = GEN6_SURFTYPE_2D;
        }
        else {
            assert(num_layers % 6 == 0);
            depth = num_layers / 6;
        }
    }

    /* sanity check the size against the GEN6 per-surface-type limits */
    assert(width >= 1 && height >= 1 && depth >= 1 && pitch >= 1);
    switch (surface_type) {
    case GEN6_SURFTYPE_1D:
        assert(width <= 8192 && height == 1 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_2D:
        assert(width <= 8192 && height <= 8192 && depth <= 512);
        assert(first_layer < 512 && num_layers <= 512);
        break;
    case GEN6_SURFTYPE_3D:
        assert(width <= 2048 && height <= 2048 && depth <= 2048);
        assert(first_layer < 2048 && num_layers <= 512);
        if (!is_rt)
            assert(first_layer == 0);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(width <= 8192 && height <= 8192 && depth <= 85);
        assert(width == height);
        assert(first_layer < 512 && num_layers <= 512);
        if (is_rt)
            assert(first_layer == 0);
        break;
    default:
        assert(!"unexpected surface type");
        break;
    }

    /* non-full array spacing is supported only on GEN7+ */
    assert(img->layout.walk != INTEL_LAYOUT_WALK_LOD);
    /* non-interleaved samples are supported only on GEN7+ */
    if (img->samples > 1)
        assert(img->layout.interleaved_samples);

    /*
     * For render targets the LOD field selects the level being rendered;
     * for sampling it holds the number of levels after MIN_LOD.
     */
    if (is_rt) {
        assert(num_levels == 1);
        lod = first_level;
    }
    else {
        lod = num_levels - 1;
    }

    /*
     * From the Sandy Bridge PRM, volume 4 part 1, page 76:
     *
     *     "Linear render target surface base addresses must be element-size
     *      aligned, for non-YUV surface formats, or a multiple of 2
     *      element-sizes for YUV surface formats. Other linear surfaces have
     *      no alignment requirements (byte alignment is sufficient.)"
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 81:
     *
     *     "For linear render target surfaces, the pitch must be a multiple
     *      of the element size for non-YUV surface formats. Pitch must be a
     *      multiple of 2 * element size for YUV surface formats."
     *
     * From the Sandy Bridge PRM, volume 4 part 1, page 86:
     *
     *     "For linear surfaces, this field (X Offset) must be zero"
     */
    if (img->layout.tiling == INTEL_TILING_NONE) {
        if (is_rt) {
            const int elem_size = icd_format_get_size(format);
            assert(pitch % elem_size == 0);
        }
    }

    dw[0] = surface_type << GEN6_SURFACE_DW0_TYPE__SHIFT |
            surface_format << GEN6_SURFACE_DW0_FORMAT__SHIFT |
            GEN6_SURFACE_DW0_MIPLAYOUT_BELOW;

    /* bit 9 is Cube Map Corner Mode / average mode */
    if (surface_type == GEN6_SURFTYPE_CUBE && !is_rt) {
        dw[0] |= 1 << 9 |
                 GEN6_SURFACE_DW0_CUBE_FACE_ENABLES__MASK;
    }

    if (is_rt)
        dw[0] |= GEN6_SURFACE_DW0_RENDER_CACHE_RW;

    /* surface base address is relocated in by the caller; leave zero */
    dw[1] = 0;

    dw[2] = (height - 1) << GEN6_SURFACE_DW2_HEIGHT__SHIFT |
            (width - 1) << GEN6_SURFACE_DW2_WIDTH__SHIFT |
            lod << GEN6_SURFACE_DW2_MIP_COUNT_LOD__SHIFT;

    dw[3] = (depth - 1) << GEN6_SURFACE_DW3_DEPTH__SHIFT |
            (pitch - 1) << GEN6_SURFACE_DW3_PITCH__SHIFT |
            winsys_tiling_to_surface_tiling(img->layout.tiling);

    /* Minimum Array Element (bit 17) and Render Target View Extent (bit 8) */
    dw[4] = first_level << GEN6_SURFACE_DW4_MIN_LOD__SHIFT |
            first_layer << 17 |
            (num_layers - 1) << 8 |
            ((img->samples > 1) ? GEN6_SURFACE_DW4_MULTISAMPLECOUNT_4 :
                                  GEN6_SURFACE_DW4_MULTISAMPLECOUNT_1);

    dw[5] = 0;

    assert(img->layout.align_j == 2 || img->layout.align_j == 4);
    if (img->layout.align_j == 4)
        dw[5] |= GEN6_SURFACE_DW5_VALIGN_4;
}
705
/*
 * Decoded description of a depth/stencil view, filled in by ds_init_info()
 * or ds_init_info_null() and consumed when packing the hardware depth,
 * stencil, and HiZ buffer commands.
 */
struct ds_surface_info {
    int surface_type;   /* a GEN6_SURFTYPE_* value */
    int format;         /* a GEN6_ZFORMAT_* value */

    /* per-buffer stride/offset; a zero stride means the buffer is absent */
    struct {
        unsigned stride;
        unsigned offset; /* byte offset to the selected level (set on GEN6) */
    } zs, stencil, hiz;

    unsigned width, height, depth;
    unsigned lod, first_layer, num_layers;
};
718
719static void
720ds_init_info_null(const struct intel_gpu *gpu,
721 struct ds_surface_info *info)
722{
723 INTEL_GPU_ASSERT(gpu, 6, 7.5);
724
725 memset(info, 0, sizeof(*info));
726
727 info->surface_type = GEN6_SURFTYPE_NULL;
728 info->format = GEN6_ZFORMAT_D32_FLOAT;
729 info->width = 1;
730 info->height = 1;
731 info->depth = 1;
732 info->num_layers = 1;
733}
734
735static void
736ds_init_info(const struct intel_gpu *gpu,
737 const struct intel_img *img,
738 XGL_FORMAT format, unsigned level,
739 unsigned first_layer, unsigned num_layers,
740 struct ds_surface_info *info)
741{
742 bool separate_stencil;
743
744 INTEL_GPU_ASSERT(gpu, 6, 7.5);
745
746 memset(info, 0, sizeof(*info));
747
748 info->surface_type =
749 view_type_to_surface_type(img_type_to_view_type(img->type));
750
751 if (info->surface_type == GEN6_SURFTYPE_CUBE) {
752 /*
753 * From the Sandy Bridge PRM, volume 2 part 1, page 325-326:
754 *
755 * "For Other Surfaces (Cube Surfaces):
756 * This field (Minimum Array Element) is ignored."
757 *
758 * "For Other Surfaces (Cube Surfaces):
759 * This field (Render Target View Extent) is ignored."
760 *
761 * As such, we cannot set first_layer and num_layers on cube surfaces.
762 * To work around that, treat it as a 2D surface.
763 */
764 info->surface_type = GEN6_SURFTYPE_2D;
765 }
766
767 if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
768 separate_stencil = true;
769 }
770 else {
771 /*
772 * From the Sandy Bridge PRM, volume 2 part 1, page 317:
773 *
774 * "This field (Separate Stencil Buffer Enable) must be set to the
775 * same value (enabled or disabled) as Hierarchical Depth Buffer
776 * Enable."
777 */
778 separate_stencil = img->aux_offset;
779 }
780
781 /*
782 * From the Sandy Bridge PRM, volume 2 part 1, page 317:
783 *
784 * "If this field (Hierarchical Depth Buffer Enable) is enabled, the
785 * Surface Format of the depth buffer cannot be
786 * D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT. Use of stencil
787 * requires the separate stencil buffer."
788 *
789 * From the Ironlake PRM, volume 2 part 1, page 330:
790 *
791 * "If this field (Separate Stencil Buffer Enable) is disabled, the
792 * Surface Format of the depth buffer cannot be D24_UNORM_X8_UINT."
793 *
794 * There is no similar restriction for GEN6. But when D24_UNORM_X8_UINT
795 * is indeed used, the depth values output by the fragment shaders will
796 * be different when read back.
797 *
798 * As for GEN7+, separate_stencil is always true.
799 */
800 switch (format.channelFormat) {
801 case XGL_CH_FMT_R16:
802 info->format = GEN6_ZFORMAT_D16_UNORM;
803 break;
804 case XGL_CH_FMT_R32:
805 info->format = GEN6_ZFORMAT_D32_FLOAT;
806 break;
807 case XGL_CH_FMT_R32G8:
808 info->format = (separate_stencil) ?
809 GEN6_ZFORMAT_D32_FLOAT :
810 GEN6_ZFORMAT_D32_FLOAT_S8X24_UINT;
811 break;
812 case XGL_CH_FMT_R8:
813 if (separate_stencil) {
814 info->format = GEN6_ZFORMAT_D32_FLOAT;
815 break;
816 }
817 /* fall through */
818 default:
819 assert(!"unsupported depth/stencil format");
820 ds_init_info_null(gpu, info);
821 return;
822 break;
823 }
824
825 if (format.channelFormat != XGL_CH_FMT_R8)
826 info->zs.stride = img->layout.bo_stride;
827
828 if (img->s8_layout) {
829 /*
830 * From the Sandy Bridge PRM, volume 2 part 1, page 329:
831 *
832 * "The pitch must be set to 2x the value computed based on width,
833 * as the stencil buffer is stored with two rows interleaved."
834 *
835 * According to the classic driver, we need to do the same for GEN7+
836 * even though the Ivy Bridge PRM does not say anything about it.
837 */
838 info->stencil.stride = img->s8_layout->bo_stride * 2;
Chia-I Wu457d0a62014-08-18 13:02:26 +0800839
840 if (intel_gpu_gen(gpu) == INTEL_GEN(6)) {
841 unsigned x, y;
842
843 assert(img->s8_layout->walk == INTEL_LAYOUT_WALK_LOD);
844
845 /* offset to the level */
846 intel_layout_get_slice_pos(img->s8_layout, level, 0, &x, &y);
847 intel_layout_pos_to_mem(img->s8_layout, x, y, &x, &y);
848 info->stencil.offset = intel_layout_mem_to_raw(img->s8_layout, x, y);
849 }
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800850 } else if (format.channelFormat == XGL_CH_FMT_R8) {
851 info->stencil.stride = img->layout.bo_stride * 2;
852 }
853
Chia-I Wu457d0a62014-08-18 13:02:26 +0800854 if (img->aux_offset) {
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800855 info->hiz.stride = img->layout.aux_stride;
856
Chia-I Wu457d0a62014-08-18 13:02:26 +0800857 /* offset to the level */
858 if (intel_gpu_gen(gpu) == INTEL_GEN(6))
859 info->hiz.offset = img->layout.aux_offsets[level];
860 }
861
862
Chia-I Wu73e326f2014-08-21 11:07:57 +0800863 info->width = img->layout.width0;
864 info->height = img->layout.height0;
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800865 info->depth = (img->type == XGL_IMAGE_3D) ?
Chia-I Wu73e326f2014-08-21 11:07:57 +0800866 img->depth : num_layers;
Chia-I Wu9269d1c2014-08-16 12:47:47 +0800867
868 info->lod = level;
869 info->first_layer = first_layer;
870 info->num_layers = num_layers;
871}
872
/*
 * Initialize \p view as a depth/stencil target for the given image
 * subresource range, or as a null depth surface when \p img is NULL.
 *
 * view->cmd receives ten packed dwords:
 *
 *   dw[0..5]  3DSTATE_DEPTH_BUFFER body (surface address is relocated
 *             at emit time, hence dw2 stays 0 here)
 *   dw[6..7]  separate stencil buffer pitch and offset
 *   dw[8..9]  HiZ buffer pitch and offset
 */
static void ds_view_init(struct intel_ds_view *view,
                         const struct intel_gpu *gpu,
                         const struct intel_img *img,
                         XGL_FORMAT format, unsigned level,
                         unsigned first_layer, unsigned num_layers)
{
    /* hardware surface limits grew between Gen6 and Gen7 */
    const int max_2d_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16384 : 8192;
    const int max_array_size = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 2048 : 512;
    struct ds_surface_info info;
    uint32_t dw1, dw2, dw3, dw4, dw5, dw6;
    uint32_t *dw;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    if (img) {
        ds_init_info(gpu, img, format, level, first_layer, num_layers, &info);
    }
    else {
        ds_init_info_null(gpu, &info);
    }

    /* validate the view dimensions against the per-type hardware limits */
    switch (info.surface_type) {
    case GEN6_SURFTYPE_NULL:
        break;
    case GEN6_SURFTYPE_1D:
        assert(info.width <= max_2d_size && info.height == 1 &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_2D:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth <= max_array_size);
        assert(info.first_layer < max_array_size - 1 &&
               info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_3D:
        assert(info.width <= 2048 && info.height <= 2048 && info.depth <= 2048);
        assert(info.first_layer < 2048 && info.num_layers <= max_array_size);
        break;
    case GEN6_SURFTYPE_CUBE:
        assert(info.width <= max_2d_size && info.height <= max_2d_size &&
               info.depth == 1);
        assert(info.first_layer == 0 && info.num_layers == 1);
        assert(info.width == info.height);
        break;
    default:
        assert(!"unexpected depth surface type");
        break;
    }

    /* dw1: surface type, depth format, and (below) the depth pitch */
    dw1 = info.surface_type << 29 |
          info.format << 18;

    if (info.zs.stride) {
        /* required for GEN6+ */
        assert(info.zs.stride > 0 && info.zs.stride < 128 * 1024 &&
               info.zs.stride % 128 == 0);
        assert(info.width <= info.zs.stride);

        dw1 |= (info.zs.stride - 1);
    }

    /* dw2 is the surface base address; filled in by a relocation later */
    dw2 = 0;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        /* Gen7+: explicit enable bits for depth, stencil, and HiZ writes */
        if (info.zs.stride)
            dw1 |= 1 << 28;

        if (info.stencil.stride)
            dw1 |= 1 << 27;

        if (info.hiz.stride)
            dw1 |= 1 << 22;

        /* Gen7 packs width/height/LOD at different bit positions than Gen6 */
        dw3 = (info.height - 1) << 18 |
              (info.width - 1) << 4 |
              info.lod;

        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10;

        dw5 = 0;

        /* render-target view extent moved to dw6 on Gen7 */
        dw6 = (info.num_layers - 1) << 21;
    }
    else {
        /* always Y-tiled */
        dw1 |= 1 << 27 |
               1 << 26;

        if (info.hiz.stride) {
            dw1 |= 1 << 22 |
                   1 << 21;
        }

        dw3 = (info.height - 1) << 19 |
              (info.width - 1) << 6 |
              info.lod << 2 |
              GEN6_DEPTH_DW3_MIPLAYOUT_BELOW;

        /* Gen6 packs the render-target view extent into dw4 */
        dw4 = (info.depth - 1) << 21 |
              info.first_layer << 10 |
              (info.num_layers - 1) << 1;

        dw5 = 0;

        dw6 = 0;
    }

    STATIC_ASSERT(ARRAY_SIZE(view->cmd) >= 10);
    dw = view->cmd;

    dw[0] = dw1;
    dw[1] = dw2;
    dw[2] = dw3;
    dw[3] = dw4;
    dw[4] = dw5;
    dw[5] = dw6;

    /* separate stencil */
    if (info.stencil.stride) {
        assert(info.stencil.stride > 0 && info.stencil.stride < 128 * 1024 &&
               info.stencil.stride % 128 == 0);

        dw[6] = info.stencil.stride - 1;
        dw[7] = img->s8_offset;

        if (intel_gpu_gen(gpu) >= INTEL_GEN(7.5))
            dw[6] |= GEN75_STENCIL_DW1_STENCIL_BUFFER_ENABLE;
    }
    else {
        dw[6] = 0;
        dw[7] = 0;
    }

    /* hiz */
    if (info.hiz.stride) {
        dw[8] = info.hiz.stride - 1;
        dw[9] = img->aux_offset;
    }
    else {
        dw[8] = 0;
        dw[9] = 0;
    }
}
1019
Chia-I Wu5a323262014-08-11 10:31:53 +08001020void intel_null_view_init(struct intel_null_view *view,
1021 struct intel_dev *dev)
1022{
Chia-I Wucd83cf12014-08-23 17:26:08 +08001023 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001024 surface_state_null_gen7(dev->gpu, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001025 view->cmd_len = 8;
1026 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001027 surface_state_null_gen6(dev->gpu, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001028 view->cmd_len = 6;
1029 }
Chia-I Wu5a323262014-08-11 10:31:53 +08001030}
1031
1032void intel_mem_view_init(struct intel_mem_view *view,
1033 struct intel_dev *dev,
1034 const XGL_MEMORY_VIEW_ATTACH_INFO *info)
1035{
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001036 bool will_write;
1037
1038 switch (info->state) {
1039 case XGL_MEMORY_STATE_GRAPHICS_SHADER_WRITE_ONLY:
1040 case XGL_MEMORY_STATE_GRAPHICS_SHADER_READ_WRITE:
1041 case XGL_MEMORY_STATE_COMPUTE_SHADER_WRITE_ONLY:
1042 case XGL_MEMORY_STATE_COMPUTE_SHADER_READ_WRITE:
1043 will_write = true;
1044 break;
1045 default:
1046 will_write = false;
1047 break;
1048 }
Chia-I Wu5a323262014-08-11 10:31:53 +08001049
1050 view->mem = intel_mem(info->mem);
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001051
1052 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001053 surface_state_buf_gen7(dev->gpu, info->offset,
1054 info->range, info->stride, info->format,
1055 will_write, will_write, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001056 view->cmd_len = 8;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001057 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001058 surface_state_buf_gen6(dev->gpu, info->offset,
1059 info->range, info->stride, info->format,
1060 will_write, will_write, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001061 view->cmd_len = 6;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001062 }
Chia-I Wu5a323262014-08-11 10:31:53 +08001063}
1064
/* obj-level destroy callback: recover the containing view and free it */
static void img_view_destroy(struct intel_obj *obj)
{
    intel_img_view_destroy(intel_img_view_from_obj(obj));
}
1071
1072XGL_RESULT intel_img_view_create(struct intel_dev *dev,
1073 const XGL_IMAGE_VIEW_CREATE_INFO *info,
1074 struct intel_img_view **view_ret)
1075{
1076 struct intel_img *img = intel_img(info->image);
1077 struct intel_img_view *view;
1078
1079 view = (struct intel_img_view *) intel_base_create(dev, sizeof(*view),
1080 dev->base.dbg, XGL_DBG_OBJECT_IMAGE_VIEW, info, 0);
1081 if (!view)
1082 return XGL_ERROR_OUT_OF_MEMORY;
1083
1084 view->obj.destroy = img_view_destroy;
1085
1086 view->img = img;
1087 view->swizzles = info->channels;
1088 view->min_lod = info->minLod;
1089
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001090 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001091 surface_state_tex_gen7(dev->gpu, img, info->viewType, info->format,
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001092 info->subresourceRange.baseMipLevel,
1093 info->subresourceRange.mipLevels,
1094 info->subresourceRange.baseArraySlice,
1095 info->subresourceRange.arraySize, false, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001096 view->cmd_len = 8;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001097 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001098 surface_state_tex_gen6(dev->gpu, img, info->viewType, info->format,
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001099 info->subresourceRange.baseMipLevel,
1100 info->subresourceRange.mipLevels,
1101 info->subresourceRange.baseArraySlice,
1102 info->subresourceRange.arraySize, false, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001103 view->cmd_len = 6;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001104 }
1105
Chia-I Wu5a323262014-08-11 10:31:53 +08001106 *view_ret = view;
1107
1108 return XGL_SUCCESS;
1109}
1110
1111void intel_img_view_destroy(struct intel_img_view *view)
1112{
1113 intel_base_destroy(&view->obj.base);
1114}
1115
/* obj-level destroy callback: recover the containing view and free it */
static void rt_view_destroy(struct intel_obj *obj)
{
    intel_rt_view_destroy(intel_rt_view_from_obj(obj));
}
1122
1123XGL_RESULT intel_rt_view_create(struct intel_dev *dev,
1124 const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO *info,
1125 struct intel_rt_view **view_ret)
1126{
1127 struct intel_img *img = intel_img(info->image);
1128 struct intel_rt_view *view;
1129
1130 view = (struct intel_rt_view *) intel_base_create(dev, sizeof(*view),
1131 dev->base.dbg, XGL_DBG_OBJECT_COLOR_TARGET_VIEW, info, 0);
1132 if (!view)
1133 return XGL_ERROR_OUT_OF_MEMORY;
1134
1135 view->obj.destroy = rt_view_destroy;
1136
1137 view->img = img;
1138
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001139 if (intel_gpu_gen(dev->gpu) >= INTEL_GEN(7)) {
Chia-I Wu06bed192014-08-20 13:57:18 +08001140 surface_state_tex_gen7(dev->gpu, img,
1141 img_type_to_view_type(img->type),
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001142 info->format, info->mipLevel, 1,
1143 info->baseArraySlice, info->arraySize,
1144 true, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001145 view->cmd_len = 8;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001146 } else {
Chia-I Wu06bed192014-08-20 13:57:18 +08001147 surface_state_tex_gen6(dev->gpu, img,
1148 img_type_to_view_type(img->type),
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001149 info->format, info->mipLevel, 1,
1150 info->baseArraySlice, info->arraySize,
1151 true, view->cmd);
Chia-I Wucd83cf12014-08-23 17:26:08 +08001152 view->cmd_len = 6;
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001153 }
1154
Chia-I Wu5a323262014-08-11 10:31:53 +08001155 *view_ret = view;
1156
1157 return XGL_SUCCESS;
1158}
1159
1160void intel_rt_view_destroy(struct intel_rt_view *view)
1161{
1162 intel_base_destroy(&view->obj.base);
1163}
1164
/* obj-level destroy callback: recover the containing view and free it */
static void ds_view_destroy(struct intel_obj *obj)
{
    intel_ds_view_destroy(intel_ds_view_from_obj(obj));
}
1171
1172XGL_RESULT intel_ds_view_create(struct intel_dev *dev,
1173 const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO *info,
1174 struct intel_ds_view **view_ret)
1175{
1176 struct intel_img *img = intel_img(info->image);
1177 struct intel_ds_view *view;
1178
1179 view = (struct intel_ds_view *) intel_base_create(dev, sizeof(*view),
1180 dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_VIEW, info, 0);
1181 if (!view)
1182 return XGL_ERROR_OUT_OF_MEMORY;
1183
1184 view->obj.destroy = ds_view_destroy;
1185
1186 view->img = img;
1187
Chia-I Wu06bed192014-08-20 13:57:18 +08001188 ds_view_init(view, dev->gpu, img, img->layout.format, info->mipLevel,
1189 info->baseArraySlice, info->arraySize);
Chia-I Wu9269d1c2014-08-16 12:47:47 +08001190
Chia-I Wu5a323262014-08-11 10:31:53 +08001191 *view_ret = view;
1192
1193 return XGL_SUCCESS;
1194}
1195
1196void intel_ds_view_destroy(struct intel_ds_view *view)
1197{
1198 intel_base_destroy(&view->obj.base);
1199}
1200
1201XGL_RESULT XGLAPI intelCreateImageView(
1202 XGL_DEVICE device,
1203 const XGL_IMAGE_VIEW_CREATE_INFO* pCreateInfo,
1204 XGL_IMAGE_VIEW* pView)
1205{
1206 struct intel_dev *dev = intel_dev(device);
1207
1208 return intel_img_view_create(dev, pCreateInfo,
1209 (struct intel_img_view **) pView);
1210}
1211
1212XGL_RESULT XGLAPI intelCreateColorAttachmentView(
1213 XGL_DEVICE device,
1214 const XGL_COLOR_ATTACHMENT_VIEW_CREATE_INFO* pCreateInfo,
1215 XGL_COLOR_ATTACHMENT_VIEW* pView)
1216{
1217 struct intel_dev *dev = intel_dev(device);
1218
1219 return intel_rt_view_create(dev, pCreateInfo,
1220 (struct intel_rt_view **) pView);
1221}
1222
1223XGL_RESULT XGLAPI intelCreateDepthStencilView(
1224 XGL_DEVICE device,
1225 const XGL_DEPTH_STENCIL_VIEW_CREATE_INFO* pCreateInfo,
1226 XGL_DEPTH_STENCIL_VIEW* pView)
1227{
1228 struct intel_dev *dev = intel_dev(device);
1229
1230 return intel_ds_view_create(dev, pCreateInfo,
1231 (struct intel_ds_view **) pView);
1232}