blob: 286716e078a10cd340cd69a870350d72b38889b1 [file] [log] [blame]
Chia-I Wua5714e82014-08-11 15:33:42 +08001/*
2 * XGL
3 *
4 * Copyright (C) 2014 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
Chia-I Wu44e42362014-09-02 08:32:09 +080023 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
Chia-I Wua5714e82014-08-11 15:33:42 +080026 */
27
Chia-I Wu97702a62014-08-11 15:33:42 +080028#include <math.h>
29#include "genhw/genhw.h"
Chia-I Wua5714e82014-08-11 15:33:42 +080030#include "dev.h"
31#include "state.h"
32
Chia-I Wu97702a62014-08-11 15:33:42 +080033static int translate_compare_func(XGL_COMPARE_FUNC func)
34{
35 switch (func) {
36 case XGL_COMPARE_NEVER: return GEN6_COMPAREFUNCTION_NEVER;
37 case XGL_COMPARE_LESS: return GEN6_COMPAREFUNCTION_LESS;
38 case XGL_COMPARE_EQUAL: return GEN6_COMPAREFUNCTION_EQUAL;
39 case XGL_COMPARE_LESS_EQUAL: return GEN6_COMPAREFUNCTION_LEQUAL;
40 case XGL_COMPARE_GREATER: return GEN6_COMPAREFUNCTION_GREATER;
41 case XGL_COMPARE_NOT_EQUAL: return GEN6_COMPAREFUNCTION_NOTEQUAL;
42 case XGL_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
43 case XGL_COMPARE_ALWAYS: return GEN6_COMPAREFUNCTION_ALWAYS;
44 default:
45 assert(!"unknown compare_func");
46 return GEN6_COMPAREFUNCTION_NEVER;
47 }
48}
49
50static int translate_stencil_op(XGL_STENCIL_OP op)
51{
52 switch (op) {
53 case XGL_STENCIL_OP_KEEP: return GEN6_STENCILOP_KEEP;
54 case XGL_STENCIL_OP_ZERO: return GEN6_STENCILOP_ZERO;
55 case XGL_STENCIL_OP_REPLACE: return GEN6_STENCILOP_REPLACE;
56 case XGL_STENCIL_OP_INC_CLAMP: return GEN6_STENCILOP_INCRSAT;
57 case XGL_STENCIL_OP_DEC_CLAMP: return GEN6_STENCILOP_DECRSAT;
58 case XGL_STENCIL_OP_INVERT: return GEN6_STENCILOP_INVERT;
59 case XGL_STENCIL_OP_INC_WRAP: return GEN6_STENCILOP_INCR;
60 case XGL_STENCIL_OP_DEC_WRAP: return GEN6_STENCILOP_DECR;
61 default:
62 assert(!"unknown stencil op");
63 return GEN6_STENCILOP_KEEP;
64 }
65}
66
67static int translate_blend_func(XGL_BLEND_FUNC func)
68{
69 switch (func) {
70 case XGL_BLEND_FUNC_ADD: return GEN6_BLENDFUNCTION_ADD;
71 case XGL_BLEND_FUNC_SUBTRACT: return GEN6_BLENDFUNCTION_SUBTRACT;
72 case XGL_BLEND_FUNC_REVERSE_SUBTRACT: return GEN6_BLENDFUNCTION_REVERSE_SUBTRACT;
73 case XGL_BLEND_FUNC_MIN: return GEN6_BLENDFUNCTION_MIN;
74 case XGL_BLEND_FUNC_MAX: return GEN6_BLENDFUNCTION_MAX;
75 default:
76 assert(!"unknown blend func");
77 return GEN6_BLENDFUNCTION_ADD;
78 };
79}
80
81static int translate_blend(XGL_BLEND blend)
82{
83 switch (blend) {
84 case XGL_BLEND_ZERO: return GEN6_BLENDFACTOR_ZERO;
85 case XGL_BLEND_ONE: return GEN6_BLENDFACTOR_ONE;
86 case XGL_BLEND_SRC_COLOR: return GEN6_BLENDFACTOR_SRC_COLOR;
87 case XGL_BLEND_ONE_MINUS_SRC_COLOR: return GEN6_BLENDFACTOR_INV_SRC_COLOR;
88 case XGL_BLEND_DEST_COLOR: return GEN6_BLENDFACTOR_DST_COLOR;
89 case XGL_BLEND_ONE_MINUS_DEST_COLOR: return GEN6_BLENDFACTOR_INV_DST_COLOR;
90 case XGL_BLEND_SRC_ALPHA: return GEN6_BLENDFACTOR_SRC_ALPHA;
91 case XGL_BLEND_ONE_MINUS_SRC_ALPHA: return GEN6_BLENDFACTOR_INV_SRC_ALPHA;
92 case XGL_BLEND_DEST_ALPHA: return GEN6_BLENDFACTOR_DST_ALPHA;
93 case XGL_BLEND_ONE_MINUS_DEST_ALPHA: return GEN6_BLENDFACTOR_INV_DST_ALPHA;
94 case XGL_BLEND_CONSTANT_COLOR: return GEN6_BLENDFACTOR_CONST_COLOR;
95 case XGL_BLEND_ONE_MINUS_CONSTANT_COLOR: return GEN6_BLENDFACTOR_INV_CONST_COLOR;
96 case XGL_BLEND_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_CONST_ALPHA;
97 case XGL_BLEND_ONE_MINUS_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_INV_CONST_ALPHA;
98 case XGL_BLEND_SRC_ALPHA_SATURATE: return GEN6_BLENDFACTOR_SRC_ALPHA_SATURATE;
99 case XGL_BLEND_SRC1_COLOR: return GEN6_BLENDFACTOR_SRC1_COLOR;
100 case XGL_BLEND_ONE_MINUS_SRC1_COLOR: return GEN6_BLENDFACTOR_INV_SRC1_COLOR;
101 case XGL_BLEND_SRC1_ALPHA: return GEN6_BLENDFACTOR_SRC1_ALPHA;
102 case XGL_BLEND_ONE_MINUS_SRC1_ALPHA: return GEN6_BLENDFACTOR_INV_SRC1_ALPHA;
103 default:
104 assert(!"unknown blend factor");
105 return GEN6_BLENDFACTOR_ONE;
106 };
107}
108
109static void
110raster_state_init(struct intel_raster_state *state,
111 const struct intel_gpu *gpu,
112 const XGL_RASTER_STATE_CREATE_INFO *info)
113{
114 switch (info->fillMode) {
115 case XFL_FILL_POINTS:
116 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_POINT |
117 GEN7_SF_DW1_BACKFACE_POINT;
118 break;
119 case XGL_FILL_WIREFRAME:
120 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_WIREFRAME |
121 GEN7_SF_DW1_BACKFACE_WIREFRAME;
122 break;
123 case XGL_FILL_SOLID:
124 default:
125 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_SOLID |
126 GEN7_SF_DW1_BACKFACE_SOLID;
127 break;
128 }
129
130 if (info->frontFace == XGL_FRONT_FACE_CCW) {
131 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTWINDING_CCW;
132 state->cmd_clip_cull |= GEN7_CLIP_DW1_FRONTWINDING_CCW;
133 }
134
135 switch (info->cullMode) {
136 case XGL_CULL_NONE:
137 default:
138 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_NONE;
139 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_NONE;
140 break;
141 case XGL_CULL_FRONT:
142 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_FRONT;
143 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_FRONT;
144 break;
145 case XGL_CULL_BACK:
146 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BACK;
147 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BACK;
148 break;
149 case XGL_CULL_FRONT_AND_BACK:
150 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BOTH;
151 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BOTH;
152 break;
153 }
154
155 /* only GEN7+ needs cull mode in 3DSTATE_CLIP */
156 if (intel_gpu_gen(gpu) == INTEL_GEN(6))
157 state->cmd_clip_cull = 0;
158
159 /* XXX scale info->depthBias back into NDC */
160 state->cmd_depth_offset_const = u_fui((float) info->depthBias * 2.0f);
161 state->cmd_depth_offset_clamp = u_fui(info->depthBiasClamp);
162 state->cmd_depth_offset_scale = u_fui(info->slopeScaledDepthBias);
163}
164
/*
 * Compute the guardband extents for a viewport centered at
 * (center_x, center_y), in screen-space pixels.
 */
static void
viewport_get_guardband(const struct intel_gpu *gpu,
                       int center_x, int center_y,
                       int *min_gbx, int *max_gbx,
                       int *min_gby, int *max_gby)
{
    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 234:
     *
     *   "Per-Device Guardband Extents
     *
     *    - Supported X,Y ScreenSpace "Guardband" Extent: [-16K,16K-1]
     *    - Maximum Post-Clamp Delta (X or Y): 16K"
     *
     *   "In addition, in order to be correctly rendered, objects must have a
     *    screenspace bounding box not exceeding 8K in the X or Y direction.
     *    This additional restriction must also be comprehended by software,
     *    i.e., enforced by use of clipping."
     *
     * From the Ivy Bridge PRM, volume 2 part 1, page 248:
     *
     *   "Per-Device Guardband Extents
     *
     *    - Supported X,Y ScreenSpace "Guardband" Extent: [-32K,32K-1]
     *    - Maximum Post-Clamp Delta (X or Y): N/A"
     *
     *   "In addition, in order to be correctly rendered, objects must have a
     *    screenspace bounding box not exceeding 8K in the X or Y direction.
     *    This additional restriction must also be comprehended by software,
     *    i.e., enforced by use of clipping."
     *
     * Combined, the bounding box of any object can not exceed 8K in both
     * width and height.
     *
     * Below we set the guardband as a square of length 8K, centered at where
     * the viewport is.  This makes sure all objects passing the GB test are
     * valid to the renderer, and those failing the XY clipping have a
     * better chance of passing the GB test.
     */
    const int max_extent = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 32768 : 16384;
    const int half_len = 8192 / 2;

    /* clamp the center so the 8K square stays within the valid range */
    if (center_x - half_len < -max_extent)
        center_x = -max_extent + half_len;
    else if (center_x + half_len > max_extent - 1)
        center_x = max_extent - half_len;

    if (center_y - half_len < -max_extent)
        center_y = -max_extent + half_len;
    else if (center_y + half_len > max_extent - 1)
        center_y = max_extent - half_len;

    /* fixed: dropped pointless (float) casts on stores into int outputs */
    *min_gbx = center_x - half_len;
    *max_gbx = center_x + half_len;
    *min_gby = center_y - half_len;
    *max_gby = center_y + half_len;
}
223
/*
 * Compute the layout of the viewport state command buffer and allocate it.
 *
 * The buffer packs, in order: SF (or SF_CLIP on GEN7+) viewport entries,
 * CLIP viewport entries (GEN6 only; merged into SF_CLIP on GEN7+),
 * CC viewport entries, and optionally SCISSOR_RECT entries.  Offsets are
 * recorded in dwords; state->cmd_len is the total size in dwords.
 *
 * Returns XGL_ERROR_OUT_OF_MEMORY when the allocation fails.
 */
static XGL_RESULT
viewport_state_alloc_cmd(struct intel_viewport_state *state,
                         const struct intel_gpu *gpu,
                         const XGL_VIEWPORT_STATE_CREATE_INFO *info)
{
    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    state->viewport_count = info->viewportCount;
    state->scissor_enable = info->scissorEnable;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        /* GEN7+: combined SF_CLIP_VIEWPORT, 16 dwords per viewport; the
         * clip part lives at dword 8 within each entry */
        state->cmd_align = GEN7_ALIGNMENT_SF_CLIP_VIEWPORT;
        state->cmd_len = 16 * info->viewportCount;

        state->cmd_clip_offset = 8;
    } else {
        /* GEN6: separate SF_VIEWPORT (8 dwords) and CLIP_VIEWPORT
         * (4 dwords) arrays, the latter aligned after the former */
        state->cmd_align = GEN6_ALIGNMENT_SF_VIEWPORT;
        state->cmd_len = 8 * info->viewportCount;

        state->cmd_clip_offset =
            u_align(state->cmd_len, GEN6_ALIGNMENT_CLIP_VIEWPORT);
        state->cmd_len = state->cmd_clip_offset + 4 * info->viewportCount;
    }

    /* CC_VIEWPORT: 2 dwords (min/max depth) per viewport */
    state->cmd_cc_offset =
        u_align(state->cmd_len, GEN6_ALIGNMENT_CC_VIEWPORT);
    state->cmd_len = state->cmd_cc_offset + 2 * info->viewportCount;

    /* SCISSOR_RECT: 2 dwords per viewport, only when scissoring is on.
     * NOTE(review): cmd_scissor_rect_offset is left untouched otherwise —
     * presumably zeroed by intel_base_create; confirm. */
    if (state->scissor_enable) {
        state->cmd_scissor_rect_offset =
            u_align(state->cmd_len, GEN6_ALIGNMENT_SCISSOR_RECT);
        state->cmd_len = state->cmd_scissor_rect_offset +
            2 * info->viewportCount;
    }

    state->cmd = icd_alloc(sizeof(uint32_t) * state->cmd_len,
            0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!state->cmd)
        return XGL_ERROR_OUT_OF_MEMORY;

    return XGL_SUCCESS;
}
266
/*
 * Allocate and fill the viewport state command buffer: one SF (or SF_CLIP)
 * viewport, one CLIP viewport, one CC viewport, and optionally one
 * SCISSOR_RECT per viewport in the create info.
 */
static XGL_RESULT
viewport_state_init(struct intel_viewport_state *state,
                    const struct intel_gpu *gpu,
                    const XGL_VIEWPORT_STATE_CREATE_INFO *info)
{
    /* entry strides in dwords; GEN7+ uses the combined SF_CLIP layout */
    const XGL_UINT sf_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 8;
    const XGL_UINT clip_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 4;
    uint32_t *sf_viewport, *clip_viewport, *cc_viewport, *scissor_rect;
    XGL_UINT i;
    XGL_RESULT ret;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    ret = viewport_state_alloc_cmd(state, gpu, info);
    if (ret != XGL_SUCCESS)
        return ret;

    /* cursors into each sub-array of the command buffer; scissor_rect is
     * only dereferenced when scissoring is enabled */
    sf_viewport = state->cmd;
    clip_viewport = state->cmd + state->cmd_clip_offset;
    cc_viewport = state->cmd + state->cmd_cc_offset;
    scissor_rect = state->cmd + state->cmd_scissor_rect_offset;

    for (i = 0; i < info->viewportCount; i++) {
        const XGL_VIEWPORT *viewport = &info->viewports[i];
        const XGL_RECT *scissor = &info->scissors[i];
        uint32_t *dw = NULL;
        float translate[3], scale[3];
        int min_gbx, max_gbx, min_gby, max_gby;

        /* viewport transform: NDC -> screen space.  scale[2] uses a double
         * literal (2.0) unlike its neighbors — harmless but inconsistent */
        scale[0] = viewport->width / 2.0f;
        scale[1] = viewport->height / 2.0f;
        scale[2] = (viewport->maxDepth - viewport->minDepth) / 2.0;
        translate[0] = viewport->originX + scale[0];
        translate[1] = viewport->originY + scale[1];
        translate[2] = (viewport->minDepth + viewport->maxDepth) / 2.0f;

        /* guardband centered at the viewport center, in screen space */
        viewport_get_guardband(gpu, (int) translate[0], (int) translate[1],
                &min_gbx, &max_gbx, &min_gby, &max_gby);

        /* SF_VIEWPORT */
        dw = sf_viewport;
        dw[0] = u_fui(scale[0]);
        dw[1] = u_fui(scale[1]);
        dw[2] = u_fui(scale[2]);
        dw[3] = u_fui(translate[0]);
        dw[4] = u_fui(translate[1]);
        dw[5] = u_fui(translate[2]);
        dw[6] = 0;
        dw[7] = 0;
        sf_viewport += sf_stride;

        /* CLIP_VIEWPORT: guardband converted back to NDC */
        dw = clip_viewport;
        dw[0] = u_fui(((float) min_gbx - translate[0]) / fabsf(scale[0]));
        dw[1] = u_fui(((float) max_gbx - translate[0]) / fabsf(scale[0]));
        dw[2] = u_fui(((float) min_gby - translate[1]) / fabsf(scale[1]));
        dw[3] = u_fui(((float) max_gby - translate[1]) / fabsf(scale[1]));
        clip_viewport += clip_stride;

        /* CC_VIEWPORT: depth clamp range */
        dw = cc_viewport;
        dw[0] = u_fui(viewport->minDepth);
        dw[1] = u_fui(viewport->maxDepth);
        cc_viewport += 2;

        /* SCISSOR_RECT */
        if (state->scissor_enable) {
            int16_t max_x, max_y;

            /* inclusive max coordinates, truncated to 16 bits */
            max_x = (scissor->offset.x + scissor->extent.width - 1) & 0xffff;
            max_y = (scissor->offset.y + scissor->extent.height - 1) & 0xffff;

            dw = scissor_rect;
            if (scissor->extent.width && scissor->extent.height) {
                dw[0] = (scissor->offset.y & 0xffff) << 16 |
                        (scissor->offset.x & 0xffff);
                dw[1] = max_y << 16 | max_x;
            } else {
                /* empty scissor: min (1,1) > max (0,0) rejects everything */
                dw[0] = 1 << 16 | 1;
                dw[1] = 0;
            }
            scissor_rect += 2;
        }
    }

    return XGL_SUCCESS;
}
354
355static void
356msaa_state_init(struct intel_msaa_state *state,
357 const struct intel_gpu *gpu,
358 const XGL_MSAA_STATE_CREATE_INFO *info)
359{
360 /* taken from Mesa */
Chia-I Wu97702a62014-08-11 15:33:42 +0800361 static const uint32_t brw_multisample_positions_4x = 0xae2ae662;
362 static const uint32_t brw_multisample_positions_8x[] = { 0xdbb39d79, 0x3ff55117 };
363 uint32_t cmd, cmd_len;
364 uint32_t *dw = state->cmd;
365
366 INTEL_GPU_ASSERT(gpu, 6, 7.5);
367 STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 6);
368
Chia-I Wu0b171262014-08-29 15:03:28 +0800369 state->sample_count = info->samples;
370 if (!state->sample_count)
371 state->sample_count = 1;
372
Chia-I Wu97702a62014-08-11 15:33:42 +0800373 /* 3DSTATE_MULTISAMPLE */
Chia-I Wu426072d2014-08-26 14:31:55 +0800374 cmd = GEN6_RENDER_CMD(3D, 3DSTATE_MULTISAMPLE);
Chia-I Wu97702a62014-08-11 15:33:42 +0800375 cmd_len = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 4 : 3;
376
377 dw[0] = cmd | (cmd_len - 2);
378 if (info->samples <= 1) {
379 dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_1;
Chia-I Wu1586f6e2014-08-30 15:28:35 +0800380 dw[2] = 0;
Chia-I Wu97702a62014-08-11 15:33:42 +0800381 } else if (info->samples <= 4 || intel_gpu_gen(gpu) == INTEL_GEN(6)) {
382 dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_4;
383 dw[2] = brw_multisample_positions_4x;
384 } else {
385 dw[1] = GEN7_MULTISAMPLE_DW1_NUMSAMPLES_8;
386 dw[2] = brw_multisample_positions_8x[0];
387 dw[3] = brw_multisample_positions_8x[1];
388 }
389
390 dw += cmd_len;
391
Chia-I Wuf3c59252014-08-22 09:26:22 +0800392 state->cmd_len = cmd_len + 2;
393
Chia-I Wu97702a62014-08-11 15:33:42 +0800394 /* 3DSTATE_SAMPLE_MASK */
Chia-I Wu426072d2014-08-26 14:31:55 +0800395 cmd = GEN6_RENDER_CMD(3D, 3DSTATE_SAMPLE_MASK);
Chia-I Wu97702a62014-08-11 15:33:42 +0800396 cmd_len = 2;
397
398 dw[0] = cmd | (cmd_len - 2);
399 dw[1] = info->sampleMask & ((1 << info->samples) - 1);
400}
401
/*
 * Build one GEN6 BLEND_STATE entry (2 dwords) per color attachment, plus
 * the blend constant color.
 */
static void
blend_state_init(struct intel_blend_state *state,
                 const struct intel_gpu *gpu,
                 const XGL_COLOR_BLEND_STATE_CREATE_INFO *info)
{
    XGL_UINT i;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    for (i = 0; i < ARRAY_SIZE(info->attachment); i++) {
        const XGL_COLOR_ATTACHMENT_BLEND_STATE *att = &info->attachment[i];
        uint32_t *dw = &state->cmd[2 * i];

        if (att->blendEnable) {
            /* dword 0: color buffer blend enable (bit 31) plus the alpha
             * and color equations/factors.
             * NOTE(review): dw[0] is left unwritten when blending is
             * disabled — presumably zeroed at allocation; confirm. */
            dw[0] = 1 << 31 |
                    translate_blend_func(att->blendFuncAlpha) << 26 |
                    translate_blend(att->srcBlendAlpha) << 20 |
                    translate_blend(att->destBlendAlpha) << 15 |
                    translate_blend_func(att->blendFuncColor) << 11 |
                    translate_blend(att->srcBlendColor) << 5 |
                    translate_blend(att->destBlendColor);

            /* bit 30: independent alpha blend, only when alpha differs
             * from color in any equation or factor */
            if (att->blendFuncAlpha != att->blendFuncColor ||
                att->srcBlendAlpha != att->srcBlendColor ||
                att->destBlendAlpha != att->destBlendColor)
                dw[0] |= 1 << 30;
        }

        /* dword 1: clamp colors to the render target format range; the
         * low bits enable pre/post blend color clamps */
        dw[1] = GEN6_BLEND_DW1_COLORCLAMP_RTFORMAT |
                0x3;
    }

    memcpy(state->cmd_blend_color, info->blendConst, sizeof(info->blendConst));
}
436
/*
 * Build GEN6 DEPTH_STENCIL_STATE (3 dwords) from the create info, plus the
 * packed stencil reference values used elsewhere.
 *
 * Returns XGL_ERROR_UNKNOWN when depth bounds testing is requested, which
 * this path does not implement.
 */
static XGL_RESULT
ds_state_init(struct intel_ds_state *state,
              const struct intel_gpu *gpu,
              const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info)
{
    uint32_t *dw = state->cmd;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 3);

    if (info->depthBoundsEnable)
        return XGL_ERROR_UNKNOWN;

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 359:
     *
     *   "If the Depth Buffer is either undefined or does not have a surface
     *    format of D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT and separate
     *    stencil buffer is disabled, Stencil Test Enable must be DISABLED"
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 370:
     *
     *   "This field (Stencil Test Enable) cannot be enabled if
     *    Surface Format in 3DSTATE_DEPTH_BUFFER is set to D16_UNORM."
     *
     * TODO We do not check these yet.
     */
    if (info->stencilTestEnable) {
        /* dword 0: stencil enable (bit 31), front-face func/ops, two-sided
         * enable (bit 15), and back-face func/ops */
        dw[0] = 1 << 31 |
                translate_compare_func(info->front.stencilFunc) << 28 |
                translate_stencil_op(info->front.stencilFailOp) << 25 |
                translate_stencil_op(info->front.stencilDepthFailOp) << 22 |
                translate_stencil_op(info->front.stencilPassOp) << 19 |
                1 << 15 |
                translate_compare_func(info->back.stencilFunc) << 12 |
                translate_stencil_op(info->back.stencilFailOp) << 9 |
                translate_stencil_op(info->back.stencilDepthFailOp) << 6 |
                translate_stencil_op(info->back.stencilPassOp) << 3;

        /* bit 18: stencil buffer write enable */
        if (info->stencilWriteMask)
            dw[0] |= 1 << 18;

        /* dword 1: test mask and write mask (8 bits each) */
        dw[1] = (info->stencilReadMask & 0xff) << 24 |
                (info->stencilWriteMask & 0xff) << 16;

        /* reference values are programmed separately (COLOR_CALC_STATE) */
        state->cmd_stencil_ref = (info->front.stencilRef & 0xff) << 24 |
                                 (info->back.stencilRef & 0xff) << 16;
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 360:
     *
     *   "Enabling the Depth Test function without defining a Depth Buffer is
     *    UNDEFINED."
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 375:
     *
     *   "A Depth Buffer must be defined before enabling writes to it, or
     *    operation is UNDEFINED."
     *
     * TODO We do not check these yet.
     */
    if (info->depthTestEnable) {
        /* dword 2: depth test enable, compare func, write enable */
        dw[2] = 1 << 31 |
                translate_compare_func(info->depthFunc) << 27 |
                (bool) info->depthWriteEnable << 26;
    } else {
        /* depth test disabled still needs ALWAYS so writes pass through */
        dw[2] = GEN6_COMPAREFUNCTION_ALWAYS << 27;
    }

    return XGL_SUCCESS;
}
510
/* intel_obj destructor hook for viewport state objects. */
static void viewport_state_destroy(struct intel_obj *obj)
{
    intel_viewport_state_destroy(intel_viewport_state_from_obj(obj));
}
517
518XGL_RESULT intel_viewport_state_create(struct intel_dev *dev,
519 const XGL_VIEWPORT_STATE_CREATE_INFO *info,
520 struct intel_viewport_state **state_ret)
521{
522 struct intel_viewport_state *state;
Chia-I Wu97702a62014-08-11 15:33:42 +0800523 XGL_RESULT ret;
Chia-I Wua5714e82014-08-11 15:33:42 +0800524
525 state = (struct intel_viewport_state *) intel_base_create(dev,
526 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_VIEWPORT_STATE,
527 info, 0);
528 if (!state)
529 return XGL_ERROR_OUT_OF_MEMORY;
530
531 state->obj.destroy = viewport_state_destroy;
532
Chia-I Wu97702a62014-08-11 15:33:42 +0800533 ret = viewport_state_init(state, dev->gpu, info);
534 if (ret != XGL_SUCCESS) {
535 intel_viewport_state_destroy(state);
536 return ret;
537 }
Chia-I Wua5714e82014-08-11 15:33:42 +0800538
539 *state_ret = state;
540
541 return XGL_SUCCESS;
542}
543
/* Free the viewport command buffer, then destroy the base object. */
void intel_viewport_state_destroy(struct intel_viewport_state *state)
{
    icd_free(state->cmd);
    intel_base_destroy(&state->obj.base);
}
549
/* intel_obj destructor hook for raster state objects. */
static void raster_state_destroy(struct intel_obj *obj)
{
    intel_raster_state_destroy(intel_raster_state_from_obj(obj));
}
556
557XGL_RESULT intel_raster_state_create(struct intel_dev *dev,
558 const XGL_RASTER_STATE_CREATE_INFO *info,
559 struct intel_raster_state **state_ret)
560{
561 struct intel_raster_state *state;
562
563 state = (struct intel_raster_state *) intel_base_create(dev,
564 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_RASTER_STATE,
565 info, 0);
566 if (!state)
567 return XGL_ERROR_OUT_OF_MEMORY;
568
569 state->obj.destroy = raster_state_destroy;
570
Chia-I Wu97702a62014-08-11 15:33:42 +0800571 raster_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800572
573 *state_ret = state;
574
575 return XGL_SUCCESS;
576}
577
/* Destroy a raster state object (no extra resources to free). */
void intel_raster_state_destroy(struct intel_raster_state *state)
{
    intel_base_destroy(&state->obj.base);
}
582
/* intel_obj destructor hook for MSAA state objects. */
static void msaa_state_destroy(struct intel_obj *obj)
{
    intel_msaa_state_destroy(intel_msaa_state_from_obj(obj));
}
589
590XGL_RESULT intel_msaa_state_create(struct intel_dev *dev,
591 const XGL_MSAA_STATE_CREATE_INFO *info,
592 struct intel_msaa_state **state_ret)
593{
594 struct intel_msaa_state *state;
595
596 state = (struct intel_msaa_state *) intel_base_create(dev,
597 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_MSAA_STATE,
598 info, 0);
599 if (!state)
600 return XGL_ERROR_OUT_OF_MEMORY;
601
602 state->obj.destroy = msaa_state_destroy;
603
Chia-I Wu97702a62014-08-11 15:33:42 +0800604 msaa_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800605
606 *state_ret = state;
607
608 return XGL_SUCCESS;
609}
610
/* Destroy an MSAA state object (no extra resources to free). */
void intel_msaa_state_destroy(struct intel_msaa_state *state)
{
    intel_base_destroy(&state->obj.base);
}
615
/* intel_obj destructor hook for color blend state objects. */
static void blend_state_destroy(struct intel_obj *obj)
{
    intel_blend_state_destroy(intel_blend_state_from_obj(obj));
}
622
623XGL_RESULT intel_blend_state_create(struct intel_dev *dev,
624 const XGL_COLOR_BLEND_STATE_CREATE_INFO *info,
625 struct intel_blend_state **state_ret)
626{
627 struct intel_blend_state *state;
628
629 state = (struct intel_blend_state *) intel_base_create(dev,
Courtney Goeltzenleuchter985ad492014-08-27 14:04:17 -0600630 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_COLOR_BLEND_STATE,
Chia-I Wua5714e82014-08-11 15:33:42 +0800631 info, 0);
632 if (!state)
633 return XGL_ERROR_OUT_OF_MEMORY;
634
635 state->obj.destroy = blend_state_destroy;
636
Chia-I Wu97702a62014-08-11 15:33:42 +0800637 blend_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800638
639 *state_ret = state;
640
641 return XGL_SUCCESS;
642}
643
/* Destroy a color blend state object (no extra resources to free). */
void intel_blend_state_destroy(struct intel_blend_state *state)
{
    intel_base_destroy(&state->obj.base);
}
648
/* intel_obj destructor hook for depth/stencil state objects. */
static void ds_state_destroy(struct intel_obj *obj)
{
    intel_ds_state_destroy(intel_ds_state_from_obj(obj));
}
655
656XGL_RESULT intel_ds_state_create(struct intel_dev *dev,
657 const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info,
658 struct intel_ds_state **state_ret)
659{
660 struct intel_ds_state *state;
Chia-I Wu97702a62014-08-11 15:33:42 +0800661 XGL_RESULT ret;
Chia-I Wua5714e82014-08-11 15:33:42 +0800662
663 state = (struct intel_ds_state *) intel_base_create(dev,
Courtney Goeltzenleuchtere7dc05f2014-08-22 16:26:07 -0600664 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_STATE,
Chia-I Wua5714e82014-08-11 15:33:42 +0800665 info, 0);
666 if (!state)
667 return XGL_ERROR_OUT_OF_MEMORY;
668
669 state->obj.destroy = ds_state_destroy;
670
Chia-I Wu97702a62014-08-11 15:33:42 +0800671 ret = ds_state_init(state, dev->gpu, info);
672 if (ret != XGL_SUCCESS) {
673 intel_ds_state_destroy(state);
674 return ret;
675 }
Chia-I Wua5714e82014-08-11 15:33:42 +0800676
677 *state_ret = state;
678
679 return XGL_SUCCESS;
680}
681
/* Destroy a depth/stencil state object (no extra resources to free). */
void intel_ds_state_destroy(struct intel_ds_state *state)
{
    intel_base_destroy(&state->obj.base);
}
686
687XGL_RESULT XGLAPI intelCreateViewportState(
688 XGL_DEVICE device,
689 const XGL_VIEWPORT_STATE_CREATE_INFO* pCreateInfo,
690 XGL_VIEWPORT_STATE_OBJECT* pState)
691{
692 struct intel_dev *dev = intel_dev(device);
693
694 return intel_viewport_state_create(dev, pCreateInfo,
695 (struct intel_viewport_state **) pState);
696}
697
698XGL_RESULT XGLAPI intelCreateRasterState(
699 XGL_DEVICE device,
700 const XGL_RASTER_STATE_CREATE_INFO* pCreateInfo,
701 XGL_RASTER_STATE_OBJECT* pState)
702{
703 struct intel_dev *dev = intel_dev(device);
704
705 return intel_raster_state_create(dev, pCreateInfo,
706 (struct intel_raster_state **) pState);
707}
708
709XGL_RESULT XGLAPI intelCreateMsaaState(
710 XGL_DEVICE device,
711 const XGL_MSAA_STATE_CREATE_INFO* pCreateInfo,
712 XGL_MSAA_STATE_OBJECT* pState)
713{
714 struct intel_dev *dev = intel_dev(device);
715
716 return intel_msaa_state_create(dev, pCreateInfo,
717 (struct intel_msaa_state **) pState);
718}
719
720XGL_RESULT XGLAPI intelCreateColorBlendState(
721 XGL_DEVICE device,
722 const XGL_COLOR_BLEND_STATE_CREATE_INFO* pCreateInfo,
723 XGL_COLOR_BLEND_STATE_OBJECT* pState)
724{
725 struct intel_dev *dev = intel_dev(device);
726
727 return intel_blend_state_create(dev, pCreateInfo,
728 (struct intel_blend_state **) pState);
729}
730
731XGL_RESULT XGLAPI intelCreateDepthStencilState(
732 XGL_DEVICE device,
733 const XGL_DEPTH_STENCIL_STATE_CREATE_INFO* pCreateInfo,
734 XGL_DEPTH_STENCIL_STATE_OBJECT* pState)
735{
736 struct intel_dev *dev = intel_dev(device);
737
738 return intel_ds_state_create(dev, pCreateInfo,
739 (struct intel_ds_state **) pState);
740}