/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>   /* assert() in the translate_* helpers */
#include <math.h>
#include <string.h>   /* memcpy() in blend_state_init() */
#include "genhw/genhw.h"
#include "dev.h"
#include "state.h"

static int translate_compare_func(XGL_COMPARE_FUNC func)
{
    switch (func) {
    case XGL_COMPARE_NEVER:         return GEN6_COMPAREFUNCTION_NEVER;
    case XGL_COMPARE_LESS:          return GEN6_COMPAREFUNCTION_LESS;
    case XGL_COMPARE_EQUAL:         return GEN6_COMPAREFUNCTION_EQUAL;
    case XGL_COMPARE_LESS_EQUAL:    return GEN6_COMPAREFUNCTION_LEQUAL;
    case XGL_COMPARE_GREATER:       return GEN6_COMPAREFUNCTION_GREATER;
    case XGL_COMPARE_NOT_EQUAL:     return GEN6_COMPAREFUNCTION_NOTEQUAL;
    case XGL_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
    case XGL_COMPARE_ALWAYS:        return GEN6_COMPAREFUNCTION_ALWAYS;
    default:
        assert(!"unknown compare_func");
        return GEN6_COMPAREFUNCTION_NEVER;
    }
}

static int translate_stencil_op(XGL_STENCIL_OP op)
{
    switch (op) {
    case XGL_STENCIL_OP_KEEP:       return GEN6_STENCILOP_KEEP;
    case XGL_STENCIL_OP_ZERO:       return GEN6_STENCILOP_ZERO;
    case XGL_STENCIL_OP_REPLACE:    return GEN6_STENCILOP_REPLACE;
    case XGL_STENCIL_OP_INC_CLAMP:  return GEN6_STENCILOP_INCRSAT;
    case XGL_STENCIL_OP_DEC_CLAMP:  return GEN6_STENCILOP_DECRSAT;
    case XGL_STENCIL_OP_INVERT:     return GEN6_STENCILOP_INVERT;
    case XGL_STENCIL_OP_INC_WRAP:   return GEN6_STENCILOP_INCR;
    case XGL_STENCIL_OP_DEC_WRAP:   return GEN6_STENCILOP_DECR;
    default:
        assert(!"unknown stencil op");
        return GEN6_STENCILOP_KEEP;
    }
}

static int translate_blend_func(XGL_BLEND_FUNC func)
{
    switch (func) {
    case XGL_BLEND_FUNC_ADD:              return GEN6_BLENDFUNCTION_ADD;
    case XGL_BLEND_FUNC_SUBTRACT:         return GEN6_BLENDFUNCTION_SUBTRACT;
    case XGL_BLEND_FUNC_REVERSE_SUBTRACT: return GEN6_BLENDFUNCTION_REVERSE_SUBTRACT;
    case XGL_BLEND_FUNC_MIN:              return GEN6_BLENDFUNCTION_MIN;
    case XGL_BLEND_FUNC_MAX:              return GEN6_BLENDFUNCTION_MAX;
    default:
        assert(!"unknown blend func");
        return GEN6_BLENDFUNCTION_ADD;
    }
}

static int translate_blend(XGL_BLEND blend)
{
    switch (blend) {
    case XGL_BLEND_ZERO:                     return GEN6_BLENDFACTOR_ZERO;
    case XGL_BLEND_ONE:                      return GEN6_BLENDFACTOR_ONE;
    case XGL_BLEND_SRC_COLOR:                return GEN6_BLENDFACTOR_SRC_COLOR;
    case XGL_BLEND_ONE_MINUS_SRC_COLOR:      return GEN6_BLENDFACTOR_INV_SRC_COLOR;
    case XGL_BLEND_DEST_COLOR:               return GEN6_BLENDFACTOR_DST_COLOR;
    case XGL_BLEND_ONE_MINUS_DEST_COLOR:     return GEN6_BLENDFACTOR_INV_DST_COLOR;
    case XGL_BLEND_SRC_ALPHA:                return GEN6_BLENDFACTOR_SRC_ALPHA;
    case XGL_BLEND_ONE_MINUS_SRC_ALPHA:      return GEN6_BLENDFACTOR_INV_SRC_ALPHA;
    case XGL_BLEND_DEST_ALPHA:               return GEN6_BLENDFACTOR_DST_ALPHA;
    case XGL_BLEND_ONE_MINUS_DEST_ALPHA:     return GEN6_BLENDFACTOR_INV_DST_ALPHA;
    case XGL_BLEND_CONSTANT_COLOR:           return GEN6_BLENDFACTOR_CONST_COLOR;
    case XGL_BLEND_ONE_MINUS_CONSTANT_COLOR: return GEN6_BLENDFACTOR_INV_CONST_COLOR;
    case XGL_BLEND_CONSTANT_ALPHA:           return GEN6_BLENDFACTOR_CONST_ALPHA;
    case XGL_BLEND_ONE_MINUS_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_INV_CONST_ALPHA;
    case XGL_BLEND_SRC_ALPHA_SATURATE:       return GEN6_BLENDFACTOR_SRC_ALPHA_SATURATE;
    case XGL_BLEND_SRC1_COLOR:               return GEN6_BLENDFACTOR_SRC1_COLOR;
    case XGL_BLEND_ONE_MINUS_SRC1_COLOR:     return GEN6_BLENDFACTOR_INV_SRC1_COLOR;
    case XGL_BLEND_SRC1_ALPHA:               return GEN6_BLENDFACTOR_SRC1_ALPHA;
    case XGL_BLEND_ONE_MINUS_SRC1_ALPHA:     return GEN6_BLENDFACTOR_INV_SRC1_ALPHA;
    default:
        assert(!"unknown blend factor");
        return GEN6_BLENDFACTOR_ONE;
    }
}

static void
raster_state_init(struct intel_raster_state *state,
                  const struct intel_gpu *gpu,
                  const XGL_RASTER_STATE_CREATE_INFO *info)
{
    switch (info->fillMode) {
    case XGL_FILL_POINTS:
        state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_POINT |
                              GEN7_SF_DW1_BACKFACE_POINT;
        break;
    case XGL_FILL_WIREFRAME:
        state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_WIREFRAME |
                              GEN7_SF_DW1_BACKFACE_WIREFRAME;
        break;
    case XGL_FILL_SOLID:
    default:
        state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_SOLID |
                              GEN7_SF_DW1_BACKFACE_SOLID;
        break;
    }

    if (info->frontFace == XGL_FRONT_FACE_CCW) {
        state->cmd_sf_fill |= GEN7_SF_DW1_FRONTWINDING_CCW;
        state->cmd_clip_cull |= GEN7_CLIP_DW1_FRONTWINDING_CCW;
    }

    switch (info->cullMode) {
    case XGL_CULL_NONE:
    default:
        state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_NONE;
        state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_NONE;
        break;
    case XGL_CULL_FRONT:
        state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_FRONT;
        state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_FRONT;
        break;
    case XGL_CULL_BACK:
        state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BACK;
        state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BACK;
        break;
    case XGL_CULL_FRONT_AND_BACK:
        state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BOTH;
        state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BOTH;
        break;
    }

    /* only GEN7+ needs cull mode in 3DSTATE_CLIP */
    if (intel_gpu_gen(gpu) == INTEL_GEN(6))
        state->cmd_clip_cull = 0;

    /* XXX scale info->depthBias back into NDC */
    state->cmd_depth_offset_const = u_fui((float) info->depthBias * 2.0f);
    state->cmd_depth_offset_clamp = u_fui(info->depthBiasClamp);
    state->cmd_depth_offset_scale = u_fui(info->slopeScaledDepthBias);
}

static void
viewport_get_guardband(const struct intel_gpu *gpu,
                       int center_x, int center_y,
                       int *min_gbx, int *max_gbx,
                       int *min_gby, int *max_gby)
{
    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 234:
     *
     *     "Per-Device Guardband Extents
     *
     *       - Supported X,Y ScreenSpace "Guardband" Extent: [-16K,16K-1]
     *       - Maximum Post-Clamp Delta (X or Y): 16K"
     *
     *     "In addition, in order to be correctly rendered, objects must have a
     *      screenspace bounding box not exceeding 8K in the X or Y direction.
     *      This additional restriction must also be comprehended by software,
     *      i.e., enforced by use of clipping."
     *
     * From the Ivy Bridge PRM, volume 2 part 1, page 248:
     *
     *     "Per-Device Guardband Extents
     *
     *       - Supported X,Y ScreenSpace "Guardband" Extent: [-32K,32K-1]
     *       - Maximum Post-Clamp Delta (X or Y): N/A"
     *
     *     "In addition, in order to be correctly rendered, objects must have a
     *      screenspace bounding box not exceeding 8K in the X or Y direction.
     *      This additional restriction must also be comprehended by software,
     *      i.e., enforced by use of clipping."
     *
     * Combined, the bounding box of any object cannot exceed 8K in either
     * width or height.
     *
     * Below we set the guardband to an 8K-by-8K square centered at the
     * viewport.  This guarantees that any object passing the guardband test
     * is valid to the renderer, while objects failing the XY clipping have
     * a better chance of passing the guardband test.
     */
    const int max_extent = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 32768 : 16384;
    const int half_len = 8192 / 2;

    /* make sure the guardband is within the valid range */
    if (center_x - half_len < -max_extent)
        center_x = -max_extent + half_len;
    else if (center_x + half_len > max_extent - 1)
        center_x = max_extent - half_len;

    if (center_y - half_len < -max_extent)
        center_y = -max_extent + half_len;
    else if (center_y + half_len > max_extent - 1)
        center_y = max_extent - half_len;

    *min_gbx = center_x - half_len;
    *max_gbx = center_x + half_len;
    *min_gby = center_y - half_len;
    *max_gby = center_y + half_len;
}

static XGL_RESULT
viewport_state_alloc_cmd(struct intel_viewport_state *state,
                         const struct intel_gpu *gpu,
                         const XGL_VIEWPORT_STATE_CREATE_INFO *info)
{
    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    state->scissor_enable = info->scissorEnable;

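    /*
     * state->cmd packs the viewport arrays back to back: SF_VIEWPORT and
     * CLIP_VIEWPORT on GEN6 (a single combined SF_CLIP_VIEWPORT on GEN7+,
     * with the clip guardband at dword 8 of each element), then
     * CC_VIEWPORT, and SCISSOR_RECT when scissoring is enabled.  The
     * cmd_*_offset fields record the dword offsets used when filling and
     * emitting the buffer.
     */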
    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        state->cmd_align = GEN7_ALIGNMENT_SF_CLIP_VIEWPORT;
        state->cmd_len = 16 * info->viewportCount;

        state->cmd_clip_offset = 8;
    } else {
        state->cmd_align = GEN6_ALIGNMENT_SF_VIEWPORT;
        state->cmd_len = 8 * info->viewportCount;

        state->cmd_clip_offset =
            u_align(state->cmd_len, GEN6_ALIGNMENT_CLIP_VIEWPORT);
        state->cmd_len = state->cmd_clip_offset + 4 * info->viewportCount;
    }

    state->cmd_cc_offset =
        u_align(state->cmd_len, GEN6_ALIGNMENT_CC_VIEWPORT);
    state->cmd_len = state->cmd_cc_offset + 2 * info->viewportCount;

    if (state->scissor_enable) {
        state->cmd_scissor_rect_offset =
            u_align(state->cmd_len, GEN6_ALIGNMENT_SCISSOR_RECT);
        state->cmd_len = state->cmd_scissor_rect_offset +
            2 * info->viewportCount;
    }

    state->cmd = icd_alloc(sizeof(uint32_t) * state->cmd_len,
            0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!state->cmd)
        return XGL_ERROR_OUT_OF_MEMORY;

    return XGL_SUCCESS;
}

static XGL_RESULT
viewport_state_init(struct intel_viewport_state *state,
                    const struct intel_gpu *gpu,
                    const XGL_VIEWPORT_STATE_CREATE_INFO *info)
{
    const XGL_UINT sf_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 8;
    const XGL_UINT clip_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 4;
    uint32_t *sf_viewport, *clip_viewport, *cc_viewport, *scissor_rect;
    XGL_UINT i;
    XGL_RESULT ret;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    ret = viewport_state_alloc_cmd(state, gpu, info);
    if (ret != XGL_SUCCESS)
        return ret;

    sf_viewport = state->cmd;
    clip_viewport = state->cmd + state->cmd_clip_offset;
    cc_viewport = state->cmd + state->cmd_cc_offset;
    scissor_rect = state->cmd + state->cmd_scissor_rect_offset;

    for (i = 0; i < info->viewportCount; i++) {
        const XGL_VIEWPORT *viewport = &info->viewports[i];
        const XGL_RECT *scissor = &info->scissors[i];
        uint32_t *dw = NULL;
        float translate[3], scale[3];
        int min_gbx, max_gbx, min_gby, max_gby;

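        /*
         * The hardware viewport transform maps NDC to window coordinates
         * as window = translate + ndc * scale, so scale is half the
         * viewport extent and translate is the viewport center:
         *   scale     = (w/2, h/2, (maxDepth - minDepth)/2)
         *   translate = (x + w/2, y + h/2, (minDepth + maxDepth)/2)
         */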
        scale[0] = viewport->width / 2.0f;
        scale[1] = viewport->height / 2.0f;
        scale[2] = (viewport->maxDepth - viewport->minDepth) / 2.0f;
        translate[0] = viewport->originX + scale[0];
        translate[1] = viewport->originY + scale[1];
        translate[2] = (viewport->minDepth + viewport->maxDepth) / 2.0f;

        viewport_get_guardband(gpu, (int) translate[0], (int) translate[1],
                &min_gbx, &max_gbx, &min_gby, &max_gby);

        /* SF_VIEWPORT */
        dw = sf_viewport;
        dw[0] = u_fui(scale[0]);
        dw[1] = u_fui(scale[1]);
        dw[2] = u_fui(scale[2]);
        dw[3] = u_fui(translate[0]);
        dw[4] = u_fui(translate[1]);
        dw[5] = u_fui(translate[2]);
        dw[6] = 0;
        dw[7] = 0;
        sf_viewport += sf_stride;

        /* CLIP_VIEWPORT */
        dw = clip_viewport;
        dw[0] = u_fui(((float) min_gbx - translate[0]) / fabsf(scale[0]));
        dw[1] = u_fui(((float) max_gbx - translate[0]) / fabsf(scale[0]));
        dw[2] = u_fui(((float) min_gby - translate[1]) / fabsf(scale[1]));
        dw[3] = u_fui(((float) max_gby - translate[1]) / fabsf(scale[1]));
        clip_viewport += clip_stride;

        /* CC_VIEWPORT */
        dw = cc_viewport;
        dw[0] = u_fui(viewport->minDepth);
        dw[1] = u_fui(viewport->maxDepth);
        cc_viewport += 2;

        /* SCISSOR_RECT */
        if (state->scissor_enable) {
            int16_t max_x, max_y;

            max_x = (scissor->offset.x + scissor->extent.width - 1) & 0xffff;
            max_y = (scissor->offset.y + scissor->extent.height - 1) & 0xffff;

            dw = scissor_rect;
            if (scissor->extent.width && scissor->extent.height) {
                dw[0] = (scissor->offset.y & 0xffff) << 16 |
                        (scissor->offset.x & 0xffff);
                dw[1] = max_y << 16 | max_x;
            } else {
                dw[0] = 1 << 16 | 1;
                dw[1] = 0;
            }
            scissor_rect += 2;
        }
    }

    return XGL_SUCCESS;
}

static void
msaa_state_init(struct intel_msaa_state *state,
                const struct intel_gpu *gpu,
                const XGL_MSAA_STATE_CREATE_INFO *info)
{
    /* taken from Mesa */
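    /*
     * Each byte encodes one sample position as 4-bit X and Y subpixel
     * offsets in 1/16-pixel units (X in the high nibble, Y in the low
     * nibble, sample 0 in the least significant byte); this is our reading
     * of the 3DSTATE_MULTISAMPLE sample-position fields.
     */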
    static const uint32_t brw_multisample_positions_1x_2x = 0x0088cc44;
    static const uint32_t brw_multisample_positions_4x = 0xae2ae662;
    static const uint32_t brw_multisample_positions_8x[] = { 0xdbb39d79, 0x3ff55117 };
    uint32_t cmd, cmd_len;
    uint32_t *dw = state->cmd;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);
    STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 6);

    state->sample_count = info->samples;
    if (!state->sample_count)
        state->sample_count = 1;

    /* 3DSTATE_MULTISAMPLE */
    cmd = GEN6_RENDER_CMD(3D, 3DSTATE_MULTISAMPLE);
    cmd_len = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 4 : 3;

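    /* the length field of a GEN render command is its dword count minus 2 */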
    dw[0] = cmd | (cmd_len - 2);
    if (info->samples <= 1) {
        dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_1;
        dw[2] = brw_multisample_positions_1x_2x;
    } else if (info->samples <= 4 || intel_gpu_gen(gpu) == INTEL_GEN(6)) {
        dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_4;
        dw[2] = brw_multisample_positions_4x;
    } else {
        dw[1] = GEN7_MULTISAMPLE_DW1_NUMSAMPLES_8;
        dw[2] = brw_multisample_positions_8x[0];
        dw[3] = brw_multisample_positions_8x[1];
    }

    dw += cmd_len;

    state->cmd_len = cmd_len + 2;

    /* 3DSTATE_SAMPLE_MASK */
    cmd = GEN6_RENDER_CMD(3D, 3DSTATE_SAMPLE_MASK);
    cmd_len = 2;

    dw[0] = cmd | (cmd_len - 2);
    dw[1] = info->sampleMask & ((1 << info->samples) - 1);
}

static void
blend_state_init(struct intel_blend_state *state,
                 const struct intel_gpu *gpu,
                 const XGL_COLOR_BLEND_STATE_CREATE_INFO *info)
{
    XGL_UINT i;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    for (i = 0; i < ARRAY_SIZE(info->attachment); i++) {
        const XGL_COLOR_ATTACHMENT_BLEND_STATE *att = &info->attachment[i];
        uint32_t *dw = &state->cmd[2 * i];

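        /*
         * GEN6 BLEND_STATE is two dwords per render target.  In DW0, bit 31
         * enables blending and bit 30 enables independent alpha blending;
         * the alpha function/factors sit at bits 28:26, 24:20 and 19:15,
         * and the color function/factors at bits 13:11, 9:5 and 4:0
         * (matching the shifts below).
         */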
        if (att->blendEnable) {
            dw[0] = 1 << 31 |
                    translate_blend_func(att->blendFuncAlpha) << 26 |
                    translate_blend(att->srcBlendAlpha) << 20 |
                    translate_blend(att->destBlendAlpha) << 15 |
                    translate_blend_func(att->blendFuncColor) << 11 |
                    translate_blend(att->srcBlendColor) << 5 |
                    translate_blend(att->destBlendColor);

            if (att->blendFuncAlpha != att->blendFuncColor ||
                att->srcBlendAlpha != att->srcBlendColor ||
                att->destBlendAlpha != att->destBlendColor)
                dw[0] |= 1 << 30;
        }

        dw[1] = GEN6_BLEND_DW1_COLORCLAMP_RTFORMAT |
                0x3;
    }

    memcpy(state->cmd_blend_color, info->blendConst, sizeof(info->blendConst));
}

static XGL_RESULT
ds_state_init(struct intel_ds_state *state,
              const struct intel_gpu *gpu,
              const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info)
{
    uint32_t *dw = state->cmd;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 3);

    if (info->depthBoundsEnable)
        return XGL_ERROR_UNKNOWN;

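    /*
     * state->cmd holds the three dwords of DEPTH_STENCIL_STATE: DW0 the
     * stencil enables, functions and ops, DW1 the stencil read/write
     * masks, and DW2 the depth test function and write enable.
     */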
    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 359:
     *
     *     "If the Depth Buffer is either undefined or does not have a surface
     *      format of D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT and separate
     *      stencil buffer is disabled, Stencil Test Enable must be DISABLED"
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 370:
     *
     *     "This field (Stencil Test Enable) cannot be enabled if
     *      Surface Format in 3DSTATE_DEPTH_BUFFER is set to D16_UNORM."
     *
     * TODO We do not check these yet.
     */
    if (info->stencilTestEnable) {
        dw[0] = 1 << 31 |
                translate_compare_func(info->front.stencilFunc) << 28 |
                translate_stencil_op(info->front.stencilFailOp) << 25 |
                translate_stencil_op(info->front.stencilDepthFailOp) << 22 |
                translate_stencil_op(info->front.stencilPassOp) << 19 |
                1 << 15 |
                translate_compare_func(info->back.stencilFunc) << 12 |
                translate_stencil_op(info->back.stencilFailOp) << 9 |
                translate_stencil_op(info->back.stencilDepthFailOp) << 6 |
                translate_stencil_op(info->back.stencilPassOp) << 3;

        if (info->stencilWriteMask)
            dw[0] |= 1 << 18;

        dw[1] = (info->stencilReadMask & 0xff) << 24 |
                (info->stencilWriteMask & 0xff) << 16;

        state->cmd_stencil_ref = (info->front.stencilRef & 0xff) << 24 |
                                 (info->back.stencilRef & 0xff) << 16;
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 360:
     *
     *     "Enabling the Depth Test function without defining a Depth Buffer is
     *      UNDEFINED."
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 375:
     *
     *     "A Depth Buffer must be defined before enabling writes to it, or
     *      operation is UNDEFINED."
     *
     * TODO We do not check these yet.
     */
    if (info->depthTestEnable) {
        dw[2] = 1 << 31 |
                translate_compare_func(info->depthFunc) << 27 |
                (bool) info->depthWriteEnable << 26;
    } else {
        dw[2] = GEN6_COMPAREFUNCTION_ALWAYS << 27;
    }

    return XGL_SUCCESS;
}

static void viewport_state_destroy(struct intel_obj *obj)
{
    struct intel_viewport_state *state = intel_viewport_state_from_obj(obj);

    intel_viewport_state_destroy(state);
}

XGL_RESULT intel_viewport_state_create(struct intel_dev *dev,
                                       const XGL_VIEWPORT_STATE_CREATE_INFO *info,
                                       struct intel_viewport_state **state_ret)
{
    struct intel_viewport_state *state;
    XGL_RESULT ret;

    state = (struct intel_viewport_state *) intel_base_create(dev,
            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_VIEWPORT_STATE,
            info, 0);
    if (!state)
        return XGL_ERROR_OUT_OF_MEMORY;

    state->obj.destroy = viewport_state_destroy;

    ret = viewport_state_init(state, dev->gpu, info);
    if (ret != XGL_SUCCESS) {
        intel_viewport_state_destroy(state);
        return ret;
    }

    *state_ret = state;

    return XGL_SUCCESS;
}

void intel_viewport_state_destroy(struct intel_viewport_state *state)
{
    icd_free(state->cmd);
    intel_base_destroy(&state->obj.base);
}

static void raster_state_destroy(struct intel_obj *obj)
{
    struct intel_raster_state *state = intel_raster_state_from_obj(obj);

    intel_raster_state_destroy(state);
}

XGL_RESULT intel_raster_state_create(struct intel_dev *dev,
                                     const XGL_RASTER_STATE_CREATE_INFO *info,
                                     struct intel_raster_state **state_ret)
{
    struct intel_raster_state *state;

    state = (struct intel_raster_state *) intel_base_create(dev,
            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_RASTER_STATE,
            info, 0);
    if (!state)
        return XGL_ERROR_OUT_OF_MEMORY;

    state->obj.destroy = raster_state_destroy;

    raster_state_init(state, dev->gpu, info);

    *state_ret = state;

    return XGL_SUCCESS;
}

void intel_raster_state_destroy(struct intel_raster_state *state)
{
    intel_base_destroy(&state->obj.base);
}

static void msaa_state_destroy(struct intel_obj *obj)
{
    struct intel_msaa_state *state = intel_msaa_state_from_obj(obj);

    intel_msaa_state_destroy(state);
}

XGL_RESULT intel_msaa_state_create(struct intel_dev *dev,
                                   const XGL_MSAA_STATE_CREATE_INFO *info,
                                   struct intel_msaa_state **state_ret)
{
    struct intel_msaa_state *state;

    state = (struct intel_msaa_state *) intel_base_create(dev,
            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_MSAA_STATE,
            info, 0);
    if (!state)
        return XGL_ERROR_OUT_OF_MEMORY;

    state->obj.destroy = msaa_state_destroy;

    msaa_state_init(state, dev->gpu, info);

    *state_ret = state;

    return XGL_SUCCESS;
}

void intel_msaa_state_destroy(struct intel_msaa_state *state)
{
    intel_base_destroy(&state->obj.base);
}

static void blend_state_destroy(struct intel_obj *obj)
{
    struct intel_blend_state *state = intel_blend_state_from_obj(obj);

    intel_blend_state_destroy(state);
}

XGL_RESULT intel_blend_state_create(struct intel_dev *dev,
                                    const XGL_COLOR_BLEND_STATE_CREATE_INFO *info,
                                    struct intel_blend_state **state_ret)
{
    struct intel_blend_state *state;

    state = (struct intel_blend_state *) intel_base_create(dev,
            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_COLOR_BLEND_STATE,
            info, 0);
    if (!state)
        return XGL_ERROR_OUT_OF_MEMORY;

    state->obj.destroy = blend_state_destroy;

    blend_state_init(state, dev->gpu, info);

    *state_ret = state;

    return XGL_SUCCESS;
}

void intel_blend_state_destroy(struct intel_blend_state *state)
{
    intel_base_destroy(&state->obj.base);
}

static void ds_state_destroy(struct intel_obj *obj)
{
    struct intel_ds_state *state = intel_ds_state_from_obj(obj);

    intel_ds_state_destroy(state);
}

XGL_RESULT intel_ds_state_create(struct intel_dev *dev,
                                 const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info,
                                 struct intel_ds_state **state_ret)
{
    struct intel_ds_state *state;
    XGL_RESULT ret;

    state = (struct intel_ds_state *) intel_base_create(dev,
            sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_STATE,
            info, 0);
    if (!state)
        return XGL_ERROR_OUT_OF_MEMORY;

    state->obj.destroy = ds_state_destroy;

    ret = ds_state_init(state, dev->gpu, info);
    if (ret != XGL_SUCCESS) {
        intel_ds_state_destroy(state);
        return ret;
    }

    *state_ret = state;

    return XGL_SUCCESS;
}

void intel_ds_state_destroy(struct intel_ds_state *state)
{
    intel_base_destroy(&state->obj.base);
}

XGL_RESULT XGLAPI intelCreateViewportState(
    XGL_DEVICE                                  device,
    const XGL_VIEWPORT_STATE_CREATE_INFO*       pCreateInfo,
    XGL_VIEWPORT_STATE_OBJECT*                  pState)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_viewport_state_create(dev, pCreateInfo,
            (struct intel_viewport_state **) pState);
}

XGL_RESULT XGLAPI intelCreateRasterState(
    XGL_DEVICE                                  device,
    const XGL_RASTER_STATE_CREATE_INFO*         pCreateInfo,
    XGL_RASTER_STATE_OBJECT*                    pState)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_raster_state_create(dev, pCreateInfo,
            (struct intel_raster_state **) pState);
}

XGL_RESULT XGLAPI intelCreateMsaaState(
    XGL_DEVICE                                  device,
    const XGL_MSAA_STATE_CREATE_INFO*           pCreateInfo,
    XGL_MSAA_STATE_OBJECT*                      pState)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_msaa_state_create(dev, pCreateInfo,
            (struct intel_msaa_state **) pState);
}

XGL_RESULT XGLAPI intelCreateColorBlendState(
    XGL_DEVICE                                  device,
    const XGL_COLOR_BLEND_STATE_CREATE_INFO*    pCreateInfo,
    XGL_COLOR_BLEND_STATE_OBJECT*               pState)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_blend_state_create(dev, pCreateInfo,
            (struct intel_blend_state **) pState);
}

XGL_RESULT XGLAPI intelCreateDepthStencilState(
    XGL_DEVICE                                  device,
    const XGL_DEPTH_STENCIL_STATE_CREATE_INFO*  pCreateInfo,
    XGL_DEPTH_STENCIL_STATE_OBJECT*             pState)
{
    struct intel_dev *dev = intel_dev(device);

    return intel_ds_state_create(dev, pCreateInfo,
            (struct intel_ds_state **) pState);
}