blob: aec0b94cf306f1d7991f188a91098ccb2508a2aa [file] [log] [blame]
Chia-I Wua5714e82014-08-11 15:33:42 +08001/*
2 * XGL
3 *
4 * Copyright (C) 2014 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
Chia-I Wu97702a62014-08-11 15:33:42 +080025#include <math.h>
26#include "genhw/genhw.h"
Chia-I Wua5714e82014-08-11 15:33:42 +080027#include "dev.h"
28#include "state.h"
29
Chia-I Wu97702a62014-08-11 15:33:42 +080030static int translate_compare_func(XGL_COMPARE_FUNC func)
31{
32 switch (func) {
33 case XGL_COMPARE_NEVER: return GEN6_COMPAREFUNCTION_NEVER;
34 case XGL_COMPARE_LESS: return GEN6_COMPAREFUNCTION_LESS;
35 case XGL_COMPARE_EQUAL: return GEN6_COMPAREFUNCTION_EQUAL;
36 case XGL_COMPARE_LESS_EQUAL: return GEN6_COMPAREFUNCTION_LEQUAL;
37 case XGL_COMPARE_GREATER: return GEN6_COMPAREFUNCTION_GREATER;
38 case XGL_COMPARE_NOT_EQUAL: return GEN6_COMPAREFUNCTION_NOTEQUAL;
39 case XGL_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
40 case XGL_COMPARE_ALWAYS: return GEN6_COMPAREFUNCTION_ALWAYS;
41 default:
42 assert(!"unknown compare_func");
43 return GEN6_COMPAREFUNCTION_NEVER;
44 }
45}
46
47static int translate_stencil_op(XGL_STENCIL_OP op)
48{
49 switch (op) {
50 case XGL_STENCIL_OP_KEEP: return GEN6_STENCILOP_KEEP;
51 case XGL_STENCIL_OP_ZERO: return GEN6_STENCILOP_ZERO;
52 case XGL_STENCIL_OP_REPLACE: return GEN6_STENCILOP_REPLACE;
53 case XGL_STENCIL_OP_INC_CLAMP: return GEN6_STENCILOP_INCRSAT;
54 case XGL_STENCIL_OP_DEC_CLAMP: return GEN6_STENCILOP_DECRSAT;
55 case XGL_STENCIL_OP_INVERT: return GEN6_STENCILOP_INVERT;
56 case XGL_STENCIL_OP_INC_WRAP: return GEN6_STENCILOP_INCR;
57 case XGL_STENCIL_OP_DEC_WRAP: return GEN6_STENCILOP_DECR;
58 default:
59 assert(!"unknown stencil op");
60 return GEN6_STENCILOP_KEEP;
61 }
62}
63
64static int translate_blend_func(XGL_BLEND_FUNC func)
65{
66 switch (func) {
67 case XGL_BLEND_FUNC_ADD: return GEN6_BLENDFUNCTION_ADD;
68 case XGL_BLEND_FUNC_SUBTRACT: return GEN6_BLENDFUNCTION_SUBTRACT;
69 case XGL_BLEND_FUNC_REVERSE_SUBTRACT: return GEN6_BLENDFUNCTION_REVERSE_SUBTRACT;
70 case XGL_BLEND_FUNC_MIN: return GEN6_BLENDFUNCTION_MIN;
71 case XGL_BLEND_FUNC_MAX: return GEN6_BLENDFUNCTION_MAX;
72 default:
73 assert(!"unknown blend func");
74 return GEN6_BLENDFUNCTION_ADD;
75 };
76}
77
78static int translate_blend(XGL_BLEND blend)
79{
80 switch (blend) {
81 case XGL_BLEND_ZERO: return GEN6_BLENDFACTOR_ZERO;
82 case XGL_BLEND_ONE: return GEN6_BLENDFACTOR_ONE;
83 case XGL_BLEND_SRC_COLOR: return GEN6_BLENDFACTOR_SRC_COLOR;
84 case XGL_BLEND_ONE_MINUS_SRC_COLOR: return GEN6_BLENDFACTOR_INV_SRC_COLOR;
85 case XGL_BLEND_DEST_COLOR: return GEN6_BLENDFACTOR_DST_COLOR;
86 case XGL_BLEND_ONE_MINUS_DEST_COLOR: return GEN6_BLENDFACTOR_INV_DST_COLOR;
87 case XGL_BLEND_SRC_ALPHA: return GEN6_BLENDFACTOR_SRC_ALPHA;
88 case XGL_BLEND_ONE_MINUS_SRC_ALPHA: return GEN6_BLENDFACTOR_INV_SRC_ALPHA;
89 case XGL_BLEND_DEST_ALPHA: return GEN6_BLENDFACTOR_DST_ALPHA;
90 case XGL_BLEND_ONE_MINUS_DEST_ALPHA: return GEN6_BLENDFACTOR_INV_DST_ALPHA;
91 case XGL_BLEND_CONSTANT_COLOR: return GEN6_BLENDFACTOR_CONST_COLOR;
92 case XGL_BLEND_ONE_MINUS_CONSTANT_COLOR: return GEN6_BLENDFACTOR_INV_CONST_COLOR;
93 case XGL_BLEND_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_CONST_ALPHA;
94 case XGL_BLEND_ONE_MINUS_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_INV_CONST_ALPHA;
95 case XGL_BLEND_SRC_ALPHA_SATURATE: return GEN6_BLENDFACTOR_SRC_ALPHA_SATURATE;
96 case XGL_BLEND_SRC1_COLOR: return GEN6_BLENDFACTOR_SRC1_COLOR;
97 case XGL_BLEND_ONE_MINUS_SRC1_COLOR: return GEN6_BLENDFACTOR_INV_SRC1_COLOR;
98 case XGL_BLEND_SRC1_ALPHA: return GEN6_BLENDFACTOR_SRC1_ALPHA;
99 case XGL_BLEND_ONE_MINUS_SRC1_ALPHA: return GEN6_BLENDFACTOR_INV_SRC1_ALPHA;
100 default:
101 assert(!"unknown blend factor");
102 return GEN6_BLENDFACTOR_ONE;
103 };
104}
105
106static void
107raster_state_init(struct intel_raster_state *state,
108 const struct intel_gpu *gpu,
109 const XGL_RASTER_STATE_CREATE_INFO *info)
110{
111 switch (info->fillMode) {
112 case XFL_FILL_POINTS:
113 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_POINT |
114 GEN7_SF_DW1_BACKFACE_POINT;
115 break;
116 case XGL_FILL_WIREFRAME:
117 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_WIREFRAME |
118 GEN7_SF_DW1_BACKFACE_WIREFRAME;
119 break;
120 case XGL_FILL_SOLID:
121 default:
122 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_SOLID |
123 GEN7_SF_DW1_BACKFACE_SOLID;
124 break;
125 }
126
127 if (info->frontFace == XGL_FRONT_FACE_CCW) {
128 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTWINDING_CCW;
129 state->cmd_clip_cull |= GEN7_CLIP_DW1_FRONTWINDING_CCW;
130 }
131
132 switch (info->cullMode) {
133 case XGL_CULL_NONE:
134 default:
135 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_NONE;
136 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_NONE;
137 break;
138 case XGL_CULL_FRONT:
139 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_FRONT;
140 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_FRONT;
141 break;
142 case XGL_CULL_BACK:
143 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BACK;
144 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BACK;
145 break;
146 case XGL_CULL_FRONT_AND_BACK:
147 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BOTH;
148 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BOTH;
149 break;
150 }
151
152 /* only GEN7+ needs cull mode in 3DSTATE_CLIP */
153 if (intel_gpu_gen(gpu) == INTEL_GEN(6))
154 state->cmd_clip_cull = 0;
155
156 /* XXX scale info->depthBias back into NDC */
157 state->cmd_depth_offset_const = u_fui((float) info->depthBias * 2.0f);
158 state->cmd_depth_offset_clamp = u_fui(info->depthBiasClamp);
159 state->cmd_depth_offset_scale = u_fui(info->slopeScaledDepthBias);
160}
161
/*
 * Compute a guardband square for one viewport, centered at the given
 * screen-space point and clamped to the per-generation valid range.
 * Results are returned through the four int out-parameters.
 *
 * Fix: the out-parameter assignments previously cast the integer
 * results to (float) before storing through int pointers — a pointless
 * round-trip through float; the casts are removed.
 */
static void
viewport_get_guardband(const struct intel_gpu *gpu,
                       int center_x, int center_y,
                       int *min_gbx, int *max_gbx,
                       int *min_gby, int *max_gby)
{
    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 234:
     *
     *   "Per-Device Guardband Extents
     *
     *    - Supported X,Y ScreenSpace "Guardband" Extent: [-16K,16K-1]
     *    - Maximum Post-Clamp Delta (X or Y): 16K"
     *
     *   "In addition, in order to be correctly rendered, objects must have a
     *    screenspace bounding box not exceeding 8K in the X or Y direction.
     *    This additional restriction must also be comprehended by software,
     *    i.e., enforced by use of clipping."
     *
     * From the Ivy Bridge PRM, volume 2 part 1, page 248:
     *
     *   "Per-Device Guardband Extents
     *
     *    - Supported X,Y ScreenSpace "Guardband" Extent: [-32K,32K-1]
     *    - Maximum Post-Clamp Delta (X or Y): N/A"
     *
     *   "In addition, in order to be correctly rendered, objects must have a
     *    screenspace bounding box not exceeding 8K in the X or Y direction.
     *    This additional restriction must also be comprehended by software,
     *    i.e., enforced by use of clipping."
     *
     * Combined, the bounding box of any object can not exceed 8K in both
     * width and height.
     *
     * Below we set the guardband as a square of length 8K, centered at where
     * the viewport is.  This makes sure all objects passing the GB test are
     * valid to the renderer, and those failing the XY clipping have a
     * better chance of passing the GB test.
     */
    const int max_extent = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 32768 : 16384;
    const int half_len = 8192 / 2;

    /* make sure the guardband is within the valid range */
    if (center_x - half_len < -max_extent)
        center_x = -max_extent + half_len;
    else if (center_x + half_len > max_extent - 1)
        center_x = max_extent - half_len;

    if (center_y - half_len < -max_extent)
        center_y = -max_extent + half_len;
    else if (center_y + half_len > max_extent - 1)
        center_y = max_extent - half_len;

    *min_gbx = center_x - half_len;
    *max_gbx = center_x + half_len;
    *min_gby = center_y - half_len;
    *max_gby = center_y + half_len;
}
220
/*
 * Lay out and allocate the combined viewport command buffer.
 *
 * The buffer packs, in order: SF (or SF_CLIP on GEN7+) viewports,
 * CLIP viewports (GEN6 only, as a separate array), CC viewports, and
 * optionally SCISSOR_RECTs.  Offsets and total length (all in dwords)
 * are recorded in *state for viewport_state_init() to fill in.
 *
 * Returns XGL_ERROR_OUT_OF_MEMORY if the allocation fails, else
 * XGL_SUCCESS.
 */
static XGL_RESULT
viewport_state_alloc_cmd(struct intel_viewport_state *state,
                         const struct intel_gpu *gpu,
                         const XGL_VIEWPORT_STATE_CREATE_INFO *info)
{
    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    state->scissor_enable = info->scissorEnable;

    if (intel_gpu_gen(gpu) >= INTEL_GEN(7)) {
        /* GEN7+: combined SF_CLIP_VIEWPORT, 16 dwords per viewport;
         * the CLIP portion starts at dword 8 of each element */
        state->cmd_align = GEN7_ALIGNMENT_SF_CLIP_VIEWPORT;
        state->cmd_len = 16 * info->viewportCount;

        state->cmd_clip_offset = 8;
    } else {
        /* GEN6: separate SF (8 dwords each) and CLIP (4 dwords each)
         * viewport arrays; the CLIP array follows the SF array */
        state->cmd_align = GEN6_ALIGNMENT_SF_VIEWPORT;
        state->cmd_len = 8 * info->viewportCount;

        state->cmd_clip_offset =
            u_align(state->cmd_len, GEN6_ALIGNMENT_CLIP_VIEWPORT);
        state->cmd_len = state->cmd_clip_offset + 4 * info->viewportCount;
    }

    /* CC_VIEWPORT: 2 dwords per viewport */
    state->cmd_cc_offset =
        u_align(state->cmd_len, GEN6_ALIGNMENT_CC_VIEWPORT);
    state->cmd_len = state->cmd_cc_offset + 2 * info->viewportCount;

    if (state->scissor_enable) {
        /* SCISSOR_RECT: 2 dwords per viewport.
         * NOTE(review): cmd_scissor_rect_offset is left unset when
         * scissoring is disabled; this presumably relies on the state
         * object being zero-initialized — confirm. */
        state->cmd_scissor_rect_offset =
            u_align(state->cmd_len, GEN6_ALIGNMENT_SCISSOR_RECT);
        state->cmd_len = state->cmd_scissor_rect_offset +
            2 * info->viewportCount;
    }

    state->cmd = icd_alloc(sizeof(uint32_t) * state->cmd_len,
            0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!state->cmd)
        return XGL_ERROR_OUT_OF_MEMORY;

    return XGL_SUCCESS;
}
262
263static XGL_RESULT
Chia-I Wu97702a62014-08-11 15:33:42 +0800264viewport_state_init(struct intel_viewport_state *state,
265 const struct intel_gpu *gpu,
266 const XGL_VIEWPORT_STATE_CREATE_INFO *info)
267{
268 const XGL_UINT sf_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 8;
269 const XGL_UINT clip_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 4;
270 uint32_t *sf_viewport, *clip_viewport, *cc_viewport, *scissor_rect;
271 XGL_UINT i;
Chia-I Wu7b566a42014-08-22 10:58:57 +0800272 XGL_RESULT ret;
Chia-I Wu97702a62014-08-11 15:33:42 +0800273
274 INTEL_GPU_ASSERT(gpu, 6, 7.5);
275
Chia-I Wu7b566a42014-08-22 10:58:57 +0800276 ret = viewport_state_alloc_cmd(state, gpu, info);
277 if (ret != XGL_SUCCESS)
278 return ret;
Chia-I Wu97702a62014-08-11 15:33:42 +0800279
280 sf_viewport = state->cmd;
Chia-I Wu7b566a42014-08-22 10:58:57 +0800281 clip_viewport = state->cmd + state->cmd_clip_offset;
282 cc_viewport = state->cmd + state->cmd_cc_offset;
283 scissor_rect = state->cmd + state->cmd_scissor_rect_offset;
Chia-I Wu97702a62014-08-11 15:33:42 +0800284
285 for (i = 0; i < info->viewportCount; i++) {
286 const XGL_VIEWPORT *viewport = &info->viewports[i];
287 const XGL_RECT *scissor = &info->scissors[i];
288 uint32_t *dw = NULL;
289 float translate[3], scale[3];
290 int min_gbx, max_gbx, min_gby, max_gby;
291
292 scale[0] = viewport->width / 2.0f;
293 scale[1] = viewport->height / 2.0f;
294 scale[2] = (viewport->maxDepth - viewport->minDepth) / 2.0;
295 translate[0] = viewport->originX + scale[0];
296 translate[1] = viewport->originY + scale[1];
297 translate[2] = (viewport->minDepth + viewport->maxDepth) / 2.0f;
298
299 viewport_get_guardband(gpu, (int) translate[0], (int) translate[1],
300 &min_gbx, &max_gbx, &min_gby, &max_gby);
301
302 /* SF_VIEWPORT */
303 dw = sf_viewport;
304 dw[0] = u_fui(scale[0]);
305 dw[1] = u_fui(scale[1]);
306 dw[2] = u_fui(scale[2]);
307 dw[3] = u_fui(translate[0]);
308 dw[4] = u_fui(translate[1]);
309 dw[5] = u_fui(translate[2]);
310 dw[6] = 0;
311 dw[7] = 0;
312 sf_viewport += sf_stride;
313
314 /* CLIP_VIEWPORT */
315 dw = clip_viewport;
316 dw[0] = ((float) min_gbx - translate[0]) / fabsf(scale[0]);
317 dw[1] = ((float) max_gbx - translate[0]) / fabsf(scale[0]);
318 dw[2] = ((float) min_gby - translate[1]) / fabsf(scale[1]);
319 dw[3] = ((float) max_gby - translate[1]) / fabsf(scale[1]);
320 clip_viewport += clip_stride;
321
322 /* CC_VIEWPORT */
323 dw = cc_viewport;
324 dw[0] = u_fui(viewport->minDepth);
325 dw[1] = u_fui(viewport->maxDepth);
326 cc_viewport += 2;
327
328 /* SCISSOR_RECT */
Chia-I Wu7b566a42014-08-22 10:58:57 +0800329 if (state->scissor_enable) {
330 int16_t max_x, max_y;
331
332 max_x = (scissor->offset.x + scissor->extent.width - 1) & 0xffff;
333 max_y = (scissor->offset.y + scissor->extent.height - 1) & 0xffff;
334
335 dw = scissor_rect;
336 if (scissor->extent.width && scissor->extent.height) {
337 dw[0] = (scissor->offset.y & 0xffff) << 16 |
338 (scissor->offset.x & 0xffff);
339 dw[1] = max_y << 16 | max_x;
340 } else {
341 dw[0] = 1 << 16 | 1;
342 dw[1] = 0;
343 }
344 scissor_rect += 2;
Chia-I Wu97702a62014-08-11 15:33:42 +0800345 }
Chia-I Wu97702a62014-08-11 15:33:42 +0800346 }
347
348 return XGL_SUCCESS;
349}
350
351static void
352msaa_state_init(struct intel_msaa_state *state,
353 const struct intel_gpu *gpu,
354 const XGL_MSAA_STATE_CREATE_INFO *info)
355{
356 /* taken from Mesa */
357 static const uint32_t brw_multisample_positions_1x_2x = 0x0088cc44;
358 static const uint32_t brw_multisample_positions_4x = 0xae2ae662;
359 static const uint32_t brw_multisample_positions_8x[] = { 0xdbb39d79, 0x3ff55117 };
360 uint32_t cmd, cmd_len;
361 uint32_t *dw = state->cmd;
362
363 INTEL_GPU_ASSERT(gpu, 6, 7.5);
364 STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 6);
365
366 /* 3DSTATE_MULTISAMPLE */
Chia-I Wub0b9f692014-08-21 11:33:29 +0800367 cmd = GEN_RENDER_CMD(3D, GEN6, 3DSTATE_MULTISAMPLE);
Chia-I Wu97702a62014-08-11 15:33:42 +0800368 cmd_len = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 4 : 3;
369
370 dw[0] = cmd | (cmd_len - 2);
371 if (info->samples <= 1) {
372 dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_1;
373 dw[2] = brw_multisample_positions_1x_2x;
374 } else if (info->samples <= 4 || intel_gpu_gen(gpu) == INTEL_GEN(6)) {
375 dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_4;
376 dw[2] = brw_multisample_positions_4x;
377 } else {
378 dw[1] = GEN7_MULTISAMPLE_DW1_NUMSAMPLES_8;
379 dw[2] = brw_multisample_positions_8x[0];
380 dw[3] = brw_multisample_positions_8x[1];
381 }
382
383 dw += cmd_len;
384
Chia-I Wuf3c59252014-08-22 09:26:22 +0800385 state->cmd_len = cmd_len + 2;
386
Chia-I Wu97702a62014-08-11 15:33:42 +0800387 /* 3DSTATE_SAMPLE_MASK */
Chia-I Wub0b9f692014-08-21 11:33:29 +0800388 cmd = GEN_RENDER_CMD(3D, GEN6, 3DSTATE_SAMPLE_MASK);
Chia-I Wu97702a62014-08-11 15:33:42 +0800389 cmd_len = 2;
390
391 dw[0] = cmd | (cmd_len - 2);
392 dw[1] = info->sampleMask & ((1 << info->samples) - 1);
393}
394
395static void
396blend_state_init(struct intel_blend_state *state,
397 const struct intel_gpu *gpu,
398 const XGL_COLOR_BLEND_STATE_CREATE_INFO *info)
399{
400 XGL_UINT i;
401
402 INTEL_GPU_ASSERT(gpu, 6, 7.5);
403
404 for (i = 0; i < ARRAY_SIZE(info->attachment); i++) {
405 const XGL_COLOR_ATTACHMENT_BLEND_STATE *att = &info->attachment[i];
406 uint32_t *dw = &state->cmd[2 * i];
407
408 if (att->blendEnable) {
409 dw[0] = 1 << 31 |
410 translate_blend_func(att->blendFuncAlpha) << 26 |
411 translate_blend(att->srcBlendAlpha) << 20 |
412 translate_blend(att->destBlendAlpha) << 15 |
413 translate_blend_func(att->blendFuncColor) << 11 |
414 translate_blend(att->srcBlendColor) << 5 |
415 translate_blend(att->destBlendColor);
416
417 if (att->blendFuncAlpha != att->blendFuncColor ||
418 att->srcBlendAlpha != att->srcBlendColor ||
419 att->destBlendAlpha != att->destBlendColor)
420 dw[0] |= 1 << 30;
421 }
422
423 dw[1] = GEN6_BLEND_DW1_COLORCLAMP_RTFORMAT |
424 0x3;
425 }
426
427 memcpy(state->cmd_blend_color, info->blendConst, sizeof(info->blendConst));
428}
429
/*
 * Pack the depth/stencil create info into the three DEPTH_STENCIL_STATE
 * dwords in state->cmd (dw0: stencil control, dw1: stencil masks,
 * dw2: depth control) and record the stencil reference values.
 *
 * Returns XGL_ERROR_UNKNOWN for depth-bounds requests (unsupported),
 * else XGL_SUCCESS.
 *
 * NOTE(review): dw[0]/dw[1] are left untouched when stencil testing is
 * disabled — presumably relying on zero-initialized state; confirm.
 */
static XGL_RESULT
ds_state_init(struct intel_ds_state *state,
              const struct intel_gpu *gpu,
              const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info)
{
    uint32_t *dw = state->cmd;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 3);

    /* depth bounds test is not supported by this driver */
    if (info->depthBoundsEnable)
        return XGL_ERROR_UNKNOWN;

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 359:
     *
     *   "If the Depth Buffer is either undefined or does not have a surface
     *    format of D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT and separate
     *    stencil buffer is disabled, Stencil Test Enable must be DISABLED"
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 370:
     *
     *   "This field (Stencil Test Enable) cannot be enabled if
     *    Surface Format in 3DSTATE_DEPTH_BUFFER is set to D16_UNORM."
     *
     * TODO We do not check these yet.
     */
    if (info->stencilTestEnable) {
        /* dw0: test enable, front face func/ops, back face func/ops */
        dw[0] = 1 << 31 |
                translate_compare_func(info->front.stencilFunc) << 28 |
                translate_stencil_op(info->front.stencilFailOp) << 25 |
                translate_stencil_op(info->front.stencilDepthFailOp) << 22 |
                translate_stencil_op(info->front.stencilPassOp) << 19 |
                1 << 15 |
                translate_compare_func(info->back.stencilFunc) << 12 |
                translate_stencil_op(info->back.stencilFailOp) << 9 |
                translate_stencil_op(info->back.stencilDepthFailOp) << 6 |
                translate_stencil_op(info->back.stencilPassOp) << 3;

        /* stencil buffer writes only when the write mask is non-zero */
        if (info->stencilWriteMask)
            dw[0] |= 1 << 18;

        dw[1] = (info->stencilReadMask & 0xff) << 24 |
                (info->stencilWriteMask & 0xff) << 16;

        /* reference values are emitted separately (COLOR_CALC_STATE) */
        state->cmd_stencil_ref = (info->front.stencilRef & 0xff) << 24 |
                                 (info->back.stencilRef & 0xff) << 16;
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 360:
     *
     *   "Enabling the Depth Test function without defining a Depth Buffer is
     *    UNDEFINED."
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 375:
     *
     *   "A Depth Buffer must be defined before enabling writes to it, or
     *    operation is UNDEFINED."
     *
     * TODO We do not check these yet.
     */
    if (info->depthTestEnable) {
        /* dw2: depth test enable, compare func, write enable */
        dw[2] = 1 << 31 |
                translate_compare_func(info->depthFunc) << 27 |
                (bool) info->depthWriteEnable << 26;
    } else {
        /* test disabled: hardware still wants a valid (ALWAYS) func */
        dw[2] = GEN6_COMPAREFUNCTION_ALWAYS << 27;
    }

    return XGL_SUCCESS;
}
503
/* intel_obj destroy callback: recover the containing viewport state
 * object and free it. */
static void viewport_state_destroy(struct intel_obj *obj)
{
    intel_viewport_state_destroy(intel_viewport_state_from_obj(obj));
}
510
511XGL_RESULT intel_viewport_state_create(struct intel_dev *dev,
512 const XGL_VIEWPORT_STATE_CREATE_INFO *info,
513 struct intel_viewport_state **state_ret)
514{
515 struct intel_viewport_state *state;
Chia-I Wu97702a62014-08-11 15:33:42 +0800516 XGL_RESULT ret;
Chia-I Wua5714e82014-08-11 15:33:42 +0800517
518 state = (struct intel_viewport_state *) intel_base_create(dev,
519 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_VIEWPORT_STATE,
520 info, 0);
521 if (!state)
522 return XGL_ERROR_OUT_OF_MEMORY;
523
524 state->obj.destroy = viewport_state_destroy;
525
Chia-I Wu97702a62014-08-11 15:33:42 +0800526 ret = viewport_state_init(state, dev->gpu, info);
527 if (ret != XGL_SUCCESS) {
528 intel_viewport_state_destroy(state);
529 return ret;
530 }
Chia-I Wua5714e82014-08-11 15:33:42 +0800531
532 *state_ret = state;
533
534 return XGL_SUCCESS;
535}
536
537void intel_viewport_state_destroy(struct intel_viewport_state *state)
538{
Chia-I Wu97702a62014-08-11 15:33:42 +0800539 icd_free(state->cmd);
Chia-I Wua5714e82014-08-11 15:33:42 +0800540 intel_base_destroy(&state->obj.base);
541}
542
/* intel_obj destroy callback: recover the containing raster state
 * object and free it. */
static void raster_state_destroy(struct intel_obj *obj)
{
    intel_raster_state_destroy(intel_raster_state_from_obj(obj));
}
549
550XGL_RESULT intel_raster_state_create(struct intel_dev *dev,
551 const XGL_RASTER_STATE_CREATE_INFO *info,
552 struct intel_raster_state **state_ret)
553{
554 struct intel_raster_state *state;
555
556 state = (struct intel_raster_state *) intel_base_create(dev,
557 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_RASTER_STATE,
558 info, 0);
559 if (!state)
560 return XGL_ERROR_OUT_OF_MEMORY;
561
562 state->obj.destroy = raster_state_destroy;
563
Chia-I Wu97702a62014-08-11 15:33:42 +0800564 raster_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800565
566 *state_ret = state;
567
568 return XGL_SUCCESS;
569}
570
571void intel_raster_state_destroy(struct intel_raster_state *state)
572{
573 intel_base_destroy(&state->obj.base);
574}
575
/* intel_obj destroy callback: recover the containing MSAA state
 * object and free it. */
static void msaa_state_destroy(struct intel_obj *obj)
{
    intel_msaa_state_destroy(intel_msaa_state_from_obj(obj));
}
582
583XGL_RESULT intel_msaa_state_create(struct intel_dev *dev,
584 const XGL_MSAA_STATE_CREATE_INFO *info,
585 struct intel_msaa_state **state_ret)
586{
587 struct intel_msaa_state *state;
588
589 state = (struct intel_msaa_state *) intel_base_create(dev,
590 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_MSAA_STATE,
591 info, 0);
592 if (!state)
593 return XGL_ERROR_OUT_OF_MEMORY;
594
595 state->obj.destroy = msaa_state_destroy;
596
Chia-I Wu97702a62014-08-11 15:33:42 +0800597 msaa_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800598
599 *state_ret = state;
600
601 return XGL_SUCCESS;
602}
603
604void intel_msaa_state_destroy(struct intel_msaa_state *state)
605{
606 intel_base_destroy(&state->obj.base);
607}
608
/* intel_obj destroy callback: recover the containing blend state
 * object and free it. */
static void blend_state_destroy(struct intel_obj *obj)
{
    intel_blend_state_destroy(intel_blend_state_from_obj(obj));
}
615
616XGL_RESULT intel_blend_state_create(struct intel_dev *dev,
617 const XGL_COLOR_BLEND_STATE_CREATE_INFO *info,
618 struct intel_blend_state **state_ret)
619{
620 struct intel_blend_state *state;
621
622 state = (struct intel_blend_state *) intel_base_create(dev,
623 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_MSAA_STATE,
624 info, 0);
625 if (!state)
626 return XGL_ERROR_OUT_OF_MEMORY;
627
628 state->obj.destroy = blend_state_destroy;
629
Chia-I Wu97702a62014-08-11 15:33:42 +0800630 blend_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800631
632 *state_ret = state;
633
634 return XGL_SUCCESS;
635}
636
637void intel_blend_state_destroy(struct intel_blend_state *state)
638{
639 intel_base_destroy(&state->obj.base);
640}
641
/* intel_obj destroy callback: recover the containing depth/stencil
 * state object and free it. */
static void ds_state_destroy(struct intel_obj *obj)
{
    intel_ds_state_destroy(intel_ds_state_from_obj(obj));
}
648
649XGL_RESULT intel_ds_state_create(struct intel_dev *dev,
650 const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info,
651 struct intel_ds_state **state_ret)
652{
653 struct intel_ds_state *state;
Chia-I Wu97702a62014-08-11 15:33:42 +0800654 XGL_RESULT ret;
Chia-I Wua5714e82014-08-11 15:33:42 +0800655
656 state = (struct intel_ds_state *) intel_base_create(dev,
Courtney Goeltzenleuchtere7dc05f2014-08-22 16:26:07 -0600657 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_DEPTH_STENCIL_STATE,
Chia-I Wua5714e82014-08-11 15:33:42 +0800658 info, 0);
659 if (!state)
660 return XGL_ERROR_OUT_OF_MEMORY;
661
662 state->obj.destroy = ds_state_destroy;
663
Chia-I Wu97702a62014-08-11 15:33:42 +0800664 ret = ds_state_init(state, dev->gpu, info);
665 if (ret != XGL_SUCCESS) {
666 intel_ds_state_destroy(state);
667 return ret;
668 }
Chia-I Wua5714e82014-08-11 15:33:42 +0800669
670 *state_ret = state;
671
672 return XGL_SUCCESS;
673}
674
675void intel_ds_state_destroy(struct intel_ds_state *state)
676{
677 intel_base_destroy(&state->obj.base);
678}
679
680XGL_RESULT XGLAPI intelCreateViewportState(
681 XGL_DEVICE device,
682 const XGL_VIEWPORT_STATE_CREATE_INFO* pCreateInfo,
683 XGL_VIEWPORT_STATE_OBJECT* pState)
684{
685 struct intel_dev *dev = intel_dev(device);
686
687 return intel_viewport_state_create(dev, pCreateInfo,
688 (struct intel_viewport_state **) pState);
689}
690
691XGL_RESULT XGLAPI intelCreateRasterState(
692 XGL_DEVICE device,
693 const XGL_RASTER_STATE_CREATE_INFO* pCreateInfo,
694 XGL_RASTER_STATE_OBJECT* pState)
695{
696 struct intel_dev *dev = intel_dev(device);
697
698 return intel_raster_state_create(dev, pCreateInfo,
699 (struct intel_raster_state **) pState);
700}
701
702XGL_RESULT XGLAPI intelCreateMsaaState(
703 XGL_DEVICE device,
704 const XGL_MSAA_STATE_CREATE_INFO* pCreateInfo,
705 XGL_MSAA_STATE_OBJECT* pState)
706{
707 struct intel_dev *dev = intel_dev(device);
708
709 return intel_msaa_state_create(dev, pCreateInfo,
710 (struct intel_msaa_state **) pState);
711}
712
713XGL_RESULT XGLAPI intelCreateColorBlendState(
714 XGL_DEVICE device,
715 const XGL_COLOR_BLEND_STATE_CREATE_INFO* pCreateInfo,
716 XGL_COLOR_BLEND_STATE_OBJECT* pState)
717{
718 struct intel_dev *dev = intel_dev(device);
719
720 return intel_blend_state_create(dev, pCreateInfo,
721 (struct intel_blend_state **) pState);
722}
723
724XGL_RESULT XGLAPI intelCreateDepthStencilState(
725 XGL_DEVICE device,
726 const XGL_DEPTH_STENCIL_STATE_CREATE_INFO* pCreateInfo,
727 XGL_DEPTH_STENCIL_STATE_OBJECT* pState)
728{
729 struct intel_dev *dev = intel_dev(device);
730
731 return intel_ds_state_create(dev, pCreateInfo,
732 (struct intel_ds_state **) pState);
733}