blob: 4ed8ff9bfaa668b6dc3198c9e419c281e076b76c [file] [log] [blame]
Chia-I Wua5714e82014-08-11 15:33:42 +08001/*
2 * XGL
3 *
4 * Copyright (C) 2014 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 */
24
Chia-I Wu97702a62014-08-11 15:33:42 +080025#include <math.h>
26#include "genhw/genhw.h"
Chia-I Wua5714e82014-08-11 15:33:42 +080027#include "dev.h"
28#include "state.h"
29
Chia-I Wu97702a62014-08-11 15:33:42 +080030static int translate_compare_func(XGL_COMPARE_FUNC func)
31{
32 switch (func) {
33 case XGL_COMPARE_NEVER: return GEN6_COMPAREFUNCTION_NEVER;
34 case XGL_COMPARE_LESS: return GEN6_COMPAREFUNCTION_LESS;
35 case XGL_COMPARE_EQUAL: return GEN6_COMPAREFUNCTION_EQUAL;
36 case XGL_COMPARE_LESS_EQUAL: return GEN6_COMPAREFUNCTION_LEQUAL;
37 case XGL_COMPARE_GREATER: return GEN6_COMPAREFUNCTION_GREATER;
38 case XGL_COMPARE_NOT_EQUAL: return GEN6_COMPAREFUNCTION_NOTEQUAL;
39 case XGL_COMPARE_GREATER_EQUAL: return GEN6_COMPAREFUNCTION_GEQUAL;
40 case XGL_COMPARE_ALWAYS: return GEN6_COMPAREFUNCTION_ALWAYS;
41 default:
42 assert(!"unknown compare_func");
43 return GEN6_COMPAREFUNCTION_NEVER;
44 }
45}
46
47static int translate_stencil_op(XGL_STENCIL_OP op)
48{
49 switch (op) {
50 case XGL_STENCIL_OP_KEEP: return GEN6_STENCILOP_KEEP;
51 case XGL_STENCIL_OP_ZERO: return GEN6_STENCILOP_ZERO;
52 case XGL_STENCIL_OP_REPLACE: return GEN6_STENCILOP_REPLACE;
53 case XGL_STENCIL_OP_INC_CLAMP: return GEN6_STENCILOP_INCRSAT;
54 case XGL_STENCIL_OP_DEC_CLAMP: return GEN6_STENCILOP_DECRSAT;
55 case XGL_STENCIL_OP_INVERT: return GEN6_STENCILOP_INVERT;
56 case XGL_STENCIL_OP_INC_WRAP: return GEN6_STENCILOP_INCR;
57 case XGL_STENCIL_OP_DEC_WRAP: return GEN6_STENCILOP_DECR;
58 default:
59 assert(!"unknown stencil op");
60 return GEN6_STENCILOP_KEEP;
61 }
62}
63
64static int translate_blend_func(XGL_BLEND_FUNC func)
65{
66 switch (func) {
67 case XGL_BLEND_FUNC_ADD: return GEN6_BLENDFUNCTION_ADD;
68 case XGL_BLEND_FUNC_SUBTRACT: return GEN6_BLENDFUNCTION_SUBTRACT;
69 case XGL_BLEND_FUNC_REVERSE_SUBTRACT: return GEN6_BLENDFUNCTION_REVERSE_SUBTRACT;
70 case XGL_BLEND_FUNC_MIN: return GEN6_BLENDFUNCTION_MIN;
71 case XGL_BLEND_FUNC_MAX: return GEN6_BLENDFUNCTION_MAX;
72 default:
73 assert(!"unknown blend func");
74 return GEN6_BLENDFUNCTION_ADD;
75 };
76}
77
78static int translate_blend(XGL_BLEND blend)
79{
80 switch (blend) {
81 case XGL_BLEND_ZERO: return GEN6_BLENDFACTOR_ZERO;
82 case XGL_BLEND_ONE: return GEN6_BLENDFACTOR_ONE;
83 case XGL_BLEND_SRC_COLOR: return GEN6_BLENDFACTOR_SRC_COLOR;
84 case XGL_BLEND_ONE_MINUS_SRC_COLOR: return GEN6_BLENDFACTOR_INV_SRC_COLOR;
85 case XGL_BLEND_DEST_COLOR: return GEN6_BLENDFACTOR_DST_COLOR;
86 case XGL_BLEND_ONE_MINUS_DEST_COLOR: return GEN6_BLENDFACTOR_INV_DST_COLOR;
87 case XGL_BLEND_SRC_ALPHA: return GEN6_BLENDFACTOR_SRC_ALPHA;
88 case XGL_BLEND_ONE_MINUS_SRC_ALPHA: return GEN6_BLENDFACTOR_INV_SRC_ALPHA;
89 case XGL_BLEND_DEST_ALPHA: return GEN6_BLENDFACTOR_DST_ALPHA;
90 case XGL_BLEND_ONE_MINUS_DEST_ALPHA: return GEN6_BLENDFACTOR_INV_DST_ALPHA;
91 case XGL_BLEND_CONSTANT_COLOR: return GEN6_BLENDFACTOR_CONST_COLOR;
92 case XGL_BLEND_ONE_MINUS_CONSTANT_COLOR: return GEN6_BLENDFACTOR_INV_CONST_COLOR;
93 case XGL_BLEND_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_CONST_ALPHA;
94 case XGL_BLEND_ONE_MINUS_CONSTANT_ALPHA: return GEN6_BLENDFACTOR_INV_CONST_ALPHA;
95 case XGL_BLEND_SRC_ALPHA_SATURATE: return GEN6_BLENDFACTOR_SRC_ALPHA_SATURATE;
96 case XGL_BLEND_SRC1_COLOR: return GEN6_BLENDFACTOR_SRC1_COLOR;
97 case XGL_BLEND_ONE_MINUS_SRC1_COLOR: return GEN6_BLENDFACTOR_INV_SRC1_COLOR;
98 case XGL_BLEND_SRC1_ALPHA: return GEN6_BLENDFACTOR_SRC1_ALPHA;
99 case XGL_BLEND_ONE_MINUS_SRC1_ALPHA: return GEN6_BLENDFACTOR_INV_SRC1_ALPHA;
100 default:
101 assert(!"unknown blend factor");
102 return GEN6_BLENDFACTOR_ONE;
103 };
104}
105
106static void
107raster_state_init(struct intel_raster_state *state,
108 const struct intel_gpu *gpu,
109 const XGL_RASTER_STATE_CREATE_INFO *info)
110{
111 switch (info->fillMode) {
112 case XFL_FILL_POINTS:
113 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_POINT |
114 GEN7_SF_DW1_BACKFACE_POINT;
115 break;
116 case XGL_FILL_WIREFRAME:
117 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_WIREFRAME |
118 GEN7_SF_DW1_BACKFACE_WIREFRAME;
119 break;
120 case XGL_FILL_SOLID:
121 default:
122 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTFACE_SOLID |
123 GEN7_SF_DW1_BACKFACE_SOLID;
124 break;
125 }
126
127 if (info->frontFace == XGL_FRONT_FACE_CCW) {
128 state->cmd_sf_fill |= GEN7_SF_DW1_FRONTWINDING_CCW;
129 state->cmd_clip_cull |= GEN7_CLIP_DW1_FRONTWINDING_CCW;
130 }
131
132 switch (info->cullMode) {
133 case XGL_CULL_NONE:
134 default:
135 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_NONE;
136 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_NONE;
137 break;
138 case XGL_CULL_FRONT:
139 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_FRONT;
140 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_FRONT;
141 break;
142 case XGL_CULL_BACK:
143 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BACK;
144 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BACK;
145 break;
146 case XGL_CULL_FRONT_AND_BACK:
147 state->cmd_sf_cull |= GEN7_SF_DW2_CULLMODE_BOTH;
148 state->cmd_clip_cull |= GEN7_CLIP_DW1_CULLMODE_BOTH;
149 break;
150 }
151
152 /* only GEN7+ needs cull mode in 3DSTATE_CLIP */
153 if (intel_gpu_gen(gpu) == INTEL_GEN(6))
154 state->cmd_clip_cull = 0;
155
156 /* XXX scale info->depthBias back into NDC */
157 state->cmd_depth_offset_const = u_fui((float) info->depthBias * 2.0f);
158 state->cmd_depth_offset_clamp = u_fui(info->depthBiasClamp);
159 state->cmd_depth_offset_scale = u_fui(info->slopeScaledDepthBias);
160}
161
/*
 * Compute the guardband rectangle (in screen space) for a viewport centered
 * at (center_x, center_y).  Results are returned through the four out
 * parameters.
 */
static void
viewport_get_guardband(const struct intel_gpu *gpu,
                       int center_x, int center_y,
                       int *min_gbx, int *max_gbx,
                       int *min_gby, int *max_gby)
{
    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 234:
     *
     *   "Per-Device Guardband Extents
     *
     *     - Supported X,Y ScreenSpace "Guardband" Extent: [-16K,16K-1]
     *     - Maximum Post-Clamp Delta (X or Y): 16K"
     *
     *   "In addition, in order to be correctly rendered, objects must have a
     *    screenspace bounding box not exceeding 8K in the X or Y direction.
     *    This additional restriction must also be comprehended by software,
     *    i.e., enforced by use of clipping."
     *
     * From the Ivy Bridge PRM, volume 2 part 1, page 248:
     *
     *   "Per-Device Guardband Extents
     *
     *     - Supported X,Y ScreenSpace "Guardband" Extent: [-32K,32K-1]
     *     - Maximum Post-Clamp Delta (X or Y): N/A"
     *
     *   "In addition, in order to be correctly rendered, objects must have a
     *    screenspace bounding box not exceeding 8K in the X or Y direction.
     *    This additional restriction must also be comprehended by software,
     *    i.e., enforced by use of clipping."
     *
     * Combined, the bounding box of any object can not exceed 8K in both
     * width and height.
     *
     * Below we set the guardband as a square of length 8K, centered at where
     * the viewport is.  This makes sure all objects passing the GB test are
     * valid to the renderer, and those failing the XY clipping have a
     * better chance of passing the GB test.
     */
    const int max_extent = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 32768 : 16384;
    const int half_len = 8192 / 2;

    /* make sure the guardband is within the valid range */
    if (center_x - half_len < -max_extent)
        center_x = -max_extent + half_len;
    else if (center_x + half_len > max_extent - 1)
        center_x = max_extent - half_len;

    if (center_y - half_len < -max_extent)
        center_y = -max_extent + half_len;
    else if (center_y + half_len > max_extent - 1)
        center_y = max_extent - half_len;

    /* plain int arithmetic; the old (float) casts were pointless round-trips */
    *min_gbx = center_x - half_len;
    *max_gbx = center_x + half_len;
    *min_gby = center_y - half_len;
    *max_gby = center_y + half_len;
}
220
221static XGL_RESULT
222viewport_state_init(struct intel_viewport_state *state,
223 const struct intel_gpu *gpu,
224 const XGL_VIEWPORT_STATE_CREATE_INFO *info)
225{
226 const XGL_UINT sf_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 8;
227 const XGL_UINT clip_stride = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 4;
228 uint32_t *sf_viewport, *clip_viewport, *cc_viewport, *scissor_rect;
229 XGL_UINT i;
230
231 INTEL_GPU_ASSERT(gpu, 6, 7.5);
232
233 state->scissor_enable = info->scissorEnable;
234
235 if (intel_gpu_gen(gpu) >= INTEL_GEN(7))
236 state->size = (16 + 2 + 2) * info->viewportCount;
237 else
238 state->size = (8 + 4 + 2 + 2) * info->viewportCount;
239
240 state->cmd = icd_alloc(sizeof(uint32_t) * state->size,
241 0, XGL_SYSTEM_ALLOC_INTERNAL);
242 if (!state->cmd)
243 return XGL_ERROR_OUT_OF_MEMORY;
244
245 sf_viewport = state->cmd;
246 clip_viewport = sf_viewport + 8;
247 cc_viewport = sf_viewport +
248 ((intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 16 : 12);
249 scissor_rect = cc_viewport + 2;
250
251 for (i = 0; i < info->viewportCount; i++) {
252 const XGL_VIEWPORT *viewport = &info->viewports[i];
253 const XGL_RECT *scissor = &info->scissors[i];
254 uint32_t *dw = NULL;
255 float translate[3], scale[3];
256 int min_gbx, max_gbx, min_gby, max_gby;
257
258 scale[0] = viewport->width / 2.0f;
259 scale[1] = viewport->height / 2.0f;
260 scale[2] = (viewport->maxDepth - viewport->minDepth) / 2.0;
261 translate[0] = viewport->originX + scale[0];
262 translate[1] = viewport->originY + scale[1];
263 translate[2] = (viewport->minDepth + viewport->maxDepth) / 2.0f;
264
265 viewport_get_guardband(gpu, (int) translate[0], (int) translate[1],
266 &min_gbx, &max_gbx, &min_gby, &max_gby);
267
268 /* SF_VIEWPORT */
269 dw = sf_viewport;
270 dw[0] = u_fui(scale[0]);
271 dw[1] = u_fui(scale[1]);
272 dw[2] = u_fui(scale[2]);
273 dw[3] = u_fui(translate[0]);
274 dw[4] = u_fui(translate[1]);
275 dw[5] = u_fui(translate[2]);
276 dw[6] = 0;
277 dw[7] = 0;
278 sf_viewport += sf_stride;
279
280 /* CLIP_VIEWPORT */
281 dw = clip_viewport;
282 dw[0] = ((float) min_gbx - translate[0]) / fabsf(scale[0]);
283 dw[1] = ((float) max_gbx - translate[0]) / fabsf(scale[0]);
284 dw[2] = ((float) min_gby - translate[1]) / fabsf(scale[1]);
285 dw[3] = ((float) max_gby - translate[1]) / fabsf(scale[1]);
286 clip_viewport += clip_stride;
287
288 /* CC_VIEWPORT */
289 dw = cc_viewport;
290 dw[0] = u_fui(viewport->minDepth);
291 dw[1] = u_fui(viewport->maxDepth);
292 cc_viewport += 2;
293
294 /* SCISSOR_RECT */
295 dw = scissor_rect;
296 if (scissor->extent.width && scissor->extent.height) {
297 dw[0] = (scissor->offset.y & 0xffff) << 16 |
298 (scissor->offset.x & 0xffff);
299 dw[1] =
300 ((scissor->offset.y + scissor->extent.height - 1) & 0xffff) << 16 |
301 ((scissor->offset.x + scissor->extent.width - 1) & 0xffff);
302 } else {
303 dw[0] = 1 << 16 | 1;
304 dw[1] = 0;
305 }
306 scissor_rect += 2;
307 }
308
309 return XGL_SUCCESS;
310}
311
/*
 * Build the 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK commands for the
 * requested sample count into state->cmd.
 */
static void
msaa_state_init(struct intel_msaa_state *state,
                const struct intel_gpu *gpu,
                const XGL_MSAA_STATE_CREATE_INFO *info)
{
    /* taken from Mesa */
    static const uint32_t brw_multisample_positions_1x_2x = 0x0088cc44;
    static const uint32_t brw_multisample_positions_4x = 0xae2ae662;
    static const uint32_t brw_multisample_positions_8x[] = { 0xdbb39d79, 0x3ff55117 };
    uint32_t cmd, cmd_len;
    uint32_t *dw = state->cmd;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);
    /* worst case below: 4 DWs (GEN7 MULTISAMPLE) + 2 DWs (SAMPLE_MASK) */
    STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 6);

    /* 3DSTATE_MULTISAMPLE */
    cmd = GEN_RENDER_CMD(3D, GEN6, 3DSTATE_MULTISAMPLE);
    /* GEN7+ adds a fourth DW for the second half of the 8x sample positions */
    cmd_len = (intel_gpu_gen(gpu) >= INTEL_GEN(7)) ? 4 : 3;

    dw[0] = cmd | (cmd_len - 2);
    if (info->samples <= 1) {
        dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_1;
        dw[2] = brw_multisample_positions_1x_2x;
    } else if (info->samples <= 4 || intel_gpu_gen(gpu) == INTEL_GEN(6)) {
        /* GEN6 tops out at 4x, so >4 samples are silently clamped here */
        dw[1] = GEN6_MULTISAMPLE_DW1_NUMSAMPLES_4;
        dw[2] = brw_multisample_positions_4x;
    } else {
        dw[1] = GEN7_MULTISAMPLE_DW1_NUMSAMPLES_8;
        dw[2] = brw_multisample_positions_8x[0];
        dw[3] = brw_multisample_positions_8x[1];
    }

    /* second command starts right after the first */
    dw += cmd_len;

    /* 3DSTATE_SAMPLE_MASK */
    cmd = GEN_RENDER_CMD(3D, GEN6, 3DSTATE_SAMPLE_MASK);
    cmd_len = 2;

    dw[0] = cmd | (cmd_len - 2);
    /* mask off bits beyond the active sample count */
    dw[1] = info->sampleMask & ((1 << info->samples) - 1);
}
353
354static void
355blend_state_init(struct intel_blend_state *state,
356 const struct intel_gpu *gpu,
357 const XGL_COLOR_BLEND_STATE_CREATE_INFO *info)
358{
359 XGL_UINT i;
360
361 INTEL_GPU_ASSERT(gpu, 6, 7.5);
362
363 for (i = 0; i < ARRAY_SIZE(info->attachment); i++) {
364 const XGL_COLOR_ATTACHMENT_BLEND_STATE *att = &info->attachment[i];
365 uint32_t *dw = &state->cmd[2 * i];
366
367 if (att->blendEnable) {
368 dw[0] = 1 << 31 |
369 translate_blend_func(att->blendFuncAlpha) << 26 |
370 translate_blend(att->srcBlendAlpha) << 20 |
371 translate_blend(att->destBlendAlpha) << 15 |
372 translate_blend_func(att->blendFuncColor) << 11 |
373 translate_blend(att->srcBlendColor) << 5 |
374 translate_blend(att->destBlendColor);
375
376 if (att->blendFuncAlpha != att->blendFuncColor ||
377 att->srcBlendAlpha != att->srcBlendColor ||
378 att->destBlendAlpha != att->destBlendColor)
379 dw[0] |= 1 << 30;
380 }
381
382 dw[1] = GEN6_BLEND_DW1_COLORCLAMP_RTFORMAT |
383 0x3;
384 }
385
386 memcpy(state->cmd_blend_color, info->blendConst, sizeof(info->blendConst));
387}
388
/*
 * Build the GEN6 DEPTH_STENCIL_STATE (3 DWords) into state->cmd and cache
 * the packed stencil reference values.
 *
 * Returns XGL_ERROR_UNKNOWN when depth bounds test is requested (not
 * supported here), XGL_SUCCESS otherwise.
 */
static XGL_RESULT
ds_state_init(struct intel_ds_state *state,
              const struct intel_gpu *gpu,
              const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info)
{
    uint32_t *dw = state->cmd;

    INTEL_GPU_ASSERT(gpu, 6, 7.5);

    STATIC_ASSERT(ARRAY_SIZE(state->cmd) >= 3);

    /* depth bounds test is not implemented */
    if (info->depthBoundsEnable)
        return XGL_ERROR_UNKNOWN;

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 359:
     *
     *     "If the Depth Buffer is either undefined or does not have a surface
     *      format of D32_FLOAT_S8X24_UINT or D24_UNORM_S8_UINT and separate
     *      stencil buffer is disabled, Stencil Test Enable must be DISABLED"
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 370:
     *
     *     "This field (Stencil Test Enable) cannot be enabled if
     *      Surface Format in 3DSTATE_DEPTH_BUFFER is set to D16_UNORM."
     *
     * TODO We do not check these yet.
     *
     * NOTE(review): when stencilTestEnable is false, dw[0], dw[1] and
     * state->cmd_stencil_ref are left unwritten here -- presumably the
     * object is zero-initialized by intel_base_create(); verify.
     */
    if (info->stencilTestEnable) {
        /* bit 31: stencil test enable; bit 15: back-face stencil enable */
        dw[0] = 1 << 31 |
                translate_compare_func(info->front.stencilFunc) << 28 |
                translate_stencil_op(info->front.stencilFailOp) << 25 |
                translate_stencil_op(info->front.stencilDepthFailOp) << 22 |
                translate_stencil_op(info->front.stencilPassOp) << 19 |
                1 << 15 |
                translate_compare_func(info->back.stencilFunc) << 12 |
                translate_stencil_op(info->back.stencilFailOp) << 9 |
                translate_stencil_op(info->back.stencilDepthFailOp) << 6 |
                translate_stencil_op(info->back.stencilPassOp) << 3;

        /* bit 18: stencil buffer write enable */
        if (info->stencilWriteMask)
            dw[0] |= 1 << 18;

        dw[1] = (info->stencilReadMask & 0xff) << 24 |
                (info->stencilWriteMask & 0xff) << 16;

        /* front/back reference values packed for COLOR_CALC_STATE */
        state->cmd_stencil_ref = (info->front.stencilRef & 0xff) << 24 |
                                 (info->back.stencilRef & 0xff) << 16;
    }

    /*
     * From the Sandy Bridge PRM, volume 2 part 1, page 360:
     *
     *     "Enabling the Depth Test function without defining a Depth Buffer is
     *      UNDEFINED."
     *
     * From the Sandy Bridge PRM, volume 2 part 1, page 375:
     *
     *     "A Depth Buffer must be defined before enabling writes to it, or
     *      operation is UNDEFINED."
     *
     * TODO We do not check these yet.
     */
    if (info->depthTestEnable) {
        /* bit 31: depth test enable; bit 26: depth write enable */
        dw[2] = 1 << 31 |
                translate_compare_func(info->depthFunc) << 27 |
                (bool) info->depthWriteEnable << 26;
    } else {
        /* depth test disabled: pass everything, never write */
        dw[2] = GEN6_COMPAREFUNCTION_ALWAYS << 27;
    }

    return XGL_SUCCESS;
}
462
/* intel_obj destroy callback: recover the containing viewport state and free it */
static void viewport_state_destroy(struct intel_obj *obj)
{
    intel_viewport_state_destroy(intel_viewport_state_from_obj(obj));
}
469
470XGL_RESULT intel_viewport_state_create(struct intel_dev *dev,
471 const XGL_VIEWPORT_STATE_CREATE_INFO *info,
472 struct intel_viewport_state **state_ret)
473{
474 struct intel_viewport_state *state;
Chia-I Wu97702a62014-08-11 15:33:42 +0800475 XGL_RESULT ret;
Chia-I Wua5714e82014-08-11 15:33:42 +0800476
477 state = (struct intel_viewport_state *) intel_base_create(dev,
478 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_VIEWPORT_STATE,
479 info, 0);
480 if (!state)
481 return XGL_ERROR_OUT_OF_MEMORY;
482
483 state->obj.destroy = viewport_state_destroy;
484
Chia-I Wu97702a62014-08-11 15:33:42 +0800485 ret = viewport_state_init(state, dev->gpu, info);
486 if (ret != XGL_SUCCESS) {
487 intel_viewport_state_destroy(state);
488 return ret;
489 }
Chia-I Wua5714e82014-08-11 15:33:42 +0800490
491 *state_ret = state;
492
493 return XGL_SUCCESS;
494}
495
void intel_viewport_state_destroy(struct intel_viewport_state *state)
{
    /* free the CPU-side command buffer allocated by viewport_state_init() */
    icd_free(state->cmd);
    intel_base_destroy(&state->obj.base);
}
501
/* intel_obj destroy callback: recover the containing raster state and free it */
static void raster_state_destroy(struct intel_obj *obj)
{
    intel_raster_state_destroy(intel_raster_state_from_obj(obj));
}
508
509XGL_RESULT intel_raster_state_create(struct intel_dev *dev,
510 const XGL_RASTER_STATE_CREATE_INFO *info,
511 struct intel_raster_state **state_ret)
512{
513 struct intel_raster_state *state;
514
515 state = (struct intel_raster_state *) intel_base_create(dev,
516 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_RASTER_STATE,
517 info, 0);
518 if (!state)
519 return XGL_ERROR_OUT_OF_MEMORY;
520
521 state->obj.destroy = raster_state_destroy;
522
Chia-I Wu97702a62014-08-11 15:33:42 +0800523 raster_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800524
525 *state_ret = state;
526
527 return XGL_SUCCESS;
528}
529
void intel_raster_state_destroy(struct intel_raster_state *state)
{
    /* no extra resources beyond the base object */
    intel_base_destroy(&state->obj.base);
}
534
/* intel_obj destroy callback: recover the containing MSAA state and free it */
static void msaa_state_destroy(struct intel_obj *obj)
{
    intel_msaa_state_destroy(intel_msaa_state_from_obj(obj));
}
541
542XGL_RESULT intel_msaa_state_create(struct intel_dev *dev,
543 const XGL_MSAA_STATE_CREATE_INFO *info,
544 struct intel_msaa_state **state_ret)
545{
546 struct intel_msaa_state *state;
547
548 state = (struct intel_msaa_state *) intel_base_create(dev,
549 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_MSAA_STATE,
550 info, 0);
551 if (!state)
552 return XGL_ERROR_OUT_OF_MEMORY;
553
554 state->obj.destroy = msaa_state_destroy;
555
Chia-I Wu97702a62014-08-11 15:33:42 +0800556 msaa_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800557
558 *state_ret = state;
559
560 return XGL_SUCCESS;
561}
562
void intel_msaa_state_destroy(struct intel_msaa_state *state)
{
    /* no extra resources beyond the base object */
    intel_base_destroy(&state->obj.base);
}
567
/* intel_obj destroy callback: recover the containing blend state and free it */
static void blend_state_destroy(struct intel_obj *obj)
{
    intel_blend_state_destroy(intel_blend_state_from_obj(obj));
}
574
575XGL_RESULT intel_blend_state_create(struct intel_dev *dev,
576 const XGL_COLOR_BLEND_STATE_CREATE_INFO *info,
577 struct intel_blend_state **state_ret)
578{
579 struct intel_blend_state *state;
580
581 state = (struct intel_blend_state *) intel_base_create(dev,
582 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_MSAA_STATE,
583 info, 0);
584 if (!state)
585 return XGL_ERROR_OUT_OF_MEMORY;
586
587 state->obj.destroy = blend_state_destroy;
588
Chia-I Wu97702a62014-08-11 15:33:42 +0800589 blend_state_init(state, dev->gpu, info);
Chia-I Wua5714e82014-08-11 15:33:42 +0800590
591 *state_ret = state;
592
593 return XGL_SUCCESS;
594}
595
void intel_blend_state_destroy(struct intel_blend_state *state)
{
    /* no extra resources beyond the base object */
    intel_base_destroy(&state->obj.base);
}
600
/* intel_obj destroy callback: recover the containing depth-stencil state and free it */
static void ds_state_destroy(struct intel_obj *obj)
{
    intel_ds_state_destroy(intel_ds_state_from_obj(obj));
}
607
608XGL_RESULT intel_ds_state_create(struct intel_dev *dev,
609 const XGL_DEPTH_STENCIL_STATE_CREATE_INFO *info,
610 struct intel_ds_state **state_ret)
611{
612 struct intel_ds_state *state;
Chia-I Wu97702a62014-08-11 15:33:42 +0800613 XGL_RESULT ret;
Chia-I Wua5714e82014-08-11 15:33:42 +0800614
615 state = (struct intel_ds_state *) intel_base_create(dev,
616 sizeof(*state), dev->base.dbg, XGL_DBG_OBJECT_MSAA_STATE,
617 info, 0);
618 if (!state)
619 return XGL_ERROR_OUT_OF_MEMORY;
620
621 state->obj.destroy = ds_state_destroy;
622
Chia-I Wu97702a62014-08-11 15:33:42 +0800623 ret = ds_state_init(state, dev->gpu, info);
624 if (ret != XGL_SUCCESS) {
625 intel_ds_state_destroy(state);
626 return ret;
627 }
Chia-I Wua5714e82014-08-11 15:33:42 +0800628
629 *state_ret = state;
630
631 return XGL_SUCCESS;
632}
633
void intel_ds_state_destroy(struct intel_ds_state *state)
{
    /* no extra resources beyond the base object */
    intel_base_destroy(&state->obj.base);
}
638
639XGL_RESULT XGLAPI intelCreateViewportState(
640 XGL_DEVICE device,
641 const XGL_VIEWPORT_STATE_CREATE_INFO* pCreateInfo,
642 XGL_VIEWPORT_STATE_OBJECT* pState)
643{
644 struct intel_dev *dev = intel_dev(device);
645
646 return intel_viewport_state_create(dev, pCreateInfo,
647 (struct intel_viewport_state **) pState);
648}
649
650XGL_RESULT XGLAPI intelCreateRasterState(
651 XGL_DEVICE device,
652 const XGL_RASTER_STATE_CREATE_INFO* pCreateInfo,
653 XGL_RASTER_STATE_OBJECT* pState)
654{
655 struct intel_dev *dev = intel_dev(device);
656
657 return intel_raster_state_create(dev, pCreateInfo,
658 (struct intel_raster_state **) pState);
659}
660
661XGL_RESULT XGLAPI intelCreateMsaaState(
662 XGL_DEVICE device,
663 const XGL_MSAA_STATE_CREATE_INFO* pCreateInfo,
664 XGL_MSAA_STATE_OBJECT* pState)
665{
666 struct intel_dev *dev = intel_dev(device);
667
668 return intel_msaa_state_create(dev, pCreateInfo,
669 (struct intel_msaa_state **) pState);
670}
671
672XGL_RESULT XGLAPI intelCreateColorBlendState(
673 XGL_DEVICE device,
674 const XGL_COLOR_BLEND_STATE_CREATE_INFO* pCreateInfo,
675 XGL_COLOR_BLEND_STATE_OBJECT* pState)
676{
677 struct intel_dev *dev = intel_dev(device);
678
679 return intel_blend_state_create(dev, pCreateInfo,
680 (struct intel_blend_state **) pState);
681}
682
683XGL_RESULT XGLAPI intelCreateDepthStencilState(
684 XGL_DEVICE device,
685 const XGL_DEPTH_STENCIL_STATE_CREATE_INFO* pCreateInfo,
686 XGL_DEPTH_STENCIL_STATE_OBJECT* pState)
687{
688 struct intel_dev *dev = intel_dev(device);
689
690 return intel_ds_state_create(dev, pCreateInfo,
691 (struct intel_ds_state **) pState);
692}