/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *   Chia-I Wu <olv@lunarg.com>
 */

#include "shader.h"
#include "pipeline_priv.h"
#include "compiler/pipeline/pipeline_compiler_interface.h"

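/*
 * Map a descriptor slot type and per-type index to the corresponding entry
 * in the flattened slot array, which is laid out as
 * [render targets][resources][UAVs][samplers][vertex inputs].
 */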
static struct intel_pipeline_rmap_slot *rmap_get_slot(struct intel_pipeline_rmap *rmap,
                                                      XGL_DESCRIPTOR_SET_SLOT_TYPE type,
                                                      XGL_UINT index)
{
    const XGL_UINT resource_offset = rmap->rt_count;
    const XGL_UINT uav_offset = resource_offset + rmap->resource_count;
    const XGL_UINT sampler_offset = uav_offset + rmap->uav_count;
    const XGL_UINT ve_offset = sampler_offset + rmap->sampler_count;
    struct intel_pipeline_rmap_slot *slot;

    switch (type) {
    case XGL_SLOT_UNUSED:
        slot = NULL;
        break;
    case XGL_SLOT_SHADER_RESOURCE:
        slot = &rmap->slots[resource_offset + index];
        break;
    case XGL_SLOT_SHADER_UAV:
        slot = &rmap->slots[uav_offset + index];
        break;
    case XGL_SLOT_SHADER_SAMPLER:
        slot = &rmap->slots[sampler_offset + index];
        break;
    case XGL_SLOT_VERTEX_INPUT:
        slot = &rmap->slots[ve_offset + index];
        break;
    default:
        assert(!"unknown rmap slot type");
        slot = NULL;
        break;
    }

    return slot;
}

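/*
 * Recursively walk a descriptor set mapping and record how each used slot is
 * reached: a direct index into the mapping, or, for slots inside nested
 * descriptor sets, the full path of indices leading to it.
 */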
static bool rmap_init_slots_with_path(struct intel_pipeline_rmap *rmap,
                                      const XGL_DESCRIPTOR_SET_MAPPING *mapping,
                                      XGL_UINT *nest_path,
                                      XGL_UINT nest_level)
{
    XGL_UINT i;

    for (i = 0; i < mapping->descriptorCount; i++) {
        const XGL_DESCRIPTOR_SLOT_INFO *info = &mapping->pDescriptorInfo[i];
        struct intel_pipeline_rmap_slot *slot;

        if (info->slotObjectType == XGL_SLOT_NEXT_DESCRIPTOR_SET) {
            nest_path[nest_level] = i;
            if (!rmap_init_slots_with_path(rmap, info->pNextLevelSet,
                                           nest_path, nest_level + 1))
                return false;

            continue;
        }

        slot = rmap_get_slot(rmap, info->slotObjectType,
                             info->shaderEntityIndex);
        if (!slot)
            continue;

        assert(!slot->path_len);
        slot->path_len = nest_level + 1;

        if (nest_level) {
            slot->u.path = icd_alloc(sizeof(slot->u.path[0]) *
                    slot->path_len, 0, XGL_SYSTEM_ALLOC_INTERNAL);
            if (!slot->u.path) {
                slot->path_len = 0;
                return false;
            }

            memcpy(slot->u.path, nest_path,
                   sizeof(slot->u.path[0]) * nest_level);
            slot->u.path[nest_level] = i;
        } else {
            slot->u.index = i;
        }
    }

    return true;
}

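/*
 * Initialize the rmap slots from a descriptor set mapping, using a scratch
 * array of "depth" elements to track the current nesting path.
 */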
static bool rmap_init_slots(struct intel_pipeline_rmap *rmap,
                            const XGL_DESCRIPTOR_SET_MAPPING *mapping,
                            XGL_UINT depth)
{
    XGL_UINT *nest_path;
    bool ok;

    if (depth) {
        nest_path = icd_alloc(sizeof(nest_path[0]) * depth,
                0, XGL_SYSTEM_ALLOC_INTERNAL_TEMP);
        if (!nest_path)
            return false;
    } else {
        nest_path = NULL;
    }

    ok = rmap_init_slots_with_path(rmap, mapping, nest_path, 0);

    if (nest_path)
        icd_free(nest_path);

    return ok;
}

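/* Grow the per-type slot count so that the given index fits. */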
static void rmap_update_count(struct intel_pipeline_rmap *rmap,
                              XGL_DESCRIPTOR_SET_SLOT_TYPE type,
                              XGL_UINT index)
{
    switch (type) {
    case XGL_SLOT_UNUSED:
        break;
    case XGL_SLOT_SHADER_RESOURCE:
        if (rmap->resource_count < index + 1)
            rmap->resource_count = index + 1;
        break;
    case XGL_SLOT_SHADER_UAV:
        if (rmap->uav_count < index + 1)
            rmap->uav_count = index + 1;
        break;
    case XGL_SLOT_SHADER_SAMPLER:
        if (rmap->sampler_count < index + 1)
            rmap->sampler_count = index + 1;
        break;
    case XGL_SLOT_VERTEX_INPUT:
        if (rmap->vb_count < index + 1)
            rmap->vb_count = index + 1;
        break;
    default:
        assert(!"unknown rmap slot type");
        break;
    }
}

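/*
 * Recursively size the rmap from a descriptor set mapping: update the
 * per-type slot counts and return the maximum nesting depth encountered.
 */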
static XGL_UINT rmap_init_counts(struct intel_pipeline_rmap *rmap,
                                 const XGL_DESCRIPTOR_SET_MAPPING *mapping)
{
    XGL_UINT depth = 0;
    XGL_UINT i;

    for (i = 0; i < mapping->descriptorCount; i++) {
        const XGL_DESCRIPTOR_SLOT_INFO *info = &mapping->pDescriptorInfo[i];

        if (info->slotObjectType == XGL_SLOT_NEXT_DESCRIPTOR_SET) {
            const XGL_UINT d = rmap_init_counts(rmap,
                    info->pNextLevelSet);
            if (depth < d + 1)
                depth = d + 1;

            continue;
        }

        rmap_update_count(rmap, info->slotObjectType,
                          info->shaderEntityIndex);
    }

    return depth;
}

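/*
 * Free the nesting paths of multi-level slots, then the slot array and the
 * rmap itself.
 */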
static void rmap_destroy(struct intel_pipeline_rmap *rmap)
{
    XGL_UINT i;

    for (i = 0; i < rmap->slot_count; i++) {
        struct intel_pipeline_rmap_slot *slot = &rmap->slots[i];

        switch (slot->path_len) {
        case 0:
        case 1:
        case INTEL_PIPELINE_RMAP_SLOT_RT:
        case INTEL_PIPELINE_RMAP_SLOT_DYN:
            break;
        default:
            icd_free(slot->u.path);
            break;
        }
    }

    icd_free(rmap->slots);
    icd_free(rmap);
}

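/*
 * Create the resource map for a shader stage: size it from the descriptor
 * set mapping, allocate and initialize the slots, then fill in the render
 * target slots and the dynamic memory view slot.
 */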
static struct intel_pipeline_rmap *rmap_create(struct intel_dev *dev,
                                               const XGL_DESCRIPTOR_SET_MAPPING *mapping,
                                               const XGL_DYNAMIC_MEMORY_VIEW_SLOT_INFO *dyn,
                                               XGL_UINT rt_count)
{
    struct intel_pipeline_rmap *rmap;
    struct intel_pipeline_rmap_slot *slot;
    XGL_UINT depth, rt;

    rmap = icd_alloc(sizeof(*rmap), 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!rmap)
        return NULL;

    memset(rmap, 0, sizeof(*rmap));

    depth = rmap_init_counts(rmap, mapping);

    /* add RTs and the dynamic memory view */
    rmap_update_count(rmap, dyn->slotObjectType, dyn->shaderEntityIndex);
    rmap->rt_count = rt_count;

    rmap->slot_count = rmap->rt_count + rmap->resource_count +
        rmap->uav_count + rmap->sampler_count + rmap->vb_count;

    rmap->slots = icd_alloc(sizeof(rmap->slots[0]) * rmap->slot_count,
            0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!rmap->slots) {
        icd_free(rmap);
        return NULL;
    }

    memset(rmap->slots, 0, sizeof(rmap->slots[0]) * rmap->slot_count);

    if (!rmap_init_slots(rmap, mapping, depth)) {
        rmap_destroy(rmap);
        return NULL;
    }

    /* add RTs and the dynamic memory view */
    slot = rmap_get_slot(rmap, dyn->slotObjectType, dyn->shaderEntityIndex);
    if (slot) {
        slot->path_len = INTEL_PIPELINE_RMAP_SLOT_DYN;
        slot->u.index = 0;
    }
    for (rt = 0; rt < rmap->rt_count; rt++) {
        slot = &rmap->slots[rt];
        slot->path_len = INTEL_PIPELINE_RMAP_SLOT_RT;
        slot->u.index = rt;
    }

    return rmap;
}

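/*
 * Copy the compiled kernel and its metadata from the shader IR into the
 * pipeline shader.
 */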
static XGL_RESULT pipeline_shader_copy_ir(struct intel_pipeline_shader *sh,
                                          const struct intel_shader *ir)
{
    sh->pCode = icd_alloc(ir->ir->size, 0, XGL_SYSTEM_ALLOC_INTERNAL_SHADER);
    if (!sh->pCode)
        return XGL_ERROR_OUT_OF_MEMORY;

    memcpy(sh->pCode, ir->ir->kernel, ir->ir->size);
    sh->codeSize = ir->ir->size;

    sh->uses = ir->uses;

    sh->in_count = ir->in_count;
    sh->out_count = ir->out_count;
    sh->sampler_count = ir->sampler_count;
    sh->surface_count = ir->surface_count;
    sh->urb_grf_start = ir->urb_grf_start;
    sh->barycentric_interps = ir->barycentric_interps;

    return XGL_SUCCESS;
}

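/*
 * The pipeline_build_*() functions below each populate one pipeline shader
 * stage: copy the stage's code out of the intel_shader object, create a
 * resource map where one is needed, and mark the stage active.
 */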
static XGL_RESULT pipeline_build_vs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *vs = &pipeline->vs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(vs, intel_shader(info->vs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->vs.linkConstBufferCount);

    vs->rmap = rmap_create(pipeline->dev,
            &info->vs.descriptorSetMapping[0],
            &info->vs.dynamicMemoryViewMapping, 0);
    if (!vs->rmap) {
        icd_free(vs->pCode);
        return XGL_ERROR_OUT_OF_MEMORY;
    }

    pipeline->active_shaders |= SHADER_VERTEX_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_tcs(struct intel_pipeline *pipeline,
                                     const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *tcs = &pipeline->tcs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(tcs, intel_shader(info->tcs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->tcs.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_TESS_CONTROL_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_tes(struct intel_pipeline *pipeline,
                                     const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *tes = &pipeline->tes;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(tes, intel_shader(info->tes.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->tes.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_TESS_EVAL_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_gs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *gs = &pipeline->gs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(gs, intel_shader(info->gs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->gs.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_GEOMETRY_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_fs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *fs = &pipeline->fs;
    XGL_RESULT ret;

    assert(!info->fs.linkConstBufferCount);

    // Right here, lower the IR to ISA using NOS
    // This must be after assignment of pipeline constant
    // buffer, but before the ISA copy (which can eventually
    // go away)

    ret = intel_pipeline_shader_compile(fs, intel_shader(info->fs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    // continue to copy the ISA out of kernel* until the above call
    // is hooked up completely

    ret = pipeline_shader_copy_ir(fs, intel_shader(info->fs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    /* assuming one RT; need to parse the shader */
    fs->rmap = rmap_create(pipeline->dev,
            &info->fs.descriptorSetMapping[0],
            &info->fs.dynamicMemoryViewMapping, 1);
    if (!fs->rmap) {
        icd_free(fs->pCode);
        return XGL_ERROR_OUT_OF_MEMORY;
    }

    pipeline->active_shaders |= SHADER_FRAGMENT_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_cs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *cs = &pipeline->cs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(cs, intel_shader(info->compute.cs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->compute.cs.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_COMPUTE_FLAG;

    return XGL_SUCCESS;
}

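/*
 * Build every shader stage present in the pipeline create info, stopping at
 * the first failure.
 */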
XGL_RESULT pipeline_build_shaders(struct intel_pipeline *pipeline,
                                  const struct intel_pipeline_create_info *info)
{
    XGL_RESULT ret = XGL_SUCCESS;

    if (ret == XGL_SUCCESS && info->vs.shader)
        ret = pipeline_build_vs(pipeline, info);
    if (ret == XGL_SUCCESS && info->tcs.shader)
        ret = pipeline_build_tcs(pipeline, info);
    if (ret == XGL_SUCCESS && info->tes.shader)
        ret = pipeline_build_tes(pipeline, info);
    if (ret == XGL_SUCCESS && info->gs.shader)
        ret = pipeline_build_gs(pipeline, info);
    if (ret == XGL_SUCCESS && info->fs.shader)
        ret = pipeline_build_fs(pipeline, info);

    if (ret == XGL_SUCCESS && info->compute.cs.shader)
        ret = pipeline_build_cs(pipeline, info);

    return ret;
}

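/* Free the code and resource map owned by a single pipeline shader stage. */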
static void pipeline_tear_shader(struct intel_pipeline_shader *sh)
{
    icd_free(sh->pCode);
    if (sh->rmap)
        rmap_destroy(sh->rmap);
}

void pipeline_tear_shaders(struct intel_pipeline *pipeline)
{
    if (pipeline->active_shaders & SHADER_VERTEX_FLAG) {
        pipeline_tear_shader(&pipeline->vs);
    }

    if (pipeline->active_shaders & SHADER_TESS_CONTROL_FLAG) {
        pipeline_tear_shader(&pipeline->tcs);
    }

    if (pipeline->active_shaders & SHADER_TESS_EVAL_FLAG) {
        pipeline_tear_shader(&pipeline->tes);
    }

    if (pipeline->active_shaders & SHADER_GEOMETRY_FLAG) {
        pipeline_tear_shader(&pipeline->gs);
    }

    if (pipeline->active_shaders & SHADER_FRAGMENT_FLAG) {
        pipeline_tear_shader(&pipeline->fs);
    }

    if (pipeline->active_shaders & SHADER_COMPUTE_FLAG) {
        pipeline_tear_shader(&pipeline->cs);
    }
}

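/*
 * Create a pipeline shader for one of the driver's internal meta operations.
 * The kernels are pre-assembled GEN6/GEN7 SIMD8 fragment shaders (see the
 * disassembly in the comments): a clear kernel that writes a constant color
 * to the render target, and a copy kernel that reads data through the
 * sampler and writes it to the render target.
 */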
struct intel_pipeline_shader *intel_pipeline_shader_create_meta(struct intel_dev *dev,
                                                                enum intel_dev_meta_shader id)
{
    static const uint32_t gen6_clear_code[] = {
        0x00600001, 0x202003be, 0x00000040, 0x00000000, // mov(8) m1<1>F g2<0,1,0>F { align1 1Q };
        0x00600001, 0x204003be, 0x00000044, 0x00000000, // mov(8) m2<1>F g2.1<0,1,0>F { align1 1Q };
        0x00600001, 0x206003be, 0x00000048, 0x00000000, // mov(8) m3<1>F g2.2<0,1,0>F { align1 1Q };
        0x00600001, 0x208003be, 0x0000004c, 0x00000000, // mov(8) m4<1>F g2.3<0,1,0>F { align1 1Q };
        0x05600032, 0x20001fc8, 0x008d0020, 0x88019400, // sendc(8) null m1<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    static const uint32_t gen7_clear_code[] = {
        0x20010b01, 0x00027c00, // mov(8) g124<1>F g2<0,1,0>F { align1 1Q compacted };
        0x20150b01, 0x00027d00, // mov(8) g125<1>F g2.1<0,1,0>F { align1 1Q compacted };
        0x20190b01, 0x00027e00, // mov(8) g126<1>F g2.2<0,1,0>F { align1 1Q compacted };
        0x201d0b01, 0x00027f00, // mov(8) g127<1>F g2.3<0,1,0>F { align1 1Q compacted };
        0x05600032, 0x20001fa8, 0x008d0f80, 0x88031400, // sendc(8) null g124<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    static const uint32_t gen6_copy_mem_code[] = {
        0x00600040, 0x20a06d29, 0x00480028, 0x10101010, // add(8) g5<1>UW g1.4<2,4,0>UW 0x10101010V { align1 1Q };
        0x00600001, 0x20a00062, 0x00000000, 0x00000000, // mov(8) m5<1>UD 0x00000000UD { align1 1Q };
        0x00600001, 0x20c0013d, 0x008d00a0, 0x00000000, // mov(8) g6<1>F g5<8,8,1>UW { align1 1Q };
        0x00600040, 0x20607fbd, 0x008d00c0, 0x3f000000, // add(8) g3<1>F g6<8,8,1>F 0.5F { align1 1Q };
        0x00600001, 0x204003a5, 0x008d0060, 0x00000000, // mov(8) g2<1>D g3<8,8,1>F { align1 1Q };
        0x00600040, 0x204014a6, 0x008d0040, 0x00000080, // add(8) m2<1>D g2<8,8,1>D g4<0,1,0>D { align1 1Q };
        0x02600031, 0x20401fc9, 0x008d0040, 0x08417001, // send(8) g2<1>UW m2<8,8,1>F
                                                        // sampler (1, 0, 7, 1) mlen 4 rlen 4 { align1 1Q };
        0x00600001, 0x202003be, 0x008d0040, 0x00000000, // mov(8) m1<1>F g2<8,8,1>F { align1 1Q };
        0x00600001, 0x204003be, 0x008d0060, 0x00000000, // mov(8) m2<1>F g3<8,8,1>F { align1 1Q };
        0x00600001, 0x206003be, 0x008d0080, 0x00000000, // mov(8) m3<1>F g4<8,8,1>F { align1 1Q };
        0x00600001, 0x208003be, 0x008d00a0, 0x00000000, // mov(8) m4<1>F g5<8,8,1>F { align1 1Q };
        0x05600032, 0x20001fc8, 0x008d0020, 0x88019400, // sendc(8) null m1<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    static const uint32_t gen7_copy_mem_code[] = {
        0x00600040, 0x20a06d29, 0x00480028, 0x10101010, // add(8) g5<1>UW g1.4<2,4,0>UW 0x10101010V { align1 1Q };
        0x00600001, 0x20600065, 0x00000000, 0x00000000, // mov(8) g3<1>D 0x00000000UD { align1 1Q };
        0x00600001, 0x20c0013d, 0x008d00a0, 0x00000000, // mov(8) g6<1>F g5<8,8,1>UW { align1 1Q };
        0x00600040, 0x20a07fbd, 0x008d00c0, 0x3f000000, // add(8) g5<1>F g6<8,8,1>F 0.5F { align1 1Q };
        0x2000eb01, 0x00050707, // mov(8) g7<1>D g5<8,8,1>F { align1 1Q compacted };
        0x20018b40, 0x04070207, // add(8) g2<1>D g7<8,8,1>D g4<0,1,0>D { align1 1Q compacted };
        0x02600031, 0x2f801fa9, 0x008d0040, 0x04427001, // send(8) g124<1>UW g2<8,8,1>F
                                                        // sampler (1, 0, 7, 1) mlen 2 rlen 4 { align1 1Q };
        0x05600032, 0x20001fa8, 0x008d0f80, 0x88031400, // sendc(8) null g124<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    XGL_UINT surface_count, urb_grf_start;
    struct intel_pipeline_shader *sh;
    const void *code;
    XGL_SIZE code_size;

    switch (intel_gpu_gen(dev->gpu)) {
    case INTEL_GEN(6):
        if (id == INTEL_DEV_META_FS_COPY_MEM) {
            code = gen6_copy_mem_code;
            code_size = sizeof(gen6_copy_mem_code);
            surface_count = 2;
            urb_grf_start = 4;
        } else {
            code = gen6_clear_code;
            code_size = sizeof(gen6_clear_code);
            surface_count = 1;
            urb_grf_start = 2;
        }
        break;
    case INTEL_GEN(7):
    case INTEL_GEN(7.5):
        if (id == INTEL_DEV_META_FS_COPY_MEM) {
            code = gen7_copy_mem_code;
            code_size = sizeof(gen7_copy_mem_code);
            surface_count = 2;
            urb_grf_start = 4;
        } else {
            code = gen7_clear_code;
            code_size = sizeof(gen7_clear_code);
            surface_count = 1;
            urb_grf_start = 2;
        }
        break;
    default:
        code = NULL;
        break;
    }

    if (!code)
        return NULL;

    sh = icd_alloc(sizeof(*sh), 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!sh)
        return NULL;
    memset(sh, 0, sizeof(*sh));

    sh->pCode = icd_alloc(code_size, 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!sh->pCode) {
        icd_free(sh);
        return NULL;
    }

    memcpy(sh->pCode, code, code_size);
    sh->codeSize = code_size;

    sh->out_count = 1;
    sh->surface_count = surface_count;
    sh->urb_grf_start = urb_grf_start;

    return sh;
}

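/*
 * Destroy a standalone pipeline shader, such as one created by
 * intel_pipeline_shader_create_meta().
 */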
void intel_pipeline_shader_destroy(struct intel_pipeline_shader *sh)
{
    if (sh->rmap)
        rmap_destroy(sh->rmap);
    if (sh->pCode)
        icd_free(sh->pCode);
    icd_free(sh);
}