/*
 * XGL
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include "shader.h"
#include "pipeline_priv.h"
#include "compiler/pipeline/pipeline_compiler_interface.h"

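/*
 * Look up the rmap slot for a descriptor of the given type.  Slots are
 * packed into a single array in the order RTs, resources, UAVs, samplers,
 * and vertex inputs, so the per-type offsets are derived from the counts.
 */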
static struct intel_pipeline_rmap_slot *rmap_get_slot(struct intel_pipeline_rmap *rmap,
                                                      XGL_DESCRIPTOR_SET_SLOT_TYPE type,
                                                      XGL_UINT index)
{
    const XGL_UINT resource_offset = rmap->rt_count;
    const XGL_UINT uav_offset = resource_offset + rmap->resource_count;
    const XGL_UINT sampler_offset = uav_offset + rmap->uav_count;
    const XGL_UINT ve_offset = sampler_offset + rmap->sampler_count;
    struct intel_pipeline_rmap_slot *slot;

    switch (type) {
    case XGL_SLOT_UNUSED:
        slot = NULL;
        break;
    case XGL_SLOT_SHADER_RESOURCE:
        slot = &rmap->slots[resource_offset + index];
        break;
    case XGL_SLOT_SHADER_UAV:
        slot = &rmap->slots[uav_offset + index];
        break;
    case XGL_SLOT_SHADER_SAMPLER:
        slot = &rmap->slots[sampler_offset + index];
        break;
    case XGL_SLOT_VERTEX_INPUT:
        slot = &rmap->slots[ve_offset + index];
        break;
    default:
        assert(!"unknown rmap slot type");
        slot = NULL;
        break;
    }

    return slot;
}

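/*
 * Walk a descriptor set mapping recursively and record, for each used slot,
 * the nesting path (or a plain index at the top level) that leads to it.
 * nest_path is caller-provided scratch space large enough for the deepest
 * nesting level.
 */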
static bool rmap_init_slots_with_path(struct intel_pipeline_rmap *rmap,
                                      const XGL_DESCRIPTOR_SET_MAPPING *mapping,
                                      XGL_UINT *nest_path,
                                      XGL_UINT nest_level)
{
    XGL_UINT i;

    for (i = 0; i < mapping->descriptorCount; i++) {
        const XGL_DESCRIPTOR_SLOT_INFO *info = &mapping->pDescriptorInfo[i];
        struct intel_pipeline_rmap_slot *slot;

        if (info->slotObjectType == XGL_SLOT_NEXT_DESCRIPTOR_SET) {
            nest_path[nest_level] = i;
            if (!rmap_init_slots_with_path(rmap, info->pNextLevelSet,
                        nest_path, nest_level + 1))
                return false;

            continue;
        }

        slot = rmap_get_slot(rmap, info->slotObjectType,
                info->shaderEntityIndex);
        if (!slot)
            continue;

        assert(!slot->path_len);
        slot->path_len = nest_level + 1;

        if (nest_level) {
            slot->u.path = icd_alloc(sizeof(slot->u.path[0]) *
                    slot->path_len, 0, XGL_SYSTEM_ALLOC_INTERNAL);
            if (!slot->u.path) {
                slot->path_len = 0;
                return false;
            }

            memcpy(slot->u.path, nest_path,
                    sizeof(slot->u.path[0]) * nest_level);
            slot->u.path[nest_level] = i;
        } else {
            slot->u.index = i;
        }
    }

    return true;
}

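/*
 * Allocate scratch space for the deepest nesting path and initialize all
 * slots of the rmap from the descriptor set mapping.
 */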
static bool rmap_init_slots(struct intel_pipeline_rmap *rmap,
                            const XGL_DESCRIPTOR_SET_MAPPING *mapping,
                            XGL_UINT depth)
{
    XGL_UINT *nest_path;
    bool ok;

    if (depth) {
        nest_path = icd_alloc(sizeof(nest_path[0]) * depth,
                0, XGL_SYSTEM_ALLOC_INTERNAL_TEMP);
        if (!nest_path)
            return false;
    } else {
        nest_path = NULL;
    }

    ok = rmap_init_slots_with_path(rmap, mapping, nest_path, 0);

    if (nest_path)
        icd_free(nest_path);

    return ok;
}

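/*
 * Grow the per-type slot count so that it covers the given entity index.
 */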
static void rmap_update_count(struct intel_pipeline_rmap *rmap,
                              XGL_DESCRIPTOR_SET_SLOT_TYPE type,
                              XGL_UINT index)
{
    switch (type) {
    case XGL_SLOT_UNUSED:
        break;
    case XGL_SLOT_SHADER_RESOURCE:
        if (rmap->resource_count < index + 1)
            rmap->resource_count = index + 1;
        break;
    case XGL_SLOT_SHADER_UAV:
        if (rmap->uav_count < index + 1)
            rmap->uav_count = index + 1;
        break;
    case XGL_SLOT_SHADER_SAMPLER:
        if (rmap->sampler_count < index + 1)
            rmap->sampler_count = index + 1;
        break;
    case XGL_SLOT_VERTEX_INPUT:
        if (rmap->vb_count < index + 1)
            rmap->vb_count = index + 1;
        break;
    default:
        assert(!"unknown rmap slot type");
        break;
    }
}

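/*
 * Scan a descriptor set mapping recursively, updating the per-type slot
 * counts, and return the nesting depth of the mapping.
 */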
static XGL_UINT rmap_init_counts(struct intel_pipeline_rmap *rmap,
                                 const XGL_DESCRIPTOR_SET_MAPPING *mapping)
{
    XGL_UINT depth = 0;
    XGL_UINT i;

    for (i = 0; i < mapping->descriptorCount; i++) {
        const XGL_DESCRIPTOR_SLOT_INFO *info = &mapping->pDescriptorInfo[i];

        if (info->slotObjectType == XGL_SLOT_NEXT_DESCRIPTOR_SET) {
            const XGL_UINT d = rmap_init_counts(rmap,
                    info->pNextLevelSet);
            if (depth < d + 1)
                depth = d + 1;

            continue;
        }

        rmap_update_count(rmap, info->slotObjectType,
                info->shaderEntityIndex);
    }

    return depth;
}

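/*
 * Free an rmap, including any per-slot nesting paths that were allocated.
 */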
static void rmap_destroy(struct intel_pipeline_rmap *rmap)
{
    XGL_UINT i;

    for (i = 0; i < rmap->slot_count; i++) {
        struct intel_pipeline_rmap_slot *slot = &rmap->slots[i];

        switch (slot->path_len) {
        case 0:
        case 1:
        case INTEL_PIPELINE_RMAP_SLOT_RT:
        case INTEL_PIPELINE_RMAP_SLOT_DYN:
            break;
        default:
            icd_free(slot->u.path);
            break;
        }
    }

    icd_free(rmap->slots);
    icd_free(rmap);
}

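/*
 * Create a resource map for one shader stage from its descriptor set
 * mapping, its dynamic memory view slot, and the number of render targets.
 * The first rt_count slots are reserved for RTs.
 */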
static struct intel_pipeline_rmap *rmap_create(struct intel_dev *dev,
                                               const XGL_DESCRIPTOR_SET_MAPPING *mapping,
                                               const XGL_DYNAMIC_MEMORY_VIEW_SLOT_INFO *dyn,
                                               XGL_UINT rt_count)
{
    struct intel_pipeline_rmap *rmap;
    struct intel_pipeline_rmap_slot *slot;
    XGL_UINT depth, rt;

    rmap = icd_alloc(sizeof(*rmap), 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!rmap)
        return NULL;

    memset(rmap, 0, sizeof(*rmap));

    depth = rmap_init_counts(rmap, mapping);

    /* add RTs and the dynamic memory view */
    rmap_update_count(rmap, dyn->slotObjectType, dyn->shaderEntityIndex);
    rmap->rt_count = rt_count;

    rmap->slot_count = rmap->rt_count + rmap->resource_count +
        rmap->uav_count + rmap->sampler_count + rmap->vb_count;

    rmap->slots = icd_alloc(sizeof(rmap->slots[0]) * rmap->slot_count,
            0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!rmap->slots) {
        icd_free(rmap);
        return NULL;
    }

    memset(rmap->slots, 0, sizeof(rmap->slots[0]) * rmap->slot_count);

    if (!rmap_init_slots(rmap, mapping, depth)) {
        rmap_destroy(rmap);
        return NULL;
    }

    /* add RTs and the dynamic memory view */
    slot = rmap_get_slot(rmap, dyn->slotObjectType, dyn->shaderEntityIndex);
    if (slot) {
        slot->path_len = INTEL_PIPELINE_RMAP_SLOT_DYN;
        slot->u.index = 0;
    }
    for (rt = 0; rt < rmap->rt_count; rt++) {
        slot = &rmap->slots[rt];
        slot->path_len = INTEL_PIPELINE_RMAP_SLOT_RT;
        slot->u.index = rt;
    }

    return rmap;
}

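/*
 * Copy the compiled kernel and the shader statistics from the intel_shader
 * IR into the pipeline shader.
 */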
static XGL_RESULT pipeline_shader_copy_ir(struct intel_pipeline_shader *sh,
                                          const struct intel_shader *ir)
{
    sh->pCode = icd_alloc(ir->ir->size, 0, XGL_SYSTEM_ALLOC_INTERNAL_SHADER);
    if (!sh->pCode)
        return XGL_ERROR_OUT_OF_MEMORY;

    memcpy(sh->pCode, ir->ir->kernel, ir->ir->size);
    sh->codeSize = ir->ir->size;

    sh->uses = ir->uses;

    sh->in_count = ir->in_count;
    sh->out_count = ir->out_count;
    sh->sampler_count = ir->sampler_count;
    sh->surface_count = ir->surface_count;
    sh->urb_grf_start = ir->urb_grf_start;
    sh->barycentric_interps = ir->barycentric_interps;

    return XGL_SUCCESS;
}

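/*
 * Build the vertex shader stage: copy the IR, compile it to ISA, and create
 * its resource map.
 */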
static XGL_RESULT pipeline_build_vs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *vs = &pipeline->vs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(vs, intel_shader(info->vs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->vs.linkConstBufferCount);

    // Right here, lower the IR to ISA using NOS
    // This must be after assignment of pipeline constant buffer
    ret = intel_pipeline_shader_compile(vs, intel_shader(info->vs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    vs->rmap = rmap_create(pipeline->dev,
            &info->vs.descriptorSetMapping[0],
            &info->vs.dynamicMemoryViewMapping, 0);
    if (!vs->rmap) {
        icd_free(vs->pCode);
        return XGL_ERROR_OUT_OF_MEMORY;
    }

    pipeline->active_shaders |= SHADER_VERTEX_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_tcs(struct intel_pipeline *pipeline,
                                     const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *tcs = &pipeline->tcs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(tcs, intel_shader(info->tcs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->tcs.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_TESS_CONTROL_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_tes(struct intel_pipeline *pipeline,
                                     const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *tes = &pipeline->tes;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(tes, intel_shader(info->tes.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->tes.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_TESS_EVAL_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_gs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *gs = &pipeline->gs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(gs, intel_shader(info->gs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    /* check the GS stage's own info, not the TES's */
    assert(!info->gs.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_GEOMETRY_FLAG;

    return XGL_SUCCESS;
}

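/*
 * Build the fragment shader stage: copy the IR, compile it to ISA, and
 * create its resource map with a single render target.
 */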
static XGL_RESULT pipeline_build_fs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *fs = &pipeline->fs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(fs, intel_shader(info->fs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->fs.linkConstBufferCount);

    // Right here, lower the IR to ISA using NOS
    // This must be after assignment of pipeline constant buffer
    ret = intel_pipeline_shader_compile(fs, intel_shader(info->fs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    /* assuming one RT; need to parse the shader */
    fs->rmap = rmap_create(pipeline->dev,
            &info->fs.descriptorSetMapping[0],
            &info->fs.dynamicMemoryViewMapping, 1);
    if (!fs->rmap) {
        icd_free(fs->pCode);
        return XGL_ERROR_OUT_OF_MEMORY;
    }

    pipeline->active_shaders |= SHADER_FRAGMENT_FLAG;

    return XGL_SUCCESS;
}

static XGL_RESULT pipeline_build_cs(struct intel_pipeline *pipeline,
                                    const struct intel_pipeline_create_info *info)
{
    struct intel_pipeline_shader *cs = &pipeline->cs;
    XGL_RESULT ret;

    ret = pipeline_shader_copy_ir(cs, intel_shader(info->compute.cs.shader));
    if (ret != XGL_SUCCESS)
        return ret;

    assert(!info->compute.cs.linkConstBufferCount);

    pipeline->active_shaders |= SHADER_COMPUTE_FLAG;

    return XGL_SUCCESS;
}

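/*
 * Build all shader stages attached to the pipeline, stopping at the first
 * failure.
 */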
XGL_RESULT pipeline_build_shaders(struct intel_pipeline *pipeline,
                                  const struct intel_pipeline_create_info *info)
{
    XGL_RESULT ret = XGL_SUCCESS;

    if (ret == XGL_SUCCESS && info->vs.shader)
        ret = pipeline_build_vs(pipeline, info);
    if (ret == XGL_SUCCESS && info->tcs.shader)
        ret = pipeline_build_tcs(pipeline, info);
    if (ret == XGL_SUCCESS && info->tes.shader)
        ret = pipeline_build_tes(pipeline, info);
    if (ret == XGL_SUCCESS && info->gs.shader)
        ret = pipeline_build_gs(pipeline, info);
    if (ret == XGL_SUCCESS && info->fs.shader)
        ret = pipeline_build_fs(pipeline, info);

    if (ret == XGL_SUCCESS && info->compute.cs.shader)
        ret = pipeline_build_cs(pipeline, info);

    return ret;
}

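/*
 * Free the resources owned by a single pipeline shader.
 */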
static void pipeline_tear_shader(struct intel_pipeline_shader *sh)
{
    icd_free(sh->pCode);
    if (sh->rmap)
        rmap_destroy(sh->rmap);
}

void pipeline_tear_shaders(struct intel_pipeline *pipeline)
{
    if (pipeline->active_shaders & SHADER_VERTEX_FLAG) {
        pipeline_tear_shader(&pipeline->vs);
    }

    if (pipeline->active_shaders & SHADER_TESS_CONTROL_FLAG) {
        pipeline_tear_shader(&pipeline->tcs);
    }

    if (pipeline->active_shaders & SHADER_TESS_EVAL_FLAG) {
        pipeline_tear_shader(&pipeline->tes);
    }

    if (pipeline->active_shaders & SHADER_GEOMETRY_FLAG) {
        pipeline_tear_shader(&pipeline->gs);
    }

    if (pipeline->active_shaders & SHADER_FRAGMENT_FLAG) {
        pipeline_tear_shader(&pipeline->fs);
    }

    if (pipeline->active_shaders & SHADER_COMPUTE_FLAG) {
        pipeline_tear_shader(&pipeline->cs);
    }
}

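/*
 * Create a built-in meta shader from precompiled Gen6/Gen7 ISA;
 * INTEL_DEV_META_FS_COPY_MEM selects the copy-from-memory shader and other
 * ids get the clear shader.  Returns NULL for unsupported GPU generations.
 */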
struct intel_pipeline_shader *intel_pipeline_shader_create_meta(struct intel_dev *dev,
                                                                enum intel_dev_meta_shader id)
{
    static const uint32_t gen6_clear_code[] = {
        0x00600001, 0x202003be, 0x00000040, 0x00000000, // mov(8) m1<1>F g2<0,1,0>F { align1 1Q };
        0x00600001, 0x204003be, 0x00000044, 0x00000000, // mov(8) m2<1>F g2.1<0,1,0>F { align1 1Q };
        0x00600001, 0x206003be, 0x00000048, 0x00000000, // mov(8) m3<1>F g2.2<0,1,0>F { align1 1Q };
        0x00600001, 0x208003be, 0x0000004c, 0x00000000, // mov(8) m4<1>F g2.3<0,1,0>F { align1 1Q };
        0x05600032, 0x20001fc8, 0x008d0020, 0x88019400, // sendc(8) null m1<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    static const uint32_t gen7_clear_code[] = {
        0x20010b01, 0x00027c00, // mov(8) g124<1>F g2<0,1,0>F { align1 1Q compacted };
        0x20150b01, 0x00027d00, // mov(8) g125<1>F g2.1<0,1,0>F { align1 1Q compacted };
        0x20190b01, 0x00027e00, // mov(8) g126<1>F g2.2<0,1,0>F { align1 1Q compacted };
        0x201d0b01, 0x00027f00, // mov(8) g127<1>F g2.3<0,1,0>F { align1 1Q compacted };
        0x05600032, 0x20001fa8, 0x008d0f80, 0x88031400, // sendc(8) null g124<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    static const uint32_t gen6_copy_mem_code[] = {
        0x00600040, 0x20a06d29, 0x00480028, 0x10101010, // add(8) g5<1>UW g1.4<2,4,0>UW 0x10101010V { align1 1Q };
        0x00600001, 0x20a00062, 0x00000000, 0x00000000, // mov(8) m5<1>UD 0x00000000UD { align1 1Q };
        0x00600001, 0x20c0013d, 0x008d00a0, 0x00000000, // mov(8) g6<1>F g5<8,8,1>UW { align1 1Q };
        0x00600040, 0x20607fbd, 0x008d00c0, 0x3f000000, // add(8) g3<1>F g6<8,8,1>F 0.5F { align1 1Q };
        0x00600001, 0x204003a5, 0x008d0060, 0x00000000, // mov(8) g2<1>D g3<8,8,1>F { align1 1Q };
        0x00600040, 0x204014a6, 0x008d0040, 0x00000080, // add(8) m2<1>D g2<8,8,1>D g4<0,1,0>D { align1 1Q };
        0x02600031, 0x20401fc9, 0x008d0040, 0x08417001, // send(8) g2<1>UW m2<8,8,1>F
                                                        // sampler (1, 0, 7, 1) mlen 4 rlen 4 { align1 1Q };
        0x00600001, 0x202003be, 0x008d0040, 0x00000000, // mov(8) m1<1>F g2<8,8,1>F { align1 1Q };
        0x00600001, 0x204003be, 0x008d0060, 0x00000000, // mov(8) m2<1>F g3<8,8,1>F { align1 1Q };
        0x00600001, 0x206003be, 0x008d0080, 0x00000000, // mov(8) m3<1>F g4<8,8,1>F { align1 1Q };
        0x00600001, 0x208003be, 0x008d00a0, 0x00000000, // mov(8) m4<1>F g5<8,8,1>F { align1 1Q };
        0x05600032, 0x20001fc8, 0x008d0020, 0x88019400, // sendc(8) null m1<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    static const uint32_t gen7_copy_mem_code[] = {
        0x00600040, 0x20a06d29, 0x00480028, 0x10101010, // add(8) g5<1>UW g1.4<2,4,0>UW 0x10101010V { align1 1Q };
        0x00600001, 0x20600065, 0x00000000, 0x00000000, // mov(8) g3<1>D 0x00000000UD { align1 1Q };
        0x00600001, 0x20c0013d, 0x008d00a0, 0x00000000, // mov(8) g6<1>F g5<8,8,1>UW { align1 1Q };
        0x00600040, 0x20a07fbd, 0x008d00c0, 0x3f000000, // add(8) g5<1>F g6<8,8,1>F 0.5F { align1 1Q };
        0x2000eb01, 0x00050707, // mov(8) g7<1>D g5<8,8,1>F { align1 1Q compacted };
        0x20018b40, 0x04070207, // add(8) g2<1>D g7<8,8,1>D g4<0,1,0>D { align1 1Q compacted };
        0x02600031, 0x2f801fa9, 0x008d0040, 0x04427001, // send(8) g124<1>UW g2<8,8,1>F
                                                        // sampler (1, 0, 7, 1) mlen 2 rlen 4 { align1 1Q };
        0x05600032, 0x20001fa8, 0x008d0f80, 0x88031400, // sendc(8) null g124<8,8,1>F
                                                        // render RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
    };
    XGL_UINT surface_count, urb_grf_start;
    struct intel_pipeline_shader *sh;
    const void *code;
    XGL_SIZE code_size;

    switch (intel_gpu_gen(dev->gpu)) {
    case INTEL_GEN(6):
        if (id == INTEL_DEV_META_FS_COPY_MEM) {
            code = gen6_copy_mem_code;
            code_size = sizeof(gen6_copy_mem_code);
            surface_count = 2;
            urb_grf_start = 4;
        } else {
            code = gen6_clear_code;
            code_size = sizeof(gen6_clear_code);
            surface_count = 1;
            urb_grf_start = 2;
        }
        break;
    case INTEL_GEN(7):
    case INTEL_GEN(7.5):
        if (id == INTEL_DEV_META_FS_COPY_MEM) {
            code = gen7_copy_mem_code;
            code_size = sizeof(gen7_copy_mem_code);
            surface_count = 2;
            urb_grf_start = 4;
        } else {
            code = gen7_clear_code;
            code_size = sizeof(gen7_clear_code);
            surface_count = 1;
            urb_grf_start = 2;
        }
        break;
    default:
        code = NULL;
        break;
    }

    if (!code)
        return NULL;

    sh = icd_alloc(sizeof(*sh), 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!sh)
        return NULL;
    memset(sh, 0, sizeof(*sh));

    sh->pCode = icd_alloc(code_size, 0, XGL_SYSTEM_ALLOC_INTERNAL);
    if (!sh->pCode) {
        icd_free(sh);
        return NULL;
    }

    memcpy(sh->pCode, code, code_size);
    sh->codeSize = code_size;

    sh->out_count = 1;
    sh->surface_count = surface_count;
    sh->urb_grf_start = urb_grf_start;

    return sh;
}

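/*
 * Free a heap-allocated pipeline shader, including its code and resource map.
 */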
void intel_pipeline_shader_destroy(struct intel_pipeline_shader *sh)
{
    if (sh->rmap)
        rmap_destroy(sh->rmap);
    if (sh->pCode)
        icd_free(sh->pCode);
    icd_free(sh);
}