intel: compute max_threads in intel_pipeline_shader
This avoids repeated computations in command buffer building and enables us to
compute the total scratch space when we add support for it.
diff --git a/icd/intel/pipeline_shader.c b/icd/intel/pipeline_shader.c
index 7512689..a97f0b5 100644
--- a/icd/intel/pipeline_shader.c
+++ b/icd/intel/pipeline_shader.c
@@ -427,6 +427,14 @@
return XGL_SUCCESS;
}
+static void pipeline_post_build_shader(struct intel_pipeline *pipeline,
+ struct intel_pipeline_shader *sh,
+ const XGL_PIPELINE_SHADER *sh_info)
+{
+ sh->max_threads =
+ intel_gpu_get_max_threads(pipeline->dev->gpu, sh_info->stage);
+}
+
XGL_RESULT pipeline_build_shaders(struct intel_pipeline *pipeline,
const struct intel_pipeline_create_info *info)
{
@@ -446,6 +454,19 @@
if (ret == XGL_SUCCESS && info->compute.cs.shader)
ret = pipeline_build_cs(pipeline, info);
+ if (pipeline->active_shaders & SHADER_VERTEX_FLAG)
+ pipeline_post_build_shader(pipeline, &pipeline->vs, &info->vs);
+ if (pipeline->active_shaders & SHADER_TESS_CONTROL_FLAG)
+ pipeline_post_build_shader(pipeline, &pipeline->tcs, &info->tcs);
+ if (pipeline->active_shaders & SHADER_TESS_EVAL_FLAG)
+ pipeline_post_build_shader(pipeline, &pipeline->tes, &info->tes);
+ if (pipeline->active_shaders & SHADER_GEOMETRY_FLAG)
+ pipeline_post_build_shader(pipeline, &pipeline->gs, &info->gs);
+ if (pipeline->active_shaders & SHADER_FRAGMENT_FLAG)
+ pipeline_post_build_shader(pipeline, &pipeline->fs, &info->fs);
+ if (pipeline->active_shaders & SHADER_COMPUTE_FLAG)
+ pipeline_post_build_shader(pipeline, &pipeline->cs, &info->compute.cs);
+
return ret;
}
@@ -500,6 +521,19 @@
return NULL;
}
+ switch (id) {
+ case INTEL_DEV_META_VS_FILL_MEM:
+ case INTEL_DEV_META_VS_COPY_MEM:
+ case INTEL_DEV_META_VS_COPY_MEM_UNALIGNED:
+ sh->max_threads = intel_gpu_get_max_threads(dev->gpu,
+ XGL_SHADER_STAGE_VERTEX);
+ break;
+ default:
+ sh->max_threads = intel_gpu_get_max_threads(dev->gpu,
+ XGL_SHADER_STAGE_FRAGMENT);
+ break;
+ }
+
return sh;
}