blob: e464848034cd0c61f4f66ad7bcec13aeb5e9aef1 [file] [log] [blame]
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
egdaniel22281c12016-03-23 13:49:40 -07008#include "GrVkPipelineState.h"
jvanverth992ad362016-02-26 09:21:02 -08009
10#include "GrPipeline.h"
11#include "GrVkCommandBuffer.h"
12#include "GrVkDescriptorPool.h"
13#include "GrVkGpu.h"
14#include "GrVkImageView.h"
15#include "GrVkMemory.h"
16#include "GrVkPipeline.h"
egdaniel22281c12016-03-23 13:49:40 -070017#include "GrVkRenderTarget.h"
jvanverth992ad362016-02-26 09:21:02 -080018#include "GrVkSampler.h"
19#include "GrVkTexture.h"
20#include "GrVkUniformBuffer.h"
21#include "glsl/GrGLSLFragmentProcessor.h"
22#include "glsl/GrGLSLGeometryProcessor.h"
23#include "glsl/GrGLSLXferProcessor.h"
24
// Takes ownership of the already-created pipeline, pipeline layout and processor objects and
// sets up the descriptor-set bookkeeping for the uniform-buffer set and the sampler set.
GrVkPipelineState::GrVkPipelineState(GrVkGpu* gpu,
                                     const GrVkPipelineState::Desc& desc,
                                     GrVkPipeline* pipeline,
                                     VkPipelineLayout layout,
                                     VkDescriptorSetLayout dsLayout[2],
                                     const BuiltinUniformHandles& builtinUniformHandles,
                                     const UniformInfoArray& uniforms,
                                     uint32_t vertexUniformSize,
                                     uint32_t fragmentUniformSize,
                                     uint32_t numSamplers,
                                     GrGLSLPrimitiveProcessor* geometryProcessor,
                                     GrGLSLXferProcessor* xferProcessor,
                                     const GrGLSLFragProcs& fragmentProcessors)
    : fPipeline(pipeline)
    , fPipelineLayout(layout)
    , fStartDS(SK_MaxS32)  // lowered below to the first descriptor set actually in use
    , fDSCount(0)
    , fBuiltinUniformHandles(builtinUniformHandles)
    , fGeometryProcessor(geometryProcessor)
    , fXferProcessor(xferProcessor)
    , fFragmentProcessors(fragmentProcessors)
    , fDesc(desc)
    , fDataManager(uniforms, vertexUniformSize, fragmentUniformSize)
    , fSamplerPoolManager(dsLayout[GrVkUniformHandler::kSamplerDescSet],
                          VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, numSamplers, gpu)
      // Uniform pool sizes for at most two buffers (vertex + fragment), or zero if neither stage
      // has uniforms.
    , fUniformPoolManager(dsLayout[GrVkUniformHandler::kUniformBufferDescSet],
                          VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
                          (vertexUniformSize || fragmentUniformSize) ? 2 : 0, gpu) {
    fSamplers.setReserve(numSamplers);
    fTextureViews.setReserve(numSamplers);
    fTextures.setReserve(numSamplers);

    fDescriptorSets[0] = VK_NULL_HANDLE;
    fDescriptorSets[1] = VK_NULL_HANDLE;

    // Currently we are always binding a descriptor set for uniform buffers.
    if (vertexUniformSize || fragmentUniformSize) {
        fDSCount++;
        fStartDS = GrVkUniformHandler::kUniformBufferDescSet;
    }
    if (numSamplers) {
        fDSCount++;
        fStartDS = SkTMin(fStartDS, (int)GrVkUniformHandler::kSamplerDescSet);
    }

    // NOTE(review): these buffers appear to be null when the corresponding size is zero —
    // freeGPUResources() null-checks them before releasing.
    fVertexUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, vertexUniformSize, true));
    fFragmentUniformBuffer.reset(GrVkUniformBuffer::Create(gpu, fragmentUniformSize, true));

    fNumSamplers = numSamplers;
}
75
GrVkPipelineState::~GrVkPipelineState() {
    // Must have freed all GPU resources (via freeGPUResources() or abandonGPUResources())
    // before this is destroyed.
    SkASSERT(!fPipeline);
    SkASSERT(!fPipelineLayout);
    SkASSERT(!fSamplers.count());
    SkASSERT(!fTextureViews.count());
    SkASSERT(!fTextures.count());
    // The fragment-processor impls are plain heap objects owned by us (not GPU resources).
    for (int i = 0; i < fFragmentProcessors.count(); ++i) {
        delete fFragmentProcessors[i];
    }
}
87
egdaniel22281c12016-03-23 13:49:40 -070088void GrVkPipelineState::freeTempResources(const GrVkGpu* gpu) {
jvanverth992ad362016-02-26 09:21:02 -080089 for (int i = 0; i < fSamplers.count(); ++i) {
90 fSamplers[i]->unref(gpu);
91 }
92 fSamplers.rewind();
93
94 for (int i = 0; i < fTextureViews.count(); ++i) {
95 fTextureViews[i]->unref(gpu);
96 }
97 fTextureViews.rewind();
98
99 for (int i = 0; i < fTextures.count(); ++i) {
100 fTextures[i]->unref(gpu);
101 }
102 fTextures.rewind();
103}
104
// Releases every Vulkan object this pipeline state owns while the device is still valid.
// Contrast with abandonGPUResources(), which drops the same objects without touching the device.
void GrVkPipelineState::freeGPUResources(const GrVkGpu* gpu) {
    if (fPipeline) {
        fPipeline->unref(gpu);
        fPipeline = nullptr;
    }

    if (fPipelineLayout) {
        GR_VK_CALL(gpu->vkInterface(), DestroyPipelineLayout(gpu->device(),
                                                             fPipelineLayout,
                                                             nullptr));
        fPipelineLayout = VK_NULL_HANDLE;
    }

    // The uniform buffers may be null (e.g. a stage with no uniforms), so check before releasing.
    if (fVertexUniformBuffer) {
        fVertexUniformBuffer->release(gpu);
    }

    if (fFragmentUniformBuffer) {
        fFragmentUniformBuffer->release(gpu);
    }

    fSamplerPoolManager.freeGPUResources(gpu);
    fUniformPoolManager.freeGPUResources(gpu);

    // Also drop the per-draw sampler/view/texture refs.
    this->freeTempResources(gpu);
}
131
egdaniel22281c12016-03-23 13:49:40 -0700132void GrVkPipelineState::abandonGPUResources() {
jvanverth992ad362016-02-26 09:21:02 -0800133 fPipeline->unrefAndAbandon();
134 fPipeline = nullptr;
egdanielc2dc1b22016-03-18 13:18:23 -0700135
jvanverth9846ef22016-03-02 12:08:22 -0800136 fPipelineLayout = VK_NULL_HANDLE;
jvanverth992ad362016-02-26 09:21:02 -0800137
138 fVertexUniformBuffer->abandon();
139 fFragmentUniformBuffer->abandon();
140
141 for (int i = 0; i < fSamplers.count(); ++i) {
142 fSamplers[i]->unrefAndAbandon();
143 }
144 fSamplers.rewind();
145
146 for (int i = 0; i < fTextureViews.count(); ++i) {
147 fTextureViews[i]->unrefAndAbandon();
148 }
149 fTextureViews.rewind();
150
151 for (int i = 0; i < fTextures.count(); ++i) {
152 fTextures[i]->unrefAndAbandon();
153 }
154 fTextures.rewind();
egdanielc2dc1b22016-03-18 13:18:23 -0700155
156 fSamplerPoolManager.abandonGPUResources();
157 fUniformPoolManager.abandonGPUResources();
jvanverth992ad362016-02-26 09:21:02 -0800158}
159
160static void append_texture_bindings(const GrProcessor& processor,
161 SkTArray<const GrTextureAccess*>* textureBindings) {
162 if (int numTextures = processor.numTextures()) {
163 const GrTextureAccess** bindings = textureBindings->push_back_n(numTextures);
164 int i = 0;
165 do {
166 bindings[i] = &processor.textureAccess(i);
167 } while (++i < numTextures);
168 }
169}
170
// Pushes the current uniform values and texture bindings for this draw into fDataManager and
// the Vulkan descriptor sets.
void GrVkPipelineState::setData(GrVkGpu* gpu,
                                const GrPrimitiveProcessor& primProc,
                                const GrPipeline& pipeline) {
    // This is here to protect against someone calling setData multiple times in a row without
    // freeing the tempData between calls.
    this->freeTempResources(gpu);

    this->setRenderTargetState(pipeline);

    // Each processor writes its uniforms into fDataManager and contributes its textures here.
    SkSTArray<8, const GrTextureAccess*> textureBindings;

    fGeometryProcessor->setData(fDataManager, primProc);
    append_texture_bindings(primProc, &textureBindings);

    for (int i = 0; i < fFragmentProcessors.count(); ++i) {
        const GrFragmentProcessor& processor = pipeline.getFragmentProcessor(i);
        fFragmentProcessors[i]->setData(fDataManager, processor);
        // The geometry processor owns the coord-transform uniforms for each FP.
        fGeometryProcessor->setTransformData(primProc, fDataManager, i,
                                             processor.coordTransforms());
        append_texture_bindings(processor, &textureBindings);
    }

    fXferProcessor->setData(fDataManager, pipeline.getXferProcessor());
    append_texture_bindings(pipeline.getXferProcessor(), &textureBindings);

    // Get new descriptor sets
    if (fNumSamplers) {
        // The sampler set is refreshed on every setData() call since the bound textures change.
        fSamplerPoolManager.getNewDescriptorSet(gpu,
                                          &fDescriptorSets[GrVkUniformHandler::kSamplerDescSet]);
        this->writeSamplers(gpu, textureBindings);
    }

    if (fVertexUniformBuffer.get() || fFragmentUniformBuffer.get()) {
        // Only take a fresh uniform descriptor set when uploadUniformBuffers() reports a change
        // (presumably: new data was uploaded — TODO confirm) or when we have never written one.
        if (fDataManager.uploadUniformBuffers(gpu, fVertexUniformBuffer, fFragmentUniformBuffer) ||
            VK_NULL_HANDLE == fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]) {
            fUniformPoolManager.getNewDescriptorSet(gpu,
                                    &fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet]);
            this->writeUniformBuffers(gpu);
        }
    }
}
212
egdaniel22281c12016-03-23 13:49:40 -0700213void GrVkPipelineState::writeUniformBuffers(const GrVkGpu* gpu) {
jvanverth992ad362016-02-26 09:21:02 -0800214 VkWriteDescriptorSet descriptorWrites[2];
215 memset(descriptorWrites, 0, 2 * sizeof(VkWriteDescriptorSet));
216
217 uint32_t firstUniformWrite = 0;
218 uint32_t uniformBindingUpdateCount = 0;
219
egdanield524f162016-02-26 13:06:55 -0800220 VkDescriptorBufferInfo vertBufferInfo;
jvanverth992ad362016-02-26 09:21:02 -0800221 // Vertex Uniform Buffer
222 if (fVertexUniformBuffer.get()) {
223 ++uniformBindingUpdateCount;
jvanverth992ad362016-02-26 09:21:02 -0800224 memset(&vertBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
225 vertBufferInfo.buffer = fVertexUniformBuffer->buffer();
226 vertBufferInfo.offset = 0;
227 vertBufferInfo.range = fVertexUniformBuffer->size();
228
229 descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
230 descriptorWrites[0].pNext = nullptr;
egdanielb4aa3622016-04-06 13:47:08 -0700231 descriptorWrites[0].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
jvanverth992ad362016-02-26 09:21:02 -0800232 descriptorWrites[0].dstBinding = GrVkUniformHandler::kVertexBinding;
233 descriptorWrites[0].dstArrayElement = 0;
234 descriptorWrites[0].descriptorCount = 1;
235 descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
236 descriptorWrites[0].pImageInfo = nullptr;
237 descriptorWrites[0].pBufferInfo = &vertBufferInfo;
238 descriptorWrites[0].pTexelBufferView = nullptr;
egdaniel22281c12016-03-23 13:49:40 -0700239
240 fVertexUniformBuffer->addMemoryBarrier(gpu,
241 VK_ACCESS_HOST_WRITE_BIT,
242 VK_ACCESS_UNIFORM_READ_BIT,
243 VK_PIPELINE_STAGE_HOST_BIT,
244 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
245 false);
jvanverth992ad362016-02-26 09:21:02 -0800246 }
247
egdanield524f162016-02-26 13:06:55 -0800248 VkDescriptorBufferInfo fragBufferInfo;
jvanverth992ad362016-02-26 09:21:02 -0800249 // Fragment Uniform Buffer
250 if (fFragmentUniformBuffer.get()) {
251 if (0 == uniformBindingUpdateCount) {
252 firstUniformWrite = 1;
253 }
254 ++uniformBindingUpdateCount;
jvanverth992ad362016-02-26 09:21:02 -0800255 memset(&fragBufferInfo, 0, sizeof(VkDescriptorBufferInfo));
256 fragBufferInfo.buffer = fFragmentUniformBuffer->buffer();
257 fragBufferInfo.offset = 0;
258 fragBufferInfo.range = fFragmentUniformBuffer->size();
259
260 descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
261 descriptorWrites[1].pNext = nullptr;
egdanielb4aa3622016-04-06 13:47:08 -0700262 descriptorWrites[1].dstSet = fDescriptorSets[GrVkUniformHandler::kUniformBufferDescSet];
jvanverth992ad362016-02-26 09:21:02 -0800263 descriptorWrites[1].dstBinding = GrVkUniformHandler::kFragBinding;;
264 descriptorWrites[1].dstArrayElement = 0;
265 descriptorWrites[1].descriptorCount = 1;
266 descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
267 descriptorWrites[1].pImageInfo = nullptr;
268 descriptorWrites[1].pBufferInfo = &fragBufferInfo;
269 descriptorWrites[1].pTexelBufferView = nullptr;
egdaniel22281c12016-03-23 13:49:40 -0700270
271 fFragmentUniformBuffer->addMemoryBarrier(gpu,
272 VK_ACCESS_HOST_WRITE_BIT,
273 VK_ACCESS_UNIFORM_READ_BIT,
274 VK_PIPELINE_STAGE_HOST_BIT,
275 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
276 false);
jvanverth992ad362016-02-26 09:21:02 -0800277 }
278
279 if (uniformBindingUpdateCount) {
280 GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
281 uniformBindingUpdateCount,
282 &descriptorWrites[firstUniformWrite],
283 0, nullptr));
284 }
285}
286
// For each bound texture: acquires a compatible sampler, refs the texture's image resource and
// view (released later by freeTempResources()), transitions the image to SHADER_READ_ONLY, and
// writes a combined-image-sampler descriptor at binding index i of the sampler set.
void GrVkPipelineState::writeSamplers(GrVkGpu* gpu,
                                      const SkTArray<const GrTextureAccess*>& textureBindings) {
    SkASSERT(fNumSamplers == textureBindings.count());

    for (int i = 0; i < textureBindings.count(); ++i) {
        const GrTextureParams& params = textureBindings[i]->getParams();
        fSamplers.push(gpu->resourceProvider().findOrCreateCompatibleSampler(params));

        GrVkTexture* texture = static_cast<GrVkTexture*>(textureBindings[i]->getTexture());

        // Hold refs on the image resource and view until freeTempResources() so they stay alive
        // for the duration of the draw.
        const GrVkImage::Resource* textureResource = texture->resource();
        textureResource->ref();
        fTextures.push(textureResource);

        const GrVkImageView* textureView = texture->textureView();
        textureView->ref();
        fTextureViews.push(textureView);

        // Change texture layout so it can be read in shader
        VkImageLayout layout = texture->currentLayout();
        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
        VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
        VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
        VkAccessFlags dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
        texture->setImageLayout(gpu,
                                VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                                srcAccessMask,
                                dstAccessMask,
                                srcStageMask,
                                dstStageMask,
                                false);

        VkDescriptorImageInfo imageInfo;
        memset(&imageInfo, 0, sizeof(VkDescriptorImageInfo));
        imageInfo.sampler = fSamplers[i]->sampler();
        imageInfo.imageView = texture->textureView()->imageView();
        imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;

        // One descriptor write per texture; binding index matches the sampler's position in the
        // bindings array.
        VkWriteDescriptorSet writeInfo;
        memset(&writeInfo, 0, sizeof(VkWriteDescriptorSet));
        writeInfo.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        writeInfo.pNext = nullptr;
        writeInfo.dstSet = fDescriptorSets[GrVkUniformHandler::kSamplerDescSet];
        writeInfo.dstBinding = i;
        writeInfo.dstArrayElement = 0;
        writeInfo.descriptorCount = 1;
        writeInfo.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
        writeInfo.pImageInfo = &imageInfo;
        writeInfo.pBufferInfo = nullptr;
        writeInfo.pTexelBufferView = nullptr;

        GR_VK_CALL(gpu->vkInterface(), UpdateDescriptorSets(gpu->device(),
                                                            1,
                                                            &writeInfo,
                                                            0,
                                                            nullptr));
    }
}
345
egdaniel22281c12016-03-23 13:49:40 -0700346void GrVkPipelineState::setRenderTargetState(const GrPipeline& pipeline) {
jvanverth992ad362016-02-26 09:21:02 -0800347 // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
348 if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
349 fRenderTargetState.fRenderTargetSize.fHeight != pipeline.getRenderTarget()->height()) {
egdaniel22281c12016-03-23 13:49:40 -0700350 fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni,
jvanverth992ad362016-02-26 09:21:02 -0800351 SkIntToScalar(pipeline.getRenderTarget()->height()));
352 }
353
354 // set RT adjustment
355 const GrRenderTarget* rt = pipeline.getRenderTarget();
356 SkISize size;
357 size.set(rt->width(), rt->height());
358 SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
359 if (fRenderTargetState.fRenderTargetOrigin != rt->origin() ||
360 fRenderTargetState.fRenderTargetSize != size) {
361 fRenderTargetState.fRenderTargetSize = size;
362 fRenderTargetState.fRenderTargetOrigin = rt->origin();
363
364 float rtAdjustmentVec[4];
365 fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
egdaniel22281c12016-03-23 13:49:40 -0700366 fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
jvanverth992ad362016-02-26 09:21:02 -0800367 }
368}
369
egdaniel22281c12016-03-23 13:49:40 -0700370void GrVkPipelineState::bind(const GrVkGpu* gpu, GrVkCommandBuffer* commandBuffer) {
jvanverth992ad362016-02-26 09:21:02 -0800371 commandBuffer->bindPipeline(gpu, fPipeline);
egdanielc2dc1b22016-03-18 13:18:23 -0700372
373 if (fDSCount) {
374 commandBuffer->bindDescriptorSets(gpu, this, fPipelineLayout, fStartDS, fDSCount,
375 &fDescriptorSets[fStartDS], 0, nullptr);
376 }
jvanverth992ad362016-02-26 09:21:02 -0800377}
378
egdaniel22281c12016-03-23 13:49:40 -0700379void GrVkPipelineState::addUniformResources(GrVkCommandBuffer& commandBuffer) {
egdanielc2dc1b22016-03-18 13:18:23 -0700380 if (fSamplerPoolManager.fPool) {
381 commandBuffer.addResource(fSamplerPoolManager.fPool);
382 }
383 if (fUniformPoolManager.fPool) {
384 commandBuffer.addResource(fUniformPoolManager.fPool);
385 }
386
jvanverth992ad362016-02-26 09:21:02 -0800387 if (fVertexUniformBuffer.get()) {
388 commandBuffer.addResource(fVertexUniformBuffer->resource());
389 }
390 if (fFragmentUniformBuffer.get()) {
391 commandBuffer.addResource(fFragmentUniformBuffer->resource());
392 }
393 for (int i = 0; i < fSamplers.count(); ++i) {
394 commandBuffer.addResource(fSamplers[i]);
395 }
396
397 for (int i = 0; i < fTextureViews.count(); ++i) {
398 commandBuffer.addResource(fTextureViews[i]);
399 }
400
401 for (int i = 0; i < fTextures.count(); ++i) {
402 commandBuffer.addResource(fTextures[i]);
403 }
egdanielc2dc1b22016-03-18 13:18:23 -0700404}
405
406////////////////////////////////////////////////////////////////////////////////
407
// Swaps the current pool (if any) for a new, larger one. Each time a pool is exhausted we
// double fMaxDescriptorSets, clamped to kMaxDescSetLimit, so capacity grows geometrically.
void GrVkPipelineState::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
    if (fPool) {
        fPool->unref(gpu);
        // Guard against overflow of the left shift below.
        SkASSERT(fMaxDescriptorSets < (SK_MaxU32 >> 1));
        if (fMaxDescriptorSets < kMaxDescSetLimit >> 1) {
            fMaxDescriptorSets = fMaxDescriptorSets << 1;
        } else {
            fMaxDescriptorSets = kMaxDescSetLimit;
        }

    }
    // A manager created with zero descriptors never allocates a pool.
    if (fMaxDescriptorSets) {
        fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
                                                                             fMaxDescriptorSets);
    }
    SkASSERT(fPool || !fMaxDescriptorSets);
}
425
// Allocates one descriptor set with this manager's layout into *ds, rolling over to a new
// (larger) pool when the current pool's sets are exhausted. No-op for a zero-capacity manager.
void GrVkPipelineState::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                   VkDescriptorSet* ds) {
    if (!fMaxDescriptorSets) {
        return;
    }
    if (fCurrentDescriptorSet == fMaxDescriptorSets) {
        this->getNewPool(gpu);
        fCurrentDescriptorSet = 0;
    }
    fCurrentDescriptorSet++;

    VkDescriptorSetAllocateInfo dsAllocateInfo;
    memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));
    dsAllocateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    dsAllocateInfo.pNext = nullptr;
    dsAllocateInfo.descriptorPool = fPool->descPool();
    dsAllocateInfo.descriptorSetCount = 1;
    dsAllocateInfo.pSetLayouts = &fDescLayout;
    GR_VK_CALL_ERRCHECK(gpu->vkInterface(), AllocateDescriptorSets(gpu->device(),
                                                                   &dsAllocateInfo,
                                                                   ds));
}
447
egdaniel22281c12016-03-23 13:49:40 -0700448void GrVkPipelineState::DescriptorPoolManager::freeGPUResources(const GrVkGpu* gpu) {
egdanielc2dc1b22016-03-18 13:18:23 -0700449 if (fDescLayout) {
450 GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorSetLayout(gpu->device(), fDescLayout,
451 nullptr));
452 fDescLayout = VK_NULL_HANDLE;
453 }
454
455 if (fPool) {
456 fPool->unref(gpu);
457 fPool = nullptr;
458 }
459}
460
egdaniel22281c12016-03-23 13:49:40 -0700461void GrVkPipelineState::DescriptorPoolManager::abandonGPUResources() {
egdanielc2dc1b22016-03-18 13:18:23 -0700462 fDescLayout = VK_NULL_HANDLE;
463 if (fPool) {
464 fPool->unrefAndAbandon();
465 fPool = nullptr;
466 }
jvanverth992ad362016-02-26 09:21:02 -0800467}
egdaniel22281c12016-03-23 13:49:40 -0700468
469uint32_t get_blend_info_key(const GrPipeline& pipeline) {
470 GrXferProcessor::BlendInfo blendInfo;
471 pipeline.getXferProcessor().getBlendInfo(&blendInfo);
472
473 static const uint32_t kBlendWriteShift = 1;
474 static const uint32_t kBlendCoeffShift = 5;
475 GR_STATIC_ASSERT(kLast_GrBlendCoeff < (1 << kBlendCoeffShift));
476 GR_STATIC_ASSERT(kFirstAdvancedGrBlendEquation - 1 < 4);
477
478 uint32_t key = blendInfo.fWriteColor;
479 key |= (blendInfo.fSrcBlend << kBlendWriteShift);
480 key |= (blendInfo.fDstBlend << (kBlendWriteShift + kBlendCoeffShift));
481 key |= (blendInfo.fEquation << (kBlendWriteShift + 2 * kBlendCoeffShift));
482
483 return key;
484}
485
// Builds the cache key for the non-shader pipeline state: render-pass compatibility, stencil
// settings, draw face, blend info and primitive type. The first kData_StateKeyOffset bytes are
// reserved for the length/header and the total length is written into the first 4 bytes.
void GrVkPipelineState::BuildStateKey(const GrPipeline& pipeline, GrPrimitiveType primitiveType,
                                      SkTArray<uint8_t, true>* key) {
    // Save room for the key length and key header
    key->reset();
    key->push_back_n(kData_StateKeyOffset);

    GrProcessorKeyBuilder b(key);

    GrVkRenderTarget* vkRT = (GrVkRenderTarget*)pipeline.getRenderTarget();
    vkRT->simpleRenderPass()->genKey(&b);

    pipeline.getStencil().genKey(&b);

    SkASSERT(sizeof(GrPipelineBuilder::DrawFace) <= sizeof(uint32_t));
    b.add32(pipeline.getDrawFace());

    b.add32(get_blend_info_key(pipeline));

    b.add32(primitiveType);

    // Set key length
    int keyLength = key->count();
    SkASSERT(0 == (keyLength % 4));  // the key builder only emits whole 32-bit words
    *reinterpret_cast<uint32_t*>(key->begin()) = SkToU32(keyLength);
}