Timothy Liang | 5422f9a | 2018-08-10 10:57:55 -0400 | [diff] [blame] | 1 | /* |
| 2 | * Copyright 2018 Google Inc. |
| 3 | * |
| 4 | * Use of this source code is governed by a BSD-style license that can be |
| 5 | * found in the LICENSE file. |
| 6 | */ |
| 7 | |
| 8 | #include "GrMtlGpuCommandBuffer.h" |
| 9 | |
| 10 | #include "GrColor.h" |
Ethan Nicholas | 0106351 | 2018-10-08 16:58:25 -0400 | [diff] [blame] | 11 | #include "GrFixedClip.h" |
Timothy Liang | 5422f9a | 2018-08-10 10:57:55 -0400 | [diff] [blame] | 12 | #include "GrMtlPipelineState.h" |
| 13 | #include "GrMtlPipelineStateBuilder.h" |
| 14 | #include "GrMtlRenderTarget.h" |
Ethan Nicholas | 0106351 | 2018-10-08 16:58:25 -0400 | [diff] [blame] | 15 | #include "GrRenderTargetPriv.h" |
Timothy Liang | 5422f9a | 2018-08-10 10:57:55 -0400 | [diff] [blame] | 16 | |
| 17 | GrMtlGpuRTCommandBuffer::GrMtlGpuRTCommandBuffer( |
Ethan Nicholas | 56d19a5 | 2018-10-15 11:26:20 -0400 | [diff] [blame^] | 18 | GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds, |
Timothy Liang | 5422f9a | 2018-08-10 10:57:55 -0400 | [diff] [blame] | 19 | const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo, |
| 20 | const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo) |
| 21 | : INHERITED(rt, origin) |
| 22 | , fGpu(gpu) |
Ethan Nicholas | 56d19a5 | 2018-10-15 11:26:20 -0400 | [diff] [blame^] | 23 | , fBounds(bounds) |
Timothy Liang | 5422f9a | 2018-08-10 10:57:55 -0400 | [diff] [blame] | 24 | , fColorLoadAndStoreInfo(colorInfo) |
| 25 | , fStencilLoadAndStoreInfo(stencilInfo) |
| 26 | , fRenderPassDesc(this->createRenderPassDesc()) { |
| 27 | (void)fStencilLoadAndStoreInfo; // Silence unused var warning |
Ethan Nicholas | 0106351 | 2018-10-08 16:58:25 -0400 | [diff] [blame] | 28 | const GrMtlStencilAttachment* stencil = static_cast<GrMtlStencilAttachment*>( |
| 29 | rt->renderTargetPriv().getStencilAttachment()); |
| 30 | if (stencil) { |
| 31 | fRenderPassDesc.stencilAttachment.texture = stencil->stencilView(); |
| 32 | } |
Timothy Liang | 5422f9a | 2018-08-10 10:57:55 -0400 | [diff] [blame] | 33 | if (fColorLoadAndStoreInfo.fLoadOp == GrLoadOp::kClear) { |
| 34 | fCommandBufferInfo.fBounds = SkRect::MakeWH(fRenderTarget->width(), |
| 35 | fRenderTarget->height()); |
| 36 | this->internalBegin(); |
| 37 | this->internalEnd(); |
| 38 | fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad; |
| 39 | } else { |
| 40 | fCommandBufferInfo.fBounds.setEmpty(); |
| 41 | } |
Ethan Nicholas | 0106351 | 2018-10-08 16:58:25 -0400 | [diff] [blame] | 42 | switch (stencilInfo.fLoadOp) { |
| 43 | case GrLoadOp::kLoad: |
| 44 | fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad; |
| 45 | break; |
| 46 | case GrLoadOp::kClear: |
| 47 | fCommandBufferInfo.fBounds = SkRect::MakeWH(fRenderTarget->width(), |
| 48 | fRenderTarget->height()); |
| 49 | fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear; |
| 50 | this->internalBegin(); |
| 51 | this->internalEnd(); |
| 52 | fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad; |
| 53 | break; |
| 54 | case GrLoadOp::kDiscard: |
| 55 | fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionDontCare; |
| 56 | break; |
| 57 | } |
| 58 | switch (stencilInfo.fStoreOp) { |
| 59 | case GrStoreOp::kStore: |
| 60 | fRenderPassDesc.stencilAttachment.storeAction = MTLStoreActionStore; |
| 61 | break; |
| 62 | case GrStoreOp::kDiscard: |
| 63 | fRenderPassDesc.stencilAttachment.storeAction = MTLStoreActionDontCare; |
| 64 | break; |
| 65 | } |
Timothy Liang | 5422f9a | 2018-08-10 10:57:55 -0400 | [diff] [blame] | 66 | } |
| 67 | |
GrMtlGpuRTCommandBuffer::~GrMtlGpuRTCommandBuffer() {
    // Every internalBegin() must have been balanced by internalEnd() before teardown.
    SkASSERT(nil == fActiveRenderCmdEncoder);
}
| 71 | |
// Opens a new render command encoder on the GPU's current command buffer using the
// cached render pass descriptor. Only one encoder may be active at a time.
void GrMtlGpuRTCommandBuffer::internalBegin() {
    SkASSERT(nil == fActiveRenderCmdEncoder);
    id<MTLRenderCommandEncoder> encoder =
            [fGpu->commandBuffer() renderCommandEncoderWithDescriptor: fRenderPassDesc];
    SkASSERT(encoder);
    // Skia geometry winds counter-clockwise for front faces.
    [encoder setFrontFacingWinding: MTLWindingCounterClockwise];
    fActiveRenderCmdEncoder = encoder;
}
| 80 | |
// Closes the currently active render command encoder; must be balanced with a prior
// internalBegin(). After this returns no encoder is active.
void GrMtlGpuRTCommandBuffer::internalEnd() {
    SkASSERT(fActiveRenderCmdEncoder);
    [fActiveRenderCmdEncoder endEncoding];
    // (Removed a tautological SkASSERT that checked the nil we just assigned.)
    fActiveRenderCmdEncoder = nil;
}
| 87 | |
// Hands the accumulated dirty bounds for this render target to the GPU for submission.
// A nil render target means there is nothing to submit.
void GrMtlGpuRTCommandBuffer::submit() {
    if (nullptr == fRenderTarget) {
        return;
    }
    SkIRect dirtyBounds;
    fCommandBufferInfo.fBounds.roundOut(&dirtyBounds);
    fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &dirtyBounds);
}
| 96 | |
// Copies srcRect from 'src' into this command buffer's render target at dstPoint.
void GrMtlGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin,
                                   const SkIRect& srcRect, const SkIPoint& dstPoint) {
    // The copy requires its own command encoder, so no render encoder may be active.
    SkASSERT(nil == fActiveRenderCmdEncoder);
    fGpu->copySurface(fRenderTarget, fOrigin, src, srcOrigin, srcRect, dstPoint);
}
| 104 | |
// Builds the Metal pipeline state for a draw: computes the program descriptor, creates
// the pipeline, and uploads uniform/texture data. Returns nullptr (owned pointer
// otherwise) if the descriptor or pipeline could not be built.
GrMtlPipelineState* GrMtlGpuRTCommandBuffer::prepareDrawState(
        const GrPrimitiveProcessor& primProc,
        const GrPipeline& pipeline,
        const GrPipeline::FixedDynamicState* fixedDynamicState,
        const GrMesh meshes[],
        int meshCount) {
    // TODO: resolve textures and regenerate mipmaps as needed

    // Point draws need special handling in the generated program.
    bool usesPoints = false;
    for (int m = 0; m < meshCount && !usesPoints; ++m) {
        usesPoints = (GrPrimitiveType::kPoints == meshes[m].primitiveType());
    }

    GrProgramDesc programDesc;
    if (!GrProgramDesc::Build(&programDesc, primProc, usesPoints, pipeline,
                              *fGpu->caps()->shaderCaps())) {
        return nullptr;
    }
    programDesc.finalize();

    // TODO: use resource provider for pipeline
    GrMtlPipelineState* state =
            GrMtlPipelineStateBuilder::CreatePipelineState(primProc, pipeline, &programDesc,
                                                           fGpu);
    if (nullptr == state) {
        return nullptr;
    }

    const GrTextureProxy* const* proxies =
            fixedDynamicState ? fixedDynamicState->fPrimitiveProcessorTextures : nullptr;

    // Setting the pipeline data requires its own command encoder, so no render encoder
    // may be active here.
    SkASSERT(nil == fActiveRenderCmdEncoder);
    state->setData(primProc, pipeline, proxies);

    return state;
}
| 143 | |
// Issues one render pass that draws all of 'meshes' with the given pipeline, then joins
// 'bounds' into the command buffer's dirty region.
void GrMtlGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
                                     const GrPipeline& pipeline,
                                     const GrPipeline::FixedDynamicState* fixedDynamicState,
                                     const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                                     const GrMesh meshes[],
                                     int meshCount,
                                     const SkRect& bounds) {
    SkASSERT(pipeline.renderTarget() == fRenderTarget);

    if (0 == meshCount) {
        return;  // nothing to draw
    }
    if (pipeline.isScissorEnabled()) {
        return; // TODO: ScissorRects are not supported.
    }

    std::unique_ptr<GrMtlPipelineState> pipelineState(
            this->prepareDrawState(primProc, pipeline, fixedDynamicState, meshes, meshCount));
    if (!pipelineState) {
        return;
    }

    // Open an encoder for this draw and push all per-draw state onto it.
    this->internalBegin();
    [fActiveRenderCmdEncoder setRenderPipelineState: pipelineState->mtlPipelineState()];
    pipelineState->bind(fActiveRenderCmdEncoder);
    pipelineState->setBlendConstants(fActiveRenderCmdEncoder, fRenderTarget->config(),
                                     pipeline.getXferProcessor());
    pipelineState->setDepthStencilState(fActiveRenderCmdEncoder);

    for (int m = 0; m < meshCount; ++m) {
        SkASSERT(fActiveRenderCmdEncoder);
        meshes[m].sendToGpu(this);
    }
    this->internalEnd();

    fCommandBufferInfo.fBounds.join(bounds);
}
| 181 | |
// Clears the color attachment to 'color' by running an empty render pass whose load
// action is MTLLoadActionClear.
void GrMtlGpuRTCommandBuffer::onClear(const GrFixedClip& clip, GrColor color) {
    // If we end up here from absClear, the clear bounds may be bigger than the RT proxy
    // bounds - but in that case, scissor should be enabled, so this check should still
    // succeed.
    SkASSERT(!clip.scissorEnabled() || clip.scissorRect().contains(fBounds));

    const auto& rgba = GrColor4f::FromGrColor(color).fRGBA;
    fRenderPassDesc.colorAttachments[0].clearColor =
            MTLClearColorMake(rgba[0], rgba[1], rgba[2], rgba[3]);

    // Run an empty pass to perform the clear, then restore the load action so later
    // passes preserve the cleared contents instead of re-clearing.
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionClear;
    this->internalBegin();
    this->internalEnd();
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
}
| 194 | |
// Clears the entire stencil buffer to the clip value (top bit set when inside the
// stencil mask, zero otherwise) via an empty render pass.
void GrMtlGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    // This should only be called internally when we know we have a stencil buffer.
    GrStencilAttachment* stencil = fRenderTarget->renderTargetPriv().getStencilAttachment();
    SkASSERT(stencil);

    // The contract with the callers does not guarantee that we preserve all bits in the
    // stencil during this clear. Thus we will clear the entire stencil to the desired
    // value.
    uint32_t clearValue = 0;
    if (insideStencilMask) {
        clearValue = (1 << (stencil->bits() - 1));
    }
    fRenderPassDesc.stencilAttachment.clearStencil = clearValue;

    // Empty pass performs the clear; then restore the load action so later passes keep
    // the cleared contents.
    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
    this->internalBegin();
    this->internalEnd();
    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
}
| 217 | |
// Builds the MTLRenderPassDescriptor for this command buffer's render target, applying
// the color attachment's clear color and load/store actions from fColorLoadAndStoreInfo.
MTLRenderPassDescriptor* GrMtlGpuRTCommandBuffer::createRenderPassDesc() const {
    // GrLoadOp/GrStoreOp values index directly into these tables; the static asserts
    // pin the required enum ordering.
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    GR_STATIC_ASSERT((int)GrLoadOp::kLoad == 0);
    GR_STATIC_ASSERT((int)GrLoadOp::kClear == 1);
    GR_STATIC_ASSERT((int)GrLoadOp::kDiscard == 2);
    SkASSERT(fColorLoadAndStoreInfo.fLoadOp <= GrLoadOp::kDiscard);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    GR_STATIC_ASSERT((int)GrStoreOp::kStore == 0);
    GR_STATIC_ASSERT((int)GrStoreOp::kDiscard == 1);
    SkASSERT(fColorLoadAndStoreInfo.fStoreOp <= GrStoreOp::kDiscard);

    auto renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
    auto colorAttachment = renderPassDesc.colorAttachments[0];
    colorAttachment.texture = static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlRenderTexture();
    colorAttachment.slice = 0;
    colorAttachment.level = 0;
    const auto& rgba = GrColor4f::FromGrColor(fColorLoadAndStoreInfo.fClearColor).fRGBA;
    colorAttachment.clearColor = MTLClearColorMake(rgba[0], rgba[1], rgba[2], rgba[3]);
    colorAttachment.loadAction = mtlLoadAction[static_cast<int>(fColorLoadAndStoreInfo.fLoadOp)];
    colorAttachment.storeAction =
            mtlStoreAction[static_cast<int>(fColorLoadAndStoreInfo.fStoreOp)];
    return renderPassDesc;
}
| 251 | |
// Maps a Skia primitive type onto its Metal equivalent. Adjacency primitives are not
// supported (Metal has no geometry shaders).
static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    switch (primitiveType) {
        case GrPrimitiveType::kTriangles:
            return MTLPrimitiveTypeTriangle;
        case GrPrimitiveType::kTriangleStrip:
            return MTLPrimitiveTypeTriangleStrip;
        case GrPrimitiveType::kPoints:
            return MTLPrimitiveTypePoint;
        case GrPrimitiveType::kLines:
            return MTLPrimitiveTypeLine;
        case GrPrimitiveType::kLineStrip:
            return MTLPrimitiveTypeLineStrip;
        default:
            SkASSERT(false);
            return MTLPrimitiveTypeTriangle;
    }
}
| 269 | |
// Binds the vertex and (optional) instance buffers on the active encoder. Buffers are
// bound at consecutive indices immediately after the uniform bindings.
void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
                                           const GrBuffer* instanceBuffer) {
    size_t bindIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
    if (vertexBuffer) {
        // CPU-backed or currently-mapped buffers cannot be handed to the GPU.
        SkASSERT(!vertexBuffer->isCPUBacked());
        SkASSERT(!vertexBuffer->isMapped());

        id<MTLBuffer> mtlBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer)->mtlBuffer();
        SkASSERT(mtlBuffer);
        [fActiveRenderCmdEncoder setVertexBuffer: mtlBuffer
                                          offset: 0
                                         atIndex: bindIndex++];
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCPUBacked());
        SkASSERT(!instanceBuffer->isMapped());

        id<MTLBuffer> mtlBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer)->mtlBuffer();
        SkASSERT(mtlBuffer);
        [fActiveRenderCmdEncoder setVertexBuffer: mtlBuffer
                                          offset: 0
                                         atIndex: bindIndex++];
    }
}
| 294 | |
// Encodes a non-indexed (optionally instanced) draw onto the active encoder.
void GrMtlGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                     const GrBuffer* vertexBuffer,
                                                     int vertexCount,
                                                     int baseVertex,
                                                     const GrBuffer* instanceBuffer,
                                                     int instanceCount,
                                                     int baseInstance) {
    // Geometry shaders are not supported, so adjacency primitives never reach here.
    SkASSERT(GrPrimitiveType::kLinesAdjacency != primitiveType);

    this->bindGeometry(vertexBuffer, instanceBuffer);
    [fActiveRenderCmdEncoder drawPrimitives: gr_to_mtl_primitive(primitiveType)
                                vertexStart: baseVertex
                                vertexCount: vertexCount
                              instanceCount: instanceCount
                               baseInstance: baseInstance];
}
| 311 | |
// Encodes an indexed (optionally instanced) draw onto the active encoder. Indices are
// 16-bit; primitive restart is not supported.
void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                            const GrBuffer* indexBuffer,
                                                            int indexCount,
                                                            int baseIndex,
                                                            const GrBuffer* vertexBuffer,
                                                            int baseVertex,
                                                            const GrBuffer* instanceBuffer,
                                                            int instanceCount,
                                                            int baseInstance,
                                                            GrPrimitiveRestart restart) {
    // Geometry shaders are not supported, so adjacency primitives never reach here.
    SkASSERT(GrPrimitiveType::kLinesAdjacency != primitiveType);
    SkASSERT(GrPrimitiveRestart::kNo == restart);

    this->bindGeometry(vertexBuffer, instanceBuffer);

    id<MTLBuffer> mtlIndexBuffer = nil;
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCPUBacked());
        SkASSERT(!indexBuffer->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    // baseIndex is converted to a byte offset into the 16-bit index buffer.
    [fActiveRenderCmdEncoder drawIndexedPrimitives: gr_to_mtl_primitive(primitiveType)
                                        indexCount: indexCount
                                         indexType: MTLIndexTypeUInt16
                                       indexBuffer: mtlIndexBuffer
                                 indexBufferOffset: sizeof(uint16_t) * baseIndex
                                     instanceCount: instanceCount
                                        baseVertex: baseVertex
                                      baseInstance: baseInstance];
}