implemented GrMtlGpuCommandBuffer for draws in Metal gpu backend
Bug: skia:
Change-Id: Ifc20a7734a02fe48ef79af201b4ebc882aedefb3
Reviewed-on: https://skia-review.googlesource.com/146382
Commit-Queue: Timothy Liang <timliang@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
diff --git a/gn/gpu.gni b/gn/gpu.gni
index e6533ec..fc115fb 100644
--- a/gn/gpu.gni
+++ b/gn/gpu.gni
@@ -634,6 +634,7 @@
"$_src/gpu/mtl/GrMtlGpu.h",
"$_src/gpu/mtl/GrMtlGpu.mm",
"$_src/gpu/mtl/GrMtlGpuCommandBuffer.h",
+ "$_src/gpu/mtl/GrMtlGpuCommandBuffer.mm",
"$_src/gpu/mtl/GrMtlPipelineState.h",
"$_src/gpu/mtl/GrMtlPipelineState.mm",
"$_src/gpu/mtl/GrMtlPipelineStateBuilder.h",
diff --git a/src/gpu/mtl/GrMtlCaps.mm b/src/gpu/mtl/GrMtlCaps.mm
index 110985f..26a111a 100644
--- a/src/gpu/mtl/GrMtlCaps.mm
+++ b/src/gpu/mtl/GrMtlCaps.mm
@@ -26,6 +26,16 @@
this->applyOptionsOverrides(contextOptions);
fShaderCaps->applyOptionsOverrides(contextOptions);
+
+ // The following are disabled due to the unfinished Metal backend, not because Metal itself
+ // doesn't support them.
+ fBlacklistCoverageCounting = true; // CCPR shaders have some incompatibilities with SkSLC
+ fFenceSyncSupport = false; // Fences are not implemented yet
+ fMipMapSupport = false; // GrMtlGpu::onRegenerateMipMapLevels() not implemented
+ fMultisampleDisableSupport = true; // MSAA and resolving not implemented yet
+ fDiscardRenderTargetSupport = false; // GrMtlGpuCommandBuffer::discard() not implemented
+ fAvoidStencilBuffers = true; // Stencils not implemented
+ fCrossContextTextureSupport = false; // GrMtlGpu::prepareTextureForCrossContextUsage() not impl
}
void GrMtlCaps::initFeatureSet(MTLFeatureSet featureSet) {
diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h
index 1ae9878..bf9b1f5 100644
--- a/src/gpu/mtl/GrMtlGpu.h
+++ b/src/gpu/mtl/GrMtlGpu.h
@@ -110,6 +110,15 @@
void waitSemaphore(sk_sp<GrSemaphore> semaphore) override {}
sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override { return nullptr; }
+ // When the Metal backend actually uses indirect command buffers, this function will actually do
+ // what it says. For now, every command is encoded directly into the primary command buffer, so
+ // this function is pretty useless, except for indicating that a render target has been drawn
+ // to.
+ void submitIndirectCommandBuffer(GrSurface* surface, GrSurfaceOrigin origin,
+ const SkIRect* bounds) {
+ this->didWriteToSurface(surface, origin, bounds);
+ }
+
private:
GrMtlGpu(GrContext* context, const GrContextOptions& options,
id<MTLDevice> device, id<MTLCommandQueue> queue, MTLFeatureSet featureSet);
@@ -151,7 +160,9 @@
void onResolveRenderTarget(GrRenderTarget* target) override { return; }
- void onFinishFlush(bool insertedSemaphores) override {}
+ void onFinishFlush(bool insertedSemaphores) override {
+ this->submitCommandBuffer(kSkip_SyncQueue);
+ }
// Function that uploads data onto textures with private storage mode (GPU access only).
bool uploadToTexture(GrMtlTexture* tex, int left, int top, int width, int height,
diff --git a/src/gpu/mtl/GrMtlGpuCommandBuffer.h b/src/gpu/mtl/GrMtlGpuCommandBuffer.h
index e5b259c..d3f964f 100644
--- a/src/gpu/mtl/GrMtlGpuCommandBuffer.h
+++ b/src/gpu/mtl/GrMtlGpuCommandBuffer.h
@@ -10,9 +10,12 @@
#include "GrGpuCommandBuffer.h"
#include "GrMtlGpu.h"
+#include "GrMesh.h"
#import <metal/metal.h>
+typedef uint32_t GrColor;
+class GrMtlPipelineState;
class GrMtlRenderTarget;
class GrMtlGpuTextureCommandBuffer : public GrGpuTextureCommandBuffer {
@@ -37,19 +40,13 @@
typedef GrGpuTextureCommandBuffer INHERITED;
};
-class GrMtlGpuRTCommandBuffer : public GrGpuRTCommandBuffer {
+class GrMtlGpuRTCommandBuffer : public GrGpuRTCommandBuffer, private GrMesh::SendToGpuImpl {
public:
GrMtlGpuRTCommandBuffer(GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
- const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo)
- : INHERITED(rt, origin)
- , fGpu(gpu)
- , fColorLoadAndStoreInfo(colorInfo)
- , fStencilLoadAndStoreInfo(stencilInfo) {
- // Silence unused var warning
- (void)fColorLoadAndStoreInfo;
- (void)fStencilLoadAndStoreInfo;
- }
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo);
+
+ ~GrMtlGpuRTCommandBuffer() override;
void begin() override {}
void end() override {}
@@ -61,29 +58,79 @@
void inlineUpload(GrOpFlushState* state, GrDeferredTextureUploadFn& upload) override {}
void copy(GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
- const SkIPoint& dstPoint) override {
- fGpu->copySurface(fRenderTarget, fOrigin, src, srcOrigin, srcRect, dstPoint);
- }
+ const SkIPoint& dstPoint) override;
+
+ void submit();
private:
+ void internalBegin();
+ void internalEnd();
+
GrGpu* gpu() override { return fGpu; }
+ GrMtlPipelineState* prepareDrawState(
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ const GrPipeline::FixedDynamicState* fixedDynamicState,
+ const GrMesh meshes[],
+ int meshCount);
+
void onDraw(const GrPrimitiveProcessor& primProc,
const GrPipeline& pipeline,
const GrPipeline::FixedDynamicState* fixedDynamicState,
const GrPipeline::DynamicStateArrays* dynamicStateArrays,
const GrMesh mesh[],
int meshCount,
- const SkRect& bounds) override {}
+ const SkRect& bounds) override;
void onClear(const GrFixedClip& clip, GrColor color) override {}
void onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) override {}
+ MTLRenderPassDescriptor* createRenderPassDesc() const;
+
+ void bindGeometry(const GrBuffer* vertexBuffer, const GrBuffer* instanceBuffer);
+
+ // GrMesh::SendToGpuImpl methods. These issue the actual Metal draw commands.
+ // Marked final as a hint to the compiler to not use virtual dispatch.
+ void sendMeshToGpu(GrPrimitiveType primType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex) final {
+ this->sendInstancedMeshToGpu(primType, vertexBuffer, vertexCount, baseVertex, nullptr, 1,
+ 0);
+ }
+
+ void sendIndexedMeshToGpu(GrPrimitiveType primType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, uint16_t /*minIndexValue*/, uint16_t /*maxIndexValue*/,
+ const GrBuffer* vertexBuffer, int baseVertex,
+ GrPrimitiveRestart restart) final {
+ SkASSERT(restart == GrPrimitiveRestart::kNo);
+ this->sendIndexedInstancedMeshToGpu(primType, indexBuffer, indexCount, baseIndex,
+ vertexBuffer, baseVertex, nullptr, 1, 0,
+ GrPrimitiveRestart::kNo);
+ }
+
+ void sendInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* vertexBuffer, int vertexCount,
+ int baseVertex, const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance) final;
+
+ void sendIndexedInstancedMeshToGpu(GrPrimitiveType, const GrBuffer* indexBuffer, int indexCount,
+ int baseIndex, const GrBuffer* vertexBuffer, int baseVertex,
+ const GrBuffer* instanceBuffer, int instanceCount,
+ int baseInstance, GrPrimitiveRestart) final;
+
GrMtlGpu* fGpu;
GrGpuRTCommandBuffer::LoadAndStoreInfo fColorLoadAndStoreInfo;
GrGpuRTCommandBuffer::StencilLoadAndStoreInfo fStencilLoadAndStoreInfo;
+ id<MTLRenderCommandEncoder> fActiveRenderCmdEncoder;
+ MTLRenderPassDescriptor* fRenderPassDesc;
+
+ struct CommandBufferInfo {
+ SkRect fBounds;
+ };
+
+ CommandBufferInfo fCommandBufferInfo;
+
typedef GrGpuRTCommandBuffer INHERITED;
};
diff --git a/src/gpu/mtl/GrMtlGpuCommandBuffer.mm b/src/gpu/mtl/GrMtlGpuCommandBuffer.mm
new file mode 100644
index 0000000..d390a1e
--- /dev/null
+++ b/src/gpu/mtl/GrMtlGpuCommandBuffer.mm
@@ -0,0 +1,272 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrMtlGpuCommandBuffer.h"
+
+#include "GrColor.h"
+#include "GrMtlPipelineState.h"
+#include "GrMtlPipelineStateBuilder.h"
+#include "GrMtlRenderTarget.h"
+
+GrMtlGpuRTCommandBuffer::GrMtlGpuRTCommandBuffer(
+ GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin,
+ const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
+ const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo)
+ : INHERITED(rt, origin)
+ , fGpu(gpu)
+ , fColorLoadAndStoreInfo(colorInfo)
+ , fStencilLoadAndStoreInfo(stencilInfo)
+ , fRenderPassDesc(this->createRenderPassDesc()) {
+ (void)fStencilLoadAndStoreInfo; // Silence unused var warning
+ if (fColorLoadAndStoreInfo.fLoadOp == GrLoadOp::kClear) {
+ fCommandBufferInfo.fBounds = SkRect::MakeWH(fRenderTarget->width(),
+ fRenderTarget->height());
+ this->internalBegin();
+ this->internalEnd();
+ fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
+ } else {
+ fCommandBufferInfo.fBounds.setEmpty();
+ }
+}
+
+GrMtlGpuRTCommandBuffer::~GrMtlGpuRTCommandBuffer() {
+ SkASSERT(fActiveRenderCmdEncoder == nil);
+}
+
+void GrMtlGpuRTCommandBuffer::internalBegin() {
+ SkASSERT(fActiveRenderCmdEncoder == nil);
+ fActiveRenderCmdEncoder =
+ [fGpu->commandBuffer()
+ renderCommandEncoderWithDescriptor: fRenderPassDesc];
+ SkASSERT(fActiveRenderCmdEncoder);
+ [fActiveRenderCmdEncoder setFrontFacingWinding: MTLWindingCounterClockwise];
+}
+
+void GrMtlGpuRTCommandBuffer::internalEnd() {
+ SkASSERT(fActiveRenderCmdEncoder);
+ [fActiveRenderCmdEncoder endEncoding];
+ fActiveRenderCmdEncoder = nil;
+ SkASSERT(fActiveRenderCmdEncoder == nil);
+}
+
+void GrMtlGpuRTCommandBuffer::submit() {
+ if (!fRenderTarget) {
+ return;
+ }
+ SkIRect iBounds;
+ fCommandBufferInfo.fBounds.roundOut(&iBounds);
+ fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
+}
+
+void GrMtlGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin,
+ const SkIRect& srcRect, const SkIPoint& dstPoint) {
+ // We cannot have an active encoder when we call copy since it requires its own
+ // command encoder.
+ SkASSERT(fActiveRenderCmdEncoder == nil);
+ fGpu->copySurface(fRenderTarget, fOrigin, src, srcOrigin, srcRect, dstPoint);
+}
+
+GrMtlPipelineState* GrMtlGpuRTCommandBuffer::prepareDrawState(
+ const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ const GrPipeline::FixedDynamicState* fixedDynamicState,
+ const GrMesh meshes[],
+ int meshCount) {
+ // TODO: resolve textures and regenerate mipmaps as needed
+ bool hasPoints = false;
+ for (int i = 0; i < meshCount; ++i) {
+ if (meshes[i].primitiveType() == GrPrimitiveType::kPoints) {
+ hasPoints = true;
+ break;
+ }
+ }
+ GrProgramDesc desc;
+ if (!GrProgramDesc::Build(&desc, primProc, hasPoints, pipeline, *fGpu->caps()->shaderCaps())) {
+ return nullptr;
+ }
+ desc.finalize();
+
+ // TODO: use resource provider for pipeline
+ GrMtlPipelineState* pipelineState =
+ GrMtlPipelineStateBuilder::CreatePipelineState(primProc, pipeline, &desc, fGpu);
+ if (!pipelineState) {
+ return nullptr;
+ }
+ const GrTextureProxy* const* primProcProxies = nullptr;
+ if (fixedDynamicState) {
+ primProcProxies = fixedDynamicState->fPrimitiveProcessorTextures;
+ }
+
+ // We cannot have an active encoder when we set the pipeline data since it requires its own
+ // command encoder.
+ SkASSERT(fActiveRenderCmdEncoder == nil);
+ pipelineState->setData(primProc, pipeline, primProcProxies);
+
+ return pipelineState;
+}
+
+void GrMtlGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
+ const GrPipeline& pipeline,
+ const GrPipeline::FixedDynamicState* fixedDynamicState,
+ const GrPipeline::DynamicStateArrays* dynamicStateArrays,
+ const GrMesh meshes[],
+ int meshCount,
+ const SkRect& bounds) {
+ SkASSERT(pipeline.renderTarget() == fRenderTarget);
+ if (!meshCount) {
+ return;
+ }
+ if (pipeline.isScissorEnabled()) {
+ return; // TODO: ScissorRects are not supported.
+ }
+
+ std::unique_ptr<GrMtlPipelineState> pipelineState(
+ this->prepareDrawState(primProc, pipeline, fixedDynamicState, meshes, meshCount));
+ if (!pipelineState) {
+ return;
+ }
+
+ this->internalBegin();
+ [fActiveRenderCmdEncoder setRenderPipelineState: pipelineState->mtlPipelineState()];
+
+ pipelineState->bind(fActiveRenderCmdEncoder);
+
+ for (int i = 0; i < meshCount; ++i) {
+ const GrMesh& mesh = meshes[i];
+ SkASSERT(fActiveRenderCmdEncoder);
+ mesh.sendToGpu(this);
+ }
+ this->internalEnd();
+ fCommandBufferInfo.fBounds.join(bounds);
+}
+
+MTLRenderPassDescriptor* GrMtlGpuRTCommandBuffer::createRenderPassDesc() const {
+ const static MTLLoadAction mtlLoadAction[] {
+ MTLLoadActionLoad,
+ MTLLoadActionClear,
+ MTLLoadActionDontCare
+ };
+ GR_STATIC_ASSERT((int)GrLoadOp::kLoad == 0);
+ GR_STATIC_ASSERT((int)GrLoadOp::kClear == 1);
+ GR_STATIC_ASSERT((int)GrLoadOp::kDiscard == 2);
+ SkASSERT(fColorLoadAndStoreInfo.fLoadOp <= GrLoadOp::kDiscard);
+
+ const static MTLStoreAction mtlStoreAction[] {
+ MTLStoreActionStore,
+ MTLStoreActionDontCare
+ };
+ GR_STATIC_ASSERT((int)GrStoreOp::kStore == 0);
+ GR_STATIC_ASSERT((int)GrStoreOp::kDiscard == 1);
+ SkASSERT(fColorLoadAndStoreInfo.fStoreOp <= GrStoreOp::kDiscard);
+
+ auto renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
+ renderPassDesc.colorAttachments[0].texture =
+ static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlRenderTexture();
+ renderPassDesc.colorAttachments[0].slice = 0;
+ renderPassDesc.colorAttachments[0].level = 0;
+ const auto& clearColor = GrColor4f::FromGrColor(fColorLoadAndStoreInfo.fClearColor).fRGBA;
+ renderPassDesc.colorAttachments[0].clearColor =
+ MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
+ renderPassDesc.colorAttachments[0].loadAction =
+ mtlLoadAction[static_cast<int>(fColorLoadAndStoreInfo.fLoadOp)];
+ renderPassDesc.colorAttachments[0].storeAction =
+ mtlStoreAction[static_cast<int>(fColorLoadAndStoreInfo.fStoreOp)];
+ return renderPassDesc;
+}
+
+static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
+ const static MTLPrimitiveType mtlPrimitiveType[] {
+ MTLPrimitiveTypeTriangle,
+ MTLPrimitiveTypeTriangleStrip,
+ MTLPrimitiveTypePoint,
+ MTLPrimitiveTypeLine,
+ MTLPrimitiveTypeLineStrip
+ };
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangles == 0);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangleStrip == 1);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kPoints == 2);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kLines == 3);
+ GR_STATIC_ASSERT((int)GrPrimitiveType::kLineStrip == 4);
+
+ SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
+ return mtlPrimitiveType[static_cast<int>(primitiveType)];
+}
+
+void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
+ const GrBuffer* instanceBuffer) {
+ size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
+ if (vertexBuffer) {
+ SkASSERT(!vertexBuffer->isCPUBacked());
+ SkASSERT(!vertexBuffer->isMapped());
+
+ auto mtlVertexBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer)->mtlBuffer();
+ SkASSERT(mtlVertexBuffer);
+ [fActiveRenderCmdEncoder setVertexBuffer: mtlVertexBuffer
+ offset: 0
+ atIndex: bufferIndex++];
+ }
+ if (instanceBuffer) {
+ SkASSERT(!instanceBuffer->isCPUBacked());
+ SkASSERT(!instanceBuffer->isMapped());
+
+ auto mtlInstanceBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer)->mtlBuffer();
+ SkASSERT(mtlInstanceBuffer);
+ [fActiveRenderCmdEncoder setVertexBuffer: mtlInstanceBuffer
+ offset: 0
+ atIndex: bufferIndex++];
+ }
+}
+
+void GrMtlGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType primitiveType,
+ const GrBuffer* vertexBuffer,
+ int vertexCount,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance) {
+ this->bindGeometry(vertexBuffer, instanceBuffer);
+
+ SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
+ [fActiveRenderCmdEncoder drawPrimitives: gr_to_mtl_primitive(primitiveType)
+ vertexStart: baseVertex
+ vertexCount: vertexCount
+ instanceCount: instanceCount
+ baseInstance: baseInstance];
+}
+
+void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
+ const GrBuffer* indexBuffer,
+ int indexCount,
+ int baseIndex,
+ const GrBuffer* vertexBuffer,
+ int baseVertex,
+ const GrBuffer* instanceBuffer,
+ int instanceCount,
+ int baseInstance,
+ GrPrimitiveRestart restart) {
+ this->bindGeometry(vertexBuffer, instanceBuffer);
+
+ SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
+ id<MTLBuffer> mtlIndexBuffer;
+ if (indexBuffer) {
+ SkASSERT(!indexBuffer->isCPUBacked());
+ SkASSERT(!indexBuffer->isMapped());
+
+ mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
+ SkASSERT(mtlIndexBuffer);
+ }
+
+ SkASSERT(restart == GrPrimitiveRestart::kNo);
+ [fActiveRenderCmdEncoder drawIndexedPrimitives: gr_to_mtl_primitive(primitiveType)
+ indexCount: indexCount
+ indexType: MTLIndexTypeUInt16
+ indexBuffer: mtlIndexBuffer
+ indexBufferOffset: sizeof(uint16_t) * baseIndex
+ instanceCount: instanceCount
+ baseVertex: baseVertex
+ baseInstance: baseInstance];
+}
diff --git a/src/sksl/SkSLMetalCodeGenerator.cpp b/src/sksl/SkSLMetalCodeGenerator.cpp
index 1ba0bbc..443c9e3 100644
--- a/src/sksl/SkSLMetalCodeGenerator.cpp
+++ b/src/sksl/SkSLMetalCodeGenerator.cpp
@@ -590,12 +590,16 @@
this->write(")]]");
}
}
- if (fInterfaceBlockNameMap.empty()) {
+ if (fProgram.fKind == Program::kFragment_Kind) {
+ if (fInterfaceBlockNameMap.empty()) {
// FIXME - Possibly have a different way of passing in u_skRTHeight or flip y axis
// in a different way altogether.
- this->write(", constant sksl_synthetic_uniforms& _anonInterface0 [[buffer(0)]]");
- }
- if (fProgram.fKind == Program::kFragment_Kind) {
+#ifdef SK_MOLTENVK
+ this->write(", constant sksl_synthetic_uniforms& _anonInterface0 [[buffer(0)]]");
+#else
+ this->write(", constant sksl_synthetic_uniforms& _anonInterface0 [[buffer(1)]]");
+#endif
+ }
this->write(", bool _frontFacing [[front_facing]]");
this->write(", float4 _fragCoord [[position]]");
} else if (fProgram.fKind == Program::kVertex_Kind) {