attached uniform and varying handlers to GrMtlPipelineStateBuilder
Bug: skia:
Change-Id: I4e8d09052153fa10bf887f07b70abe37d0c7e884
Reviewed-on: https://skia-review.googlesource.com/145159
Commit-Queue: Timothy Liang <timliang@google.com>
Reviewed-by: Greg Daniel <egdaniel@google.com>
diff --git a/src/gpu/mtl/GrMtlPipelineState.h b/src/gpu/mtl/GrMtlPipelineState.h
index 9c36dd5..e476bab 100644
--- a/src/gpu/mtl/GrMtlPipelineState.h
+++ b/src/gpu/mtl/GrMtlPipelineState.h
@@ -8,24 +8,33 @@
#ifndef GrMtlPipelineState_DEFINED
#define GrMtlPipelineState_DEFINED
+#include "GrMtlBuffer.h"
+
#import <metal/metal.h>
class GrMtlGpu;
+/**
+ * Wraps a MTLRenderPipelineState object and also contains more info about the pipeline as needed
+ * by Ganesh
+ */
class GrMtlPipelineState {
public:
- GrMtlPipelineState(id<MTLRenderPipelineState> pipelineState,
- MTLPixelFormat pixelFormat)
- : fPipelineState(pipelineState)
- , fPixelFormat(pixelFormat) {
- (void) fPixelFormat; // Silence unused-var warning.
-}
+ GrMtlPipelineState(GrMtlGpu* gpu,
+ id<MTLRenderPipelineState> pipelineState,
+ MTLPixelFormat pixelFormat,
+ GrMtlBuffer* geometryUniformBuffer,
+ GrMtlBuffer* fragmentUniformBuffer);
id<MTLRenderPipelineState> mtlPipelineState() { return fPipelineState; }
private:
+ GrMtlGpu* fGpu;
+
id<MTLRenderPipelineState> fPipelineState;
- MTLPixelFormat fPixelFormat;
+ MTLPixelFormat fPixelFormat;
+ sk_sp<GrMtlBuffer> fGeometryUniformBuffer;
+ sk_sp<GrMtlBuffer> fFragmentUniformBuffer;
};
#endif
diff --git a/src/gpu/mtl/GrMtlPipelineState.mm b/src/gpu/mtl/GrMtlPipelineState.mm
new file mode 100644
index 0000000..201d44b
--- /dev/null
+++ b/src/gpu/mtl/GrMtlPipelineState.mm
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrMtlPipelineState.h"
+
+#include "GrMtlBuffer.h"
+#include "GrMtlGpu.h"
+
+GrMtlPipelineState::GrMtlPipelineState(GrMtlGpu* gpu,
+ id<MTLRenderPipelineState> pipelineState,
+ MTLPixelFormat pixelFormat,
+ GrMtlBuffer* geometryUniformBuffer,
+ GrMtlBuffer* fragmentUniformBuffer)
+ : fGpu(gpu)
+ , fPipelineState(pipelineState)
+ , fPixelFormat(pixelFormat)
+ , fGeometryUniformBuffer(geometryUniformBuffer)
+ , fFragmentUniformBuffer(fragmentUniformBuffer) {
+ (void) fGpu; // Suppress unused-var warning.
+ (void) fPixelFormat; // Suppress unused-var warning.
+}
diff --git a/src/gpu/mtl/GrMtlPipelineStateBuilder.h b/src/gpu/mtl/GrMtlPipelineStateBuilder.h
index 3834dbf..0596b3e 100644
--- a/src/gpu/mtl/GrMtlPipelineStateBuilder.h
+++ b/src/gpu/mtl/GrMtlPipelineStateBuilder.h
@@ -10,6 +10,8 @@
#include "GrPipeline.h"
#include "GrProgramDesc.h"
+#include "GrMtlUniformHandler.h"
+#include "GrMtlVaryingHandler.h"
#include "SkSLCompiler.h"
#include "glsl/GrGLSLProgramBuilder.h"
@@ -31,11 +33,11 @@
const GrCaps* caps() const override;
- GrGLSLUniformHandler* uniformHandler() override { return nullptr; }
+ GrGLSLUniformHandler* uniformHandler() override { return &fUniformHandler; }
- const GrGLSLUniformHandler* uniformHandler() const override { return nullptr; }
+ const GrGLSLUniformHandler* uniformHandler() const override { return &fUniformHandler; }
- GrGLSLVaryingHandler* varyingHandler() override { return nullptr; }
+ GrGLSLVaryingHandler* varyingHandler() override { return &fVaryingHandler; }
void finalizeFragmentOutputColor(GrShaderVar& outputColor) override;
@@ -46,10 +48,11 @@
const SkSL::Program::Settings& settings,
GrProgramDesc* desc);
- GrMtlPipelineState* finalize(const GrPrimitiveProcessor&, const GrPipeline&,
- GrProgramDesc*);
+ GrMtlPipelineState* finalize(const GrPrimitiveProcessor&, const GrPipeline&, GrProgramDesc*);
GrMtlGpu* fGpu;
+ GrMtlUniformHandler fUniformHandler;
+ GrMtlVaryingHandler fVaryingHandler;
typedef GrGLSLProgramBuilder INHERITED;
};
diff --git a/src/gpu/mtl/GrMtlPipelineStateBuilder.mm b/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
index d072f7f..f4f1807 100644
--- a/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
+++ b/src/gpu/mtl/GrMtlPipelineStateBuilder.mm
@@ -34,7 +34,9 @@
GrProgramDesc* desc,
GrMtlGpu* gpu)
: INHERITED(primProc, pipeline, desc)
- , fGpu(gpu) {
+ , fGpu(gpu)
+ , fUniformHandler(this)
+ , fVaryingHandler(this) {
}
const GrCaps* GrMtlPipelineStateBuilder::caps() const {
@@ -139,7 +141,7 @@
static MTLVertexDescriptor* create_vertex_descriptor(const GrPrimitiveProcessor& primProc) {
uint32_t vertexBinding = 0, instanceBinding = 0;
- int nextBinding = 0;
+ int nextBinding = GrMtlUniformHandler::kLastUniformBinding + 1;
if (primProc.hasVertexAttributes()) {
vertexBinding = nextBinding++;
}
@@ -359,6 +361,15 @@
[[error localizedDescription] cStringUsingEncoding: NSASCIIStringEncoding]);
pipelineState = nil;
}
- return new GrMtlPipelineState(pipelineState,
- pipelineDescriptor.colorAttachments[0].pixelFormat);
+ return new GrMtlPipelineState(fGpu,
+ pipelineState,
+ pipelineDescriptor.colorAttachments[0].pixelFormat,
+ GrMtlBuffer::Create(fGpu,
+ fUniformHandler.fCurrentGeometryUBOOffset,
+ kVertex_GrBufferType,
+ kStatic_GrAccessPattern),
+ GrMtlBuffer::Create(fGpu,
+ fUniformHandler.fCurrentFragmentUBOOffset,
+ kVertex_GrBufferType,
+ kStatic_GrAccessPattern));
}
diff --git a/src/gpu/mtl/GrMtlUniformHandler.h b/src/gpu/mtl/GrMtlUniformHandler.h
new file mode 100644
index 0000000..4ee5a36
--- /dev/null
+++ b/src/gpu/mtl/GrMtlUniformHandler.h
@@ -0,0 +1,102 @@
+/*
+* Copyright 2018 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrMtlUniformHandler_DEFINED
+#define GrMtlUniformHandler_DEFINED
+
+#include "GrAllocator.h"
+#include "GrShaderVar.h"
+#include "glsl/GrGLSLUniformHandler.h"
+
+// TODO: this class is basically copy and pasted from GrVkUniformHandler so that we can have
+// some shaders working. The SkSL Metal code generator was written to work with GLSL generated for
+// the Ganesh Vulkan backend, so it should all work. There might be better ways to do things in
+// Metal and/or some Vulkan GLSLisms left in.
+class GrMtlUniformHandler : public GrGLSLUniformHandler {
+public:
+ static const int kUniformsPerBlock = 8;
+
+ enum {
+ kGeometryBinding = 0,
+ kFragBinding = 1,
+ kLastUniformBinding = kFragBinding,
+ };
+
+ // fUBOffset is only valid if the GrSLType of the fVariable is not a sampler
+ struct UniformInfo {
+ GrShaderVar fVariable;
+ uint32_t fVisibility;
+ uint32_t fUBOffset;
+ };
+ typedef GrTAllocator<UniformInfo> UniformInfoArray;
+
+ const GrShaderVar& getUniformVariable(UniformHandle u) const override {
+ return fUniforms[u.toIndex()].fVariable;
+ }
+
+ const char* getUniformCStr(UniformHandle u) const override {
+ return this->getUniformVariable(u).c_str();
+ }
+
+private:
+ explicit GrMtlUniformHandler(GrGLSLProgramBuilder* program)
+ : INHERITED(program)
+ , fUniforms(kUniformsPerBlock)
+ , fSamplers(kUniformsPerBlock)
+ , fCurrentGeometryUBOOffset(0)
+ , fCurrentFragmentUBOOffset(0) {
+ }
+
+ UniformHandle internalAddUniformArray(uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) override;
+
+ SamplerHandle addSampler(uint32_t visibility,
+ GrSwizzle swizzle,
+ GrTextureType type,
+ GrSLPrecision precision,
+ const char* name) override;
+
+ int numSamplers() const { return fSamplers.count(); }
+ const GrShaderVar& samplerVariable(SamplerHandle handle) const override {
+ return fSamplers[handle.toIndex()].fVariable;
+ }
+ GrSwizzle samplerSwizzle(SamplerHandle handle) const override {
+ return fSamplerSwizzles[handle.toIndex()];
+ }
+ uint32_t samplerVisibility(SamplerHandle handle) const {
+ return fSamplers[handle.toIndex()].fVisibility;
+ }
+
+ void appendUniformDecls(GrShaderFlags, SkString*) const override;
+
+ bool hasGeometryUniforms() const { return fCurrentGeometryUBOOffset > 0; }
+ bool hasFragmentUniforms() const { return fCurrentFragmentUBOOffset > 0; }
+
+
+ const UniformInfo& getUniformInfo(UniformHandle u) const {
+ return fUniforms[u.toIndex()];
+ }
+
+
+ UniformInfoArray fUniforms;
+ UniformInfoArray fSamplers;
+ SkTArray<GrSwizzle> fSamplerSwizzles;
+
+ uint32_t fCurrentGeometryUBOOffset;
+ uint32_t fCurrentFragmentUBOOffset;
+
+ friend class GrMtlPipelineStateBuilder;
+
+ typedef GrGLSLUniformHandler INHERITED;
+};
+
+#endif
diff --git a/src/gpu/mtl/GrMtlUniformHandler.mm b/src/gpu/mtl/GrMtlUniformHandler.mm
new file mode 100644
index 0000000..a25d940
--- /dev/null
+++ b/src/gpu/mtl/GrMtlUniformHandler.mm
@@ -0,0 +1,361 @@
+/*
+* Copyright 2018 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#include "GrMtlUniformHandler.h"
+#include "glsl/GrGLSLProgramBuilder.h"
+
+// TODO: this class is basically copy and pasted from GrVkUniformHandler so that we can have
+// some shaders working. The SkSL Metal code generator was written to work with GLSL generated for
+// the Ganesh Vulkan backend, so it should all work. There might be better ways to do things in
+// Metal and/or some Vulkan GLSLisms left in.
+
+// To determine whether a current offset is aligned, we can just 'and' the lowest bits with the
+// alignment mask. A value of 0 means aligned, any other value is how many bytes past alignment we
+// are. This works since all alignments are powers of 2. The mask is always (alignment - 1).
+// This alignment mask will give correct alignments for using the std430 block layout. If you want
+// the std140 alignment, you can use this, but then make sure if you have an array type it is
+// aligned to 16 bytes (i.e. has mask of 0xF).
+// These are designated in the Vulkan spec, section 14.5.4 "Offset and Stride Assignment".
+// https://www.khronos.org/registry/vulkan/specs/1.0-wsi_extensions/html/vkspec.html#interfaces-resources-layout
+uint32_t grsltype_to_alignment_mask(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType: // fall through
+ case kUByte_GrSLType:
+ return 0x0;
+ case kByte2_GrSLType: // fall through
+ case kUByte2_GrSLType:
+ return 0x1;
+ case kByte3_GrSLType: // fall through
+ case kByte4_GrSLType:
+ case kUByte3_GrSLType:
+ case kUByte4_GrSLType:
+ return 0x3;
+ case kShort_GrSLType: // fall through
+ case kUShort_GrSLType:
+ return 0x1;
+ case kShort2_GrSLType: // fall through
+ case kUShort2_GrSLType:
+ return 0x3;
+ case kShort3_GrSLType: // fall through
+ case kShort4_GrSLType:
+ case kUShort3_GrSLType:
+ case kUShort4_GrSLType:
+ return 0x7;
+ case kInt_GrSLType:
+ case kUint_GrSLType:
+ return 0x3;
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return 0x3;
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 0x7;
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 0xF;
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 0xF;
+ case kUint2_GrSLType:
+ return 0x7;
+ case kInt2_GrSLType:
+ return 0x7;
+ case kInt3_GrSLType:
+ return 0xF;
+ case kInt4_GrSLType:
+ return 0xF;
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+ return 0x7;
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 0xF;
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 0xF;
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+ return 0;
+}
+
+/** Returns the size in bytes taken up in Metal buffers for GrSLTypes. */
+static inline uint32_t grsltype_to_mtl_size(GrSLType type) {
+ switch(type) {
+ case kByte_GrSLType:
+ return sizeof(int8_t);
+ case kByte2_GrSLType:
+ return 2 * sizeof(int8_t);
+ case kByte3_GrSLType:
+ return 3 * sizeof(int8_t);
+ case kByte4_GrSLType:
+ return 4 * sizeof(int8_t);
+ case kUByte_GrSLType:
+ return sizeof(uint8_t);
+ case kUByte2_GrSLType:
+ return 2 * sizeof(uint8_t);
+ case kUByte3_GrSLType:
+ return 3 * sizeof(uint8_t);
+ case kUByte4_GrSLType:
+ return 4 * sizeof(uint8_t);
+ case kShort_GrSLType:
+ return sizeof(int16_t);
+ case kShort2_GrSLType:
+ return 2 * sizeof(int16_t);
+ case kShort3_GrSLType:
+ return 3 * sizeof(int16_t);
+ case kShort4_GrSLType:
+ return 4 * sizeof(int16_t);
+ case kUShort_GrSLType:
+ return sizeof(uint16_t);
+ case kUShort2_GrSLType:
+ return 2 * sizeof(uint16_t);
+ case kUShort3_GrSLType:
+ return 3 * sizeof(uint16_t);
+ case kUShort4_GrSLType:
+ return 4 * sizeof(uint16_t);
+ case kInt_GrSLType:
+ return sizeof(int32_t);
+ case kUint_GrSLType:
+ return sizeof(int32_t);
+ case kHalf_GrSLType: // fall through
+ case kFloat_GrSLType:
+ return sizeof(float);
+ case kHalf2_GrSLType: // fall through
+ case kFloat2_GrSLType:
+ return 2 * sizeof(float);
+ case kHalf3_GrSLType: // fall through
+ case kFloat3_GrSLType:
+ return 3 * sizeof(float);
+ case kHalf4_GrSLType: // fall through
+ case kFloat4_GrSLType:
+ return 4 * sizeof(float);
+ case kUint2_GrSLType:
+ return 2 * sizeof(uint32_t);
+ case kInt2_GrSLType:
+ return 2 * sizeof(int32_t);
+ case kInt3_GrSLType:
+ return 3 * sizeof(int32_t);
+ case kInt4_GrSLType:
+ return 4 * sizeof(int32_t);
+ case kHalf2x2_GrSLType: // fall through
+ case kFloat2x2_GrSLType:
+ //TODO: this will be 4 * szof(float) on std430.
+ return 8 * sizeof(float);
+ case kHalf3x3_GrSLType: // fall through
+ case kFloat3x3_GrSLType:
+ return 12 * sizeof(float);
+ case kHalf4x4_GrSLType: // fall through
+ case kFloat4x4_GrSLType:
+ return 16 * sizeof(float);
+
+ // This query is only valid for certain types.
+ case kVoid_GrSLType:
+ case kBool_GrSLType:
+ case kTexture2DSampler_GrSLType:
+ case kTextureExternalSampler_GrSLType:
+ case kTexture2DRectSampler_GrSLType:
+ break;
+ }
+ SK_ABORT("Unexpected type");
+ return 0;
+}
+
+// TODO: The SkSL compiler assumes Metal sizes and alignments are calculated by the std140 layout
+// This will need to be changed along with the MSL generator to match the Metal standard instead.
+//
+// Given the current offset into the ubo, calculate the offset for the uniform we're trying to add
+// taking into consideration all alignment requirements. The uniformOffset is set to the offset for
+// the new uniform, and currentOffset is updated to be the offset to the end of the new uniform.
+void get_ubo_aligned_offset(uint32_t* uniformOffset,
+ uint32_t* currentOffset,
+ GrSLType type,
+ int arrayCount) {
+ uint32_t alignmentMask = grsltype_to_alignment_mask(type);
+ // We want to use the std140 layout here, so we must make arrays align to 16 bytes.
+ if (arrayCount || type == kFloat2x2_GrSLType) {
+ alignmentMask = 0xF;
+ }
+ uint32_t offsetDiff = *currentOffset & alignmentMask;
+ if (offsetDiff != 0) {
+ offsetDiff = alignmentMask - offsetDiff + 1;
+ }
+ *uniformOffset = *currentOffset + offsetDiff;
+ SkASSERT(sizeof(float) == 4);
+ if (arrayCount) {
+ uint32_t elementSize = SkTMax<uint32_t>(16, grsltype_to_mtl_size(type));
+ SkASSERT(0 == (elementSize & 0xF));
+ *currentOffset = *uniformOffset + elementSize * arrayCount;
+ } else {
+ *currentOffset = *uniformOffset + grsltype_to_mtl_size(type);
+ }
+}
+
+GrGLSLUniformHandler::UniformHandle GrMtlUniformHandler::internalAddUniformArray(
+ uint32_t visibility,
+ GrSLType type,
+ GrSLPrecision precision,
+ const char* name,
+ bool mangleName,
+ int arrayCount,
+ const char** outName) {
+ SkASSERT(name && strlen(name));
+    // For now asserting that the visibility is either geometry types (vertex, tessellation,
+ // etc.) or only fragment.
+ SkASSERT(kVertex_GrShaderFlag == visibility ||
+ kGeometry_GrShaderFlag == visibility ||
+ (kVertex_GrShaderFlag | kGeometry_GrShaderFlag) == visibility ||
+ kFragment_GrShaderFlag == visibility);
+ SkASSERT(kDefault_GrSLPrecision == precision || GrSLTypeIsFloatType(type));
+ GrSLTypeIsFloatType(type);
+
+ UniformInfo& uni = fUniforms.push_back();
+ uni.fVariable.setType(type);
+ // TODO this is a bit hacky, lets think of a better way. Basically we need to be able to use
+ // the uniform view matrix name in the GP, and the GP is immutable so it has to tell the PB
+ // exactly what name it wants to use for the uniform view matrix. If we prefix anythings, then
+ // the names will mismatch. I think the correct solution is to have all GPs which need the
+ // uniform view matrix, they should upload the view matrix in their setData along with regular
+ // uniforms.
+ char prefix = 'u';
+ if ('u' == name[0] || !strncmp(name, GR_NO_MANGLE_PREFIX, strlen(GR_NO_MANGLE_PREFIX))) {
+ prefix = '\0';
+ }
+ fProgramBuilder->nameVariable(uni.fVariable.accessName(), prefix, name, mangleName);
+ uni.fVariable.setArrayCount(arrayCount);
+ uni.fVisibility = visibility;
+ uni.fVariable.setPrecision(precision);
+    // When outputting the GLSL, only the outer uniform block will get the Uniform modifier. Thus
+ // we set the modifier to none for all uniforms declared inside the block.
+ uni.fVariable.setTypeModifier(GrShaderVar::kNone_TypeModifier);
+
+ uint32_t* currentOffset;
+ uint32_t geomStages = kVertex_GrShaderFlag | kGeometry_GrShaderFlag;
+ if (geomStages & visibility) {
+ currentOffset = &fCurrentGeometryUBOOffset;
+ } else {
+ SkASSERT(kFragment_GrShaderFlag == visibility);
+ currentOffset = &fCurrentFragmentUBOOffset;
+ }
+ get_ubo_aligned_offset(&uni.fUBOffset, currentOffset, type, arrayCount);
+
+ SkString layoutQualifier;
+ layoutQualifier.appendf("offset=%d", uni.fUBOffset);
+ uni.fVariable.addLayoutQualifier(layoutQualifier.c_str());
+
+ if (outName) {
+ *outName = uni.fVariable.c_str();
+ }
+
+ return GrGLSLUniformHandler::UniformHandle(fUniforms.count() - 1);
+}
+
+GrGLSLUniformHandler::SamplerHandle GrMtlUniformHandler::addSampler(uint32_t visibility,
+ GrSwizzle swizzle,
+ GrTextureType type,
+ GrSLPrecision precision,
+ const char* name) {
+ SkASSERT(name && strlen(name));
+    // For now asserting that the visibility is either only vertex, geometry, or fragment
+ SkASSERT(kVertex_GrShaderFlag == visibility ||
+ kFragment_GrShaderFlag == visibility ||
+ kGeometry_GrShaderFlag == visibility);
+ SkString mangleName;
+ char prefix = 'u';
+ fProgramBuilder->nameVariable(&mangleName, prefix, name, true);
+
+ UniformInfo& info = fSamplers.push_back();
+ info.fVariable.setType(GrSLCombinedSamplerTypeForTextureType(type));
+ info.fVariable.setTypeModifier(GrShaderVar::kUniform_TypeModifier);
+ info.fVariable.setPrecision(precision);
+ info.fVariable.setName(mangleName);
+ SkString layoutQualifier;
+ layoutQualifier.appendf("binding=%d", fSamplers.count() - 1);
+ info.fVariable.addLayoutQualifier(layoutQualifier.c_str());
+ info.fVisibility = visibility;
+ info.fUBOffset = 0;
+ fSamplerSwizzles.push_back(swizzle);
+ SkASSERT(fSamplerSwizzles.count() == fSamplers.count());
+ return GrGLSLUniformHandler::SamplerHandle(fSamplers.count() - 1);
+}
+
+void GrMtlUniformHandler::appendUniformDecls(GrShaderFlags visibility, SkString* out) const {
+ SkASSERT(kVertex_GrShaderFlag == visibility ||
+ kGeometry_GrShaderFlag == visibility ||
+ kFragment_GrShaderFlag == visibility);
+
+ for (int i = 0; i < fSamplers.count(); ++i) {
+ const UniformInfo& sampler = fSamplers[i];
+ SkASSERT(sampler.fVariable.getType() == kTexture2DSampler_GrSLType);
+ if (visibility == sampler.fVisibility) {
+ sampler.fVariable.appendDecl(fProgramBuilder->shaderCaps(), out);
+ out->append(";\n");
+ }
+ }
+
+#ifdef SK_DEBUG
+ bool firstGeomOffsetCheck = false;
+ bool firstFragOffsetCheck = false;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (kVertex_GrShaderFlag == localUniform.fVisibility ||
+ kGeometry_GrShaderFlag == localUniform.fVisibility ||
+ (kVertex_GrShaderFlag | kGeometry_GrShaderFlag) == localUniform.fVisibility) {
+ if (!firstGeomOffsetCheck) {
+ // Check to make sure we are starting our offset at 0 so the offset qualifier we
+ // set on each variable in the uniform block is valid.
+ SkASSERT(0 == localUniform.fUBOffset);
+ firstGeomOffsetCheck = true;
+ }
+ } else {
+ SkASSERT(kFragment_GrShaderFlag == localUniform.fVisibility);
+ if (!firstFragOffsetCheck) {
+ // Check to make sure we are starting our offset at 0 so the offset qualifier we
+ // set on each variable in the uniform block is valid.
+ SkASSERT(0 == localUniform.fUBOffset);
+ firstFragOffsetCheck = true;
+ }
+ }
+ }
+#endif
+
+ SkString uniformsString;
+ for (int i = 0; i < fUniforms.count(); ++i) {
+ const UniformInfo& localUniform = fUniforms[i];
+ if (visibility & localUniform.fVisibility) {
+ if (GrSLTypeIsFloatType(localUniform.fVariable.getType())) {
+ localUniform.fVariable.appendDecl(fProgramBuilder->shaderCaps(), &uniformsString);
+ uniformsString.append(";\n");
+ }
+ }
+ }
+
+ if (!uniformsString.isEmpty()) {
+ uint32_t uniformBinding;
+ const char* stage;
+ if (kVertex_GrShaderFlag == visibility) {
+ uniformBinding = kGeometryBinding;
+ stage = "vertex";
+ } else if (kGeometry_GrShaderFlag == visibility) {
+ uniformBinding = kGeometryBinding;
+ stage = "geometry";
+ } else {
+ SkASSERT(kFragment_GrShaderFlag == visibility);
+ uniformBinding = kFragBinding;
+ stage = "fragment";
+ }
+ out->appendf("layout (binding=%d) uniform %sUniformBuffer\n{\n", uniformBinding, stage);
+ out->appendf("%s\n};\n", uniformsString.c_str());
+ }
+}
diff --git a/src/gpu/mtl/GrMtlVaryingHandler.h b/src/gpu/mtl/GrMtlVaryingHandler.h
new file mode 100644
index 0000000..ee71583
--- /dev/null
+++ b/src/gpu/mtl/GrMtlVaryingHandler.h
@@ -0,0 +1,27 @@
+/*
+* Copyright 2018 Google Inc.
+*
+* Use of this source code is governed by a BSD-style license that can be
+* found in the LICENSE file.
+*/
+
+#ifndef GrMtlVaryingHandler_DEFINED
+#define GrMtlVaryingHandler_DEFINED
+
+#include "glsl/GrGLSLVarying.h"
+
+class GrMtlVaryingHandler : public GrGLSLVaryingHandler {
+public:
+ GrMtlVaryingHandler(GrGLSLProgramBuilder* program) : INHERITED(program) {}
+
+ typedef GrGLSLVaryingHandler::VarArray VarArray;
+
+private:
+ void onFinalize() override;
+
+ friend class GrMtlPipelineStateBuilder;
+
+ typedef GrGLSLVaryingHandler INHERITED;
+};
+
+#endif
diff --git a/src/gpu/mtl/GrMtlVaryingHandler.mm b/src/gpu/mtl/GrMtlVaryingHandler.mm
new file mode 100644
index 0000000..096c989
--- /dev/null
+++ b/src/gpu/mtl/GrMtlVaryingHandler.mm
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2018 Google Inc.
+ *
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "GrMtlVaryingHandler.h"
+
+void finalize_helper(GrMtlVaryingHandler::VarArray& vars) {
+ int locationIndex;
+ for (locationIndex = 0; locationIndex < vars.count(); locationIndex++) {
+ GrShaderVar& var = vars[locationIndex];
+ // Metal only allows scalars (including bool and char) and vectors as varyings
+ SkASSERT(GrSLTypeVecLength(var.getType()) != -1);
+
+ SkString location;
+ location.appendf("location = %d", locationIndex);
+ var.addLayoutQualifier(location.c_str());
+ }
+ // The max number of inputs is 60 for iOS and 32 for macOS. The max number of components is 60
+ // for iOS and 128 for macOS. To be conservative, we are going to assert that we have less than
+ // 15 varyings because in the worst case scenario, they are all vec4s (15 * 4 = 60). If we hit
+ // this assert, we can implement a function in GrMtlCaps to be less conservative.
+ SkASSERT(locationIndex <= 15);
+}
+
+void GrMtlVaryingHandler::onFinalize() {
+ finalize_helper(fVertexInputs);
+ finalize_helper(fVertexOutputs);
+ finalize_helper(fGeomInputs);
+ finalize_helper(fGeomOutputs);
+ finalize_helper(fFragInputs);
+ finalize_helper(fFragOutputs);
+}