/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrMtlPipelineState.h"

#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrPipeline.h"
#include "GrRenderTarget.h"
#include "GrRenderTargetPriv.h"
#include "GrTexturePriv.h"
#include "GrMtlBuffer.h"
#include "GrMtlGpu.h"
#include "GrMtlSampler.h"
#include "GrMtlTexture.h"
#include "glsl/GrGLSLFragmentProcessor.h"
#include "glsl/GrGLSLGeometryProcessor.h"
#include "glsl/GrGLSLXferProcessor.h"

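// Pairs the MTLTexture backing a GrTexture with an MTLSamplerState built from the given
// GrSamplerState, so both can be bound together on the command encoder at draw time.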
GrMtlPipelineState::SamplerBindings::SamplerBindings(const GrSamplerState& state,
                                                     GrTexture* texture,
                                                     GrMtlGpu* gpu)
        : fTexture(static_cast<GrMtlTexture*>(texture)->mtlTexture()) {
    // TODO: use resource provider to get sampler.
    std::unique_ptr<GrMtlSampler> sampler(
            GrMtlSampler::Create(gpu, state, texture->texturePriv().maxMipMapLevel()));
    fSampler = sampler->mtlSamplerState();
}

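// Takes ownership of the uniform buffers and the generated processor implementations.
// The data manager is sized from the geometry and fragment uniform buffers so uniform
// data can be staged CPU-side before upload.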
GrMtlPipelineState::GrMtlPipelineState(
        GrMtlGpu* gpu,
        id<MTLRenderPipelineState> pipelineState,
        MTLPixelFormat pixelFormat,
        const GrGLSLBuiltinUniformHandles& builtinUniformHandles,
        const UniformInfoArray& uniforms,
        sk_sp<GrMtlBuffer> geometryUniformBuffer,
        sk_sp<GrMtlBuffer> fragmentUniformBuffer,
        uint32_t numSamplers,
        std::unique_ptr<GrGLSLPrimitiveProcessor> geometryProcessor,
        std::unique_ptr<GrGLSLXferProcessor> xferProcessor,
        std::unique_ptr<std::unique_ptr<GrGLSLFragmentProcessor>[]> fragmentProcessors,
        int fragmentProcessorCnt)
        : fGpu(gpu)
        , fPipelineState(pipelineState)
        , fPixelFormat(pixelFormat)
        , fBuiltinUniformHandles(builtinUniformHandles)
        , fGeometryUniformBuffer(std::move(geometryUniformBuffer))
        , fFragmentUniformBuffer(std::move(fragmentUniformBuffer))
        , fNumSamplers(numSamplers)
        , fGeometryProcessor(std::move(geometryProcessor))
        , fXferProcessor(std::move(xferProcessor))
        , fFragmentProcessors(std::move(fragmentProcessors))
        , fFragmentProcessorCnt(fragmentProcessorCnt)
        , fDataManager(uniforms, fGeometryUniformBuffer->sizeInBytes(),
                       fFragmentUniformBuffer->sizeInBytes()) {
    (void) fPixelFormat; // Suppress unused-var warning.
}

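// Pushes per-draw data into the pipeline state: render target adjustments, uniform values
// from the geometry, fragment, and xfer processors, and the texture/sampler bindings
// (including the dst-copy texture when one is present). Uniforms are then uploaded to the
// Metal buffers, and stencil settings are captured if the pipeline has stencil enabled.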
void GrMtlPipelineState::setData(const GrPrimitiveProcessor& primProc,
                                 const GrPipeline& pipeline,
                                 const GrTextureProxy* const primProcTextures[]) {
    SkASSERT(primProcTextures || !primProc.numTextureSamplers());

    this->setRenderTargetState(pipeline.proxy());
    fGeometryProcessor->setData(fDataManager, primProc,
                                GrFragmentProcessor::CoordTransformIter(pipeline));
    fSamplerBindings.reset();
    for (int i = 0; i < primProc.numTextureSamplers(); ++i) {
        const auto& sampler = primProc.textureSampler(i);
        auto texture = static_cast<GrMtlTexture*>(primProcTextures[i]->peekTexture());
        fSamplerBindings.emplace_back(sampler.samplerState(), texture, fGpu);
    }

    GrFragmentProcessor::Iter iter(pipeline);
    GrGLSLFragmentProcessor::Iter glslIter(fFragmentProcessors.get(), fFragmentProcessorCnt);
    const GrFragmentProcessor* fp = iter.next();
    GrGLSLFragmentProcessor* glslFP = glslIter.next();
    while (fp && glslFP) {
        glslFP->setData(fDataManager, *fp);
        for (int i = 0; i < fp->numTextureSamplers(); ++i) {
            const auto& sampler = fp->textureSampler(i);
            fSamplerBindings.emplace_back(sampler.samplerState(), sampler.peekTexture(), fGpu);
        }
        fp = iter.next();
        glslFP = glslIter.next();
    }
    SkASSERT(!fp && !glslFP);

    {
        SkIPoint offset;
        GrTexture* dstTexture = pipeline.peekDstTexture(&offset);

        fXferProcessor->setData(fDataManager, pipeline.getXferProcessor(), dstTexture, offset);
    }

    if (GrTextureProxy* dstTextureProxy = pipeline.dstTextureProxy()) {
        fSamplerBindings.emplace_back(GrSamplerState::ClampNearest(),
                                      dstTextureProxy->peekTexture(),
                                      fGpu);
    }

    SkASSERT(fNumSamplers == fSamplerBindings.count());
    if (fGeometryUniformBuffer || fFragmentUniformBuffer) {
        fDataManager.uploadUniformBuffers(fGpu, fGeometryUniformBuffer.get(),
                                          fFragmentUniformBuffer.get());
    }

    if (pipeline.isStencilEnabled()) {
        GrRenderTarget* rt = pipeline.renderTarget();
        SkASSERT(rt->renderTargetPriv().getStencilAttachment());
        fStencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(),
                       rt->renderTargetPriv().numStencilBits());
    }
}

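// Binds the geometry and fragment uniform buffers plus every texture/sampler pair collected
// in setData onto the given render command encoder.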
void GrMtlPipelineState::bind(id<MTLRenderCommandEncoder> renderCmdEncoder) {
    if (fGeometryUniformBuffer) {
        [renderCmdEncoder setVertexBuffer: fGeometryUniformBuffer->mtlBuffer()
                                   offset: 0
                                  atIndex: GrMtlUniformHandler::kGeometryBinding];
    }
    if (fFragmentUniformBuffer) {
        [renderCmdEncoder setFragmentBuffer: fFragmentUniformBuffer->mtlBuffer()
                                     offset: 0
                                    atIndex: GrMtlUniformHandler::kFragBinding];
    }
    SkASSERT(fNumSamplers == fSamplerBindings.count());
    for (int index = 0; index < fNumSamplers; ++index) {
        [renderCmdEncoder setFragmentTexture: fSamplerBindings[index].fTexture
                                     atIndex: index];
        [renderCmdEncoder setFragmentSamplerState: fSamplerBindings[index].fSampler
                                          atIndex: index];
    }
}

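// Updates the built-in RT-height and RT-adjustment uniforms when the render target's size or
// origin has changed since the last draw.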
void GrMtlPipelineState::setRenderTargetState(const GrRenderTargetProxy* proxy) {
    GrRenderTarget* rt = proxy->peekRenderTarget();

    // Load the RT height uniform if it is needed to y-flip gl_FragCoord.
    if (fBuiltinUniformHandles.fRTHeightUni.isValid() &&
        fRenderTargetState.fRenderTargetSize.fHeight != rt->height()) {
        fDataManager.set1f(fBuiltinUniformHandles.fRTHeightUni, SkIntToScalar(rt->height()));
    }

    // set RT adjustment
    SkISize size;
    size.set(rt->width(), rt->height());
    SkASSERT(fBuiltinUniformHandles.fRTAdjustmentUni.isValid());
    if (fRenderTargetState.fRenderTargetOrigin != proxy->origin() ||
        fRenderTargetState.fRenderTargetSize != size) {
        fRenderTargetState.fRenderTargetSize = size;
        fRenderTargetState.fRenderTargetOrigin = proxy->origin();

        float rtAdjustmentVec[4];
        fRenderTargetState.getRTAdjustmentVec(rtAdjustmentVec);
        fDataManager.set4fv(fBuiltinUniformHandles.fRTAdjustmentUni, 1, rtAdjustmentVec);
    }
}

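// Returns true if the blend coefficient reads the constant blend color.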
static bool blend_coeff_refs_constant(GrBlendCoeff coeff) {
    switch (coeff) {
        case kConstC_GrBlendCoeff:
        case kIConstC_GrBlendCoeff:
        case kConstA_GrBlendCoeff:
        case kIConstA_GrBlendCoeff:
            return true;
        default:
            return false;
    }
}

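// If either blend coefficient references the constant color, swizzles the blend constant to
// match the shader's output format and sets it on the encoder.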
void GrMtlPipelineState::setBlendConstants(id<MTLRenderCommandEncoder> renderCmdEncoder,
                                           GrPixelConfig config,
                                           const GrXferProcessor& xferProcessor) {
    if (!renderCmdEncoder) {
        return;
    }

    GrXferProcessor::BlendInfo blendInfo;
    xferProcessor.getBlendInfo(&blendInfo);
    GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
    GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
    if (blend_coeff_refs_constant(srcCoeff) || blend_coeff_refs_constant(dstCoeff)) {
        // Swizzle the blend to match what the shader will output.
        const GrSwizzle& swizzle = fGpu->caps()->shaderCaps()->configOutputSwizzle(config);
        SkPMColor4f blendConst = swizzle.applyTo(blendInfo.fBlendConstant);

        [renderCmdEncoder setBlendColorRed: blendConst.fR
                                     green: blendConst.fG
                                      blue: blendConst.fB
                                     alpha: blendConst.fA];
    }
}

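// Maps a GrStencilOp onto the corresponding MTLStencilOperation.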
MTLStencilOperation skia_stencil_op_to_mtl(GrStencilOp op) {
    switch (op) {
        case GrStencilOp::kKeep:
            return MTLStencilOperationKeep;
        case GrStencilOp::kZero:
            return MTLStencilOperationZero;
        case GrStencilOp::kReplace:
            return MTLStencilOperationReplace;
        case GrStencilOp::kInvert:
            return MTLStencilOperationInvert;
        case GrStencilOp::kIncWrap:
            return MTLStencilOperationIncrementWrap;
        case GrStencilOp::kDecWrap:
            return MTLStencilOperationDecrementWrap;
        case GrStencilOp::kIncClamp:
            return MTLStencilOperationIncrementClamp;
        case GrStencilOp::kDecClamp:
            return MTLStencilOperationDecrementClamp;
    }
}

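// Builds an MTLStencilDescriptor (compare function, read/write masks, pass/fail ops) from one
// face of Skia's stencil settings.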
MTLStencilDescriptor* skia_stencil_to_mtl(GrStencilSettings::Face face) {
    MTLStencilDescriptor* result = [[MTLStencilDescriptor alloc] init];
    switch (face.fTest) {
        case GrStencilTest::kAlways:
            result.stencilCompareFunction = MTLCompareFunctionAlways;
            break;
        case GrStencilTest::kNever:
            result.stencilCompareFunction = MTLCompareFunctionNever;
            break;
        case GrStencilTest::kGreater:
            result.stencilCompareFunction = MTLCompareFunctionGreater;
            break;
        case GrStencilTest::kGEqual:
            result.stencilCompareFunction = MTLCompareFunctionGreaterEqual;
            break;
        case GrStencilTest::kLess:
            result.stencilCompareFunction = MTLCompareFunctionLess;
            break;
        case GrStencilTest::kLEqual:
            result.stencilCompareFunction = MTLCompareFunctionLessEqual;
            break;
        case GrStencilTest::kEqual:
            result.stencilCompareFunction = MTLCompareFunctionEqual;
            break;
        case GrStencilTest::kNotEqual:
            result.stencilCompareFunction = MTLCompareFunctionNotEqual;
            break;
    }
    result.readMask = face.fTestMask;
    result.writeMask = face.fWriteMask;
    result.depthStencilPassOperation = skia_stencil_op_to_mtl(face.fPassOp);
    result.stencilFailureOperation = skia_stencil_op_to_mtl(face.fFailOp);
    return result;
}

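// Creates and binds a depth/stencil state: a default (no-op) descriptor when stencil is
// disabled, otherwise front/back stencil descriptors and reference values derived from fStencil.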
void GrMtlPipelineState::setDepthStencilState(id<MTLRenderCommandEncoder> renderCmdEncoder) {
    if (fStencil.isDisabled()) {
        MTLDepthStencilDescriptor* desc = [[MTLDepthStencilDescriptor alloc] init];
        id<MTLDepthStencilState> state = [fGpu->device() newDepthStencilStateWithDescriptor:desc];
        [renderCmdEncoder setDepthStencilState:state];
    }
    else {
        MTLDepthStencilDescriptor* desc = [[MTLDepthStencilDescriptor alloc] init];
        desc.frontFaceStencil = skia_stencil_to_mtl(fStencil.front());
        if (fStencil.isTwoSided()) {
            desc.backFaceStencil = skia_stencil_to_mtl(fStencil.back());
            [renderCmdEncoder setStencilFrontReferenceValue:fStencil.front().fRef
                                         backReferenceValue:fStencil.back().fRef];
        }
        else {
            desc.backFaceStencil = desc.frontFaceStencil;
            [renderCmdEncoder setStencilReferenceValue:fStencil.front().fRef];
        }
        id<MTLDepthStencilState> state = [fGpu->device() newDepthStencilStateWithDescriptor:desc];
        [renderCmdEncoder setDepthStencilState:state];
    }
}