/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "src/gpu/GrUniformDataManager.h"

#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrShaderVar.h"

// ensure that these types are the sizes the uniform data is expecting
static_assert(sizeof(int32_t) == 4);
static_assert(sizeof(float) == 4);

//////////////////////////////////////////////////////////////////////////////

GrUniformDataManager::UniformManager::UniformManager(ProgramUniforms uniforms, Layout layout)
        : fUniforms(std::move(uniforms)), fLayout(layout) {}

template <typename BaseType> static constexpr size_t tight_vec_size(int vecLength) {
    return sizeof(BaseType) * vecLength;
}

/**
 * From Section 7.6.2.2 "Standard Uniform Block Layout":
 * 1. If the member is a scalar consuming N basic machine units, the base alignment is N.
 * 2. If the member is a two- or four-component vector with components consuming N basic machine
 *    units, the base alignment is 2N or 4N, respectively.
 * 3. If the member is a three-component vector with components consuming N
 *    basic machine units, the base alignment is 4N.
 * 4. If the member is an array of scalars or vectors, the base alignment and array
 *    stride are set to match the base alignment of a single array element, according
 *    to rules (1), (2), and (3), and rounded up to the base alignment of a vec4. The
 *    array may have padding at the end; the base offset of the member following
 *    the array is rounded up to the next multiple of the base alignment.
 * 5. If the member is a column-major matrix with C columns and R rows, the
 *    matrix is stored identically to an array of C column vectors with R components each,
 *    according to rule (4).
 * 6. If the member is an array of S column-major matrices with C columns and
 *    R rows, the matrix is stored identically to a row of S × C column vectors
 *    with R components each, according to rule (4).
 * 7. If the member is a row-major matrix with C columns and R rows, the matrix
 *    is stored identically to an array of R row vectors with C components each,
 *    according to rule (4).
 * 8. If the member is an array of S row-major matrices with C columns and R
 *    rows, the matrix is stored identically to a row of S × R row vectors with C
 *    components each, according to rule (4).
 * 9. If the member is a structure, the base alignment of the structure is N, where
 *    N is the largest base alignment value of any of its members, and rounded
 *    up to the base alignment of a vec4. The individual members of this substructure are then
 *    assigned offsets by applying this set of rules recursively,
 *    where the base offset of the first member of the sub-structure is equal to the
 *    aligned offset of the structure. The structure may have padding at the end;
 *    the base offset of the member following the sub-structure is rounded up to
 *    the next multiple of the base alignment of the structure.
 * 10. If the member is an array of S structures, the S elements of the array are laid
 *     out in order, according to rule (9).
 */
template <typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
struct Rules140 {
    /**
     * For an array of scalars or vectors this returns the stride between array elements. For
     * matrices or arrays of matrices this returns the stride between columns of the matrix. Note
     * that for single (non-array) scalars or vectors we don't require a stride.
     */
    static constexpr size_t Stride(int count) {
        SkASSERT(count >= 1 || count == GrShaderVar::kNonArray);
        static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
        static_assert(Cols >= 1 && Cols <= 4);
        if (Cols != 1) {
            // This is a matrix or array of matrices. We return the stride between columns.
            SkASSERT(RowsOrVecLength > 1);
            return Rules140<BaseType, RowsOrVecLength>::Stride(1);
        }
        if (count == 0) {
            // Stride doesn't matter for a non-array.
            return 0;
        }

        // Rule 4.

        // Alignment of vec4 by Rule 2.
        constexpr size_t kVec4Alignment = tight_vec_size<float>(4);
        // Get alignment of a single vector of BaseType by Rule 1, 2, or 3
        int n = RowsOrVecLength == 3 ? 4 : RowsOrVecLength;
        size_t kElementAlignment = tight_vec_size<BaseType>(n);
        // Round kElementAlignment up to multiple of kVec4Alignment.
        size_t m = (kElementAlignment + kVec4Alignment - 1)/kVec4Alignment;
        return m*kVec4Alignment;
    }
};
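
// A few concrete values these rules produce for the types handled in this file (each number
// follows directly from Stride() above):
//   Rules140<float>::Stride(n)       == 16   // scalar array elements are padded to a vec4 slot
//   Rules140<float, 2>::Stride(n)    == 16
//   Rules140<float, 3>::Stride(n)    == 16
//   Rules140<float, 4>::Stride(n)    == 16
//   Rules140<float, 3, 3>::Stride(n) == 16   // stride between the columns of a float3x3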

/**
 * When using the std430 storage layout, shader storage blocks will be laid out in buffer storage
 * identically to uniform and shader storage blocks using the std140 layout, except that the base
 * alignment and stride of arrays of scalars and vectors in rule 4 and of structures in rule 9 are
 * not rounded up to a multiple of the base alignment of a vec4.
 */
template <typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
struct Rules430 {
    static constexpr size_t Stride(int count) {
        SkASSERT(count >= 1 || count == GrShaderVar::kNonArray);
        static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
        static_assert(Cols >= 1 && Cols <= 4);

        if (Cols != 1) {
            // This is a matrix or array of matrices. We return the stride between columns.
            SkASSERT(RowsOrVecLength > 1);
            return Rules430<BaseType, RowsOrVecLength>::Stride(1);
        }
        if (count == 0) {
            // Stride doesn't matter for a non-array.
            return 0;
        }
        // Rule 4 without the round up to a multiple of align-of vec4.
        return tight_vec_size<BaseType>(RowsOrVecLength == 3 ? 4 : RowsOrVecLength);
    }
};
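
// Compared to Rules140 above, the vec4 round-up of rule 4 is gone, e.g.:
//   Rules430<float>::Stride(n)    == 4    (16 under std140)
//   Rules430<float, 2>::Stride(n) == 8    (16 under std140)
//   Rules430<float, 3>::Stride(n) == 16   (3-component vectors still take a 4-component slot)
//   Rules430<float, 4>::Stride(n) == 16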

// The strides used here were derived from the rules we've imposed on ourselves in
// GrMtlPipelineStateDataManager. Everything is tight except 3-component vectors, which have the
// stride of their 4-component equivalents.
template <typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
struct RulesMetal {
    static constexpr size_t Stride(int count) {
        SkASSERT(count >= 1 || count == GrShaderVar::kNonArray);
        static_assert(RowsOrVecLength >= 1 && RowsOrVecLength <= 4);
        static_assert(Cols >= 1 && Cols <= 4);
        if (Cols != 1) {
            // This is a matrix or array of matrices. We return the stride between columns.
            SkASSERT(RowsOrVecLength > 1);
            return RulesMetal<BaseType, RowsOrVecLength>::Stride(1);
        }
        if (count == 0) {
            // Stride doesn't matter for a non-array.
            return 0;
        }
        return tight_vec_size<BaseType>(RowsOrVecLength == 3 ? 4 : RowsOrVecLength);
    }
};
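
// For the scalar, vector, and matrix types handled in this file these strides match Rules430,
// e.g. RulesMetal<float, 2>::Stride(n) == 8 and RulesMetal<float, 3>::Stride(n) == 16.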

template <template <typename BaseType, int RowsOrVecLength, int Cols> class Rules>
class Writer {
private:
    using CType = GrProcessor::Uniform::CType;

    template<typename BaseType, int RowsOrVecLength = 1, int Cols = 1>
    static void Write(void* dst, int n, const BaseType v[]) {
        if (dst) {
            size_t stride = Rules<BaseType, RowsOrVecLength, Cols>::Stride(n);
            n = (n == GrShaderVar::kNonArray) ? 1 : n;
            n *= Cols;
            if (stride == RowsOrVecLength*sizeof(BaseType)) {
                std::memcpy(dst, v, n*stride);
            } else {
                for (int i = 0; i < n; ++i) {
                    std::memcpy(dst, v, RowsOrVecLength*sizeof(BaseType));
                    v += RowsOrVecLength;
                    dst = SkTAddOffset<void>(dst, stride);
                }
            }
        }
    }
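
    // Concretely, for Write() above: an array of float3s under std140 has a 16-byte stride but a
    // 12-byte tight size, so each element is copied individually with 4 bytes of padding; an array
    // of float4s has stride == tight size == 16, so the whole array goes out in one memcpy.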

    static void WriteSkMatrices(void* d, int n, const SkMatrix m[]) {
        size_t offset = 0;
        for (int i = 0; i < std::max(n, 1); ++i) {
            float mt[] = {
                    m[i].get(SkMatrix::kMScaleX),
                    m[i].get(SkMatrix::kMSkewY),
                    m[i].get(SkMatrix::kMPersp0),
                    m[i].get(SkMatrix::kMSkewX),
                    m[i].get(SkMatrix::kMScaleY),
                    m[i].get(SkMatrix::kMPersp1),
                    m[i].get(SkMatrix::kMTransX),
                    m[i].get(SkMatrix::kMTransY),
                    m[i].get(SkMatrix::kMPersp2),
            };
            Write<float, 3, 3>(SkTAddOffset<void>(d, offset), 1, mt);
            // Stride() will give us the stride of each column, so mul by 3 to get matrix stride.
            offset += 3*Rules<float, 3, 3>::Stride(1);
        }
    }
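
    // With each of the rule sets above, Rules<float, 3, 3>::Stride(1) is 16, so every SkMatrix is
    // written as three vec4-aligned columns occupying 48 bytes.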

public:
    static void WriteUniform(GrSLType type, CType ctype, void* d, int n, const void* v) {
        SkASSERT(d);
        SkASSERT(n >= 1 || n == GrShaderVar::kNonArray);
        switch (type) {
            case kInt_GrSLType:
                return Write<int32_t>(d, n, static_cast<const int32_t*>(v));

            case kInt2_GrSLType:
                return Write<int32_t, 2>(d, n, static_cast<const int32_t*>(v));

            case kInt3_GrSLType:
                return Write<int32_t, 3>(d, n, static_cast<const int32_t*>(v));

            case kInt4_GrSLType:
                return Write<int32_t, 4>(d, n, static_cast<const int32_t*>(v));

            case kHalf_GrSLType:
            case kFloat_GrSLType:
                return Write<float>(d, n, static_cast<const float*>(v));

            case kHalf2_GrSLType:
            case kFloat2_GrSLType:
                return Write<float, 2>(d, n, static_cast<const float*>(v));

            case kHalf3_GrSLType:
            case kFloat3_GrSLType:
                return Write<float, 3>(d, n, static_cast<const float*>(v));

            case kHalf4_GrSLType:
            case kFloat4_GrSLType:
                return Write<float, 4>(d, n, static_cast<const float*>(v));

            case kHalf2x2_GrSLType:
            case kFloat2x2_GrSLType:
                return Write<float, 2, 2>(d, n, static_cast<const float*>(v));

            case kHalf3x3_GrSLType:
            case kFloat3x3_GrSLType: {
                switch (ctype) {
                    case CType::kDefault:
                        return Write<float, 3, 3>(d, n, static_cast<const float*>(v));
                    case CType::kSkMatrix:
                        return WriteSkMatrices(d, n, static_cast<const SkMatrix*>(v));
                }
                SkUNREACHABLE;
            }

            case kHalf4x4_GrSLType:
            case kFloat4x4_GrSLType:
                return Write<float, 4, 4>(d, n, static_cast<const float*>(v));

            default:
                SK_ABORT("Unexpected uniform type");
        }
    }
};

bool GrUniformDataManager::UniformManager::writeUniforms(const GrProgramInfo& info, void* buffer) {
    decltype(&Writer<Rules140>::WriteUniform) write;
    switch (fLayout) {
        case Layout::kStd140:
            write = Writer<Rules140>::WriteUniform;
            break;
        case Layout::kStd430:
            write = Writer<Rules430>::WriteUniform;
            break;
        case Layout::kMetal:
            write = Writer<RulesMetal>::WriteUniform;
            break;
    }

    bool wrote = false;
    auto set = [&, processorIndex = 0](const GrProcessor& p) mutable {
        SkASSERT(buffer);
        const ProcessorUniforms& uniforms = fUniforms[processorIndex];
        for (const NewUniform& u : uniforms) {
            if (u.type != kVoid_GrSLType) {
                SkASSERT(u.count >= 0);
                static_assert(GrShaderVar::kNonArray == 0);
                void* d = SkTAddOffset<void>(buffer, u.offset);
                size_t index = u.indexInProcessor;
                const void* v = p.uniformData(index);
                write(u.type, p.uniforms()[index].ctype(), d, u.count, v);
                wrote = true;
            }
        }
        ++processorIndex;
    };

    info.visitProcessors(set);
    return wrote;
}

//////////////////////////////////////////////////////////////////////////////

GrUniformDataManager::GrUniformDataManager(ProgramUniforms uniforms,
                                           Layout layout,
                                           uint32_t uniformCount,
                                           uint32_t uniformSize)
        : fUniformSize(uniformSize)
        , fUniformsDirty(false)
        , fUniformManager(std::move(uniforms), layout) {
    fUniformData.reset(uniformSize);
    fUniforms.push_back_n(uniformCount);
    // Subclasses fill in the legacy uniforms in their constructors.
}

void GrUniformDataManager::setUniforms(const GrProgramInfo& info) {
    if (fUniformManager.writeUniforms(info, fUniformData.get())) {
        this->markDirty();
    }
}

void* GrUniformDataManager::getBufferPtrAndMarkDirty(const Uniform& uni) const {
    fUniformsDirty = true;
    return static_cast<char*>(fUniformData.get())+uni.fOffset;
}

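// The setters below fill in the legacy uniforms (fUniforms) that subclasses set up in their
// constructors. Scalar and vector values are written tightly at the uniform's offset, while array
// elements and matrix columns each occupy a full 4-component (16-byte) slot.
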
void GrUniformDataManager::set1i(UniformHandle u, int32_t i) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    memcpy(buffer, &i, sizeof(int32_t));
}

void GrUniformDataManager::set1iv(UniformHandle u,
                                  int arrayCount,
                                  const int32_t v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt_GrSLType || uni.fType == kShort_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    for (int i = 0; i < arrayCount; ++i) {
        const int32_t* curVec = &v[i];
        memcpy(buffer, curVec, sizeof(int32_t));
        buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
    }
}

void GrUniformDataManager::set1f(UniformHandle u, float v0) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    memcpy(buffer, &v0, sizeof(float));
}

void GrUniformDataManager::set1fv(UniformHandle u,
                                  int arrayCount,
                                  const float v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat_GrSLType || uni.fType == kHalf_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    for (int i = 0; i < arrayCount; ++i) {
        const float* curVec = &v[i];
        memcpy(buffer, curVec, sizeof(float));
        buffer = static_cast<char*>(buffer) + 4*sizeof(float);
    }
}

void GrUniformDataManager::set2i(UniformHandle u, int32_t i0, int32_t i1) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    int32_t v[2] = { i0, i1 };
    memcpy(buffer, v, 2 * sizeof(int32_t));
}

void GrUniformDataManager::set2iv(UniformHandle u,
                                  int arrayCount,
                                  const int32_t v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt2_GrSLType || uni.fType == kShort2_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    for (int i = 0; i < arrayCount; ++i) {
        const int32_t* curVec = &v[2 * i];
        memcpy(buffer, curVec, 2 * sizeof(int32_t));
        buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
    }
}

void GrUniformDataManager::set2f(UniformHandle u, float v0, float v1) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    float v[2] = { v0, v1 };
    memcpy(buffer, v, 2 * sizeof(float));
}

void GrUniformDataManager::set2fv(UniformHandle u,
                                  int arrayCount,
                                  const float v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat2_GrSLType || uni.fType == kHalf2_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    for (int i = 0; i < arrayCount; ++i) {
        const float* curVec = &v[2 * i];
        memcpy(buffer, curVec, 2 * sizeof(float));
        buffer = static_cast<char*>(buffer) + 4*sizeof(float);
    }
}

void GrUniformDataManager::set3i(UniformHandle u,
                                 int32_t i0,
                                 int32_t i1,
                                 int32_t i2) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    int32_t v[3] = { i0, i1, i2 };
    memcpy(buffer, v, 3 * sizeof(int32_t));
}

void GrUniformDataManager::set3iv(UniformHandle u,
                                  int arrayCount,
                                  const int32_t v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt3_GrSLType || uni.fType == kShort3_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    for (int i = 0; i < arrayCount; ++i) {
        const int32_t* curVec = &v[3 * i];
        memcpy(buffer, curVec, 3 * sizeof(int32_t));
        buffer = static_cast<char*>(buffer) + 4*sizeof(int32_t);
    }
}

void GrUniformDataManager::set3f(UniformHandle u, float v0, float v1, float v2) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    float v[3] = { v0, v1, v2 };
    memcpy(buffer, v, 3 * sizeof(float));
}

void GrUniformDataManager::set3fv(UniformHandle u,
                                  int arrayCount,
                                  const float v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat3_GrSLType || uni.fType == kHalf3_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    for (int i = 0; i < arrayCount; ++i) {
        const float* curVec = &v[3 * i];
        memcpy(buffer, curVec, 3 * sizeof(float));
        buffer = static_cast<char*>(buffer) + 4*sizeof(float);
    }
}

void GrUniformDataManager::set4i(UniformHandle u,
                                 int32_t i0,
                                 int32_t i1,
                                 int32_t i2,
                                 int32_t i3) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    int32_t v[4] = { i0, i1, i2, i3 };
    memcpy(buffer, v, 4 * sizeof(int32_t));
}

void GrUniformDataManager::set4iv(UniformHandle u,
                                  int arrayCount,
                                  const int32_t v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kInt4_GrSLType || uni.fType == kShort4_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    // 4-component elements already fill a full slot, so the whole array is copied in one shot.
    memcpy(buffer, v, arrayCount * 4 * sizeof(int32_t));
}

void GrUniformDataManager::set4f(UniformHandle u,
                                 float v0,
                                 float v1,
                                 float v2,
                                 float v3) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
    SkASSERT(GrShaderVar::kNonArray == uni.fArrayCount);
    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    float v[4] = { v0, v1, v2, v3 };
    memcpy(buffer, v, 4 * sizeof(float));
}

void GrUniformDataManager::set4fv(UniformHandle u,
                                  int arrayCount,
                                  const float v[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat4_GrSLType || uni.fType == kHalf4_GrSLType);
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = this->getBufferPtrAndMarkDirty(uni);
    memcpy(buffer, v, arrayCount * 4 * sizeof(float));
}

void GrUniformDataManager::setMatrix2f(UniformHandle u, const float matrix[]) const {
    this->setMatrices<2>(u, 1, matrix);
}

void GrUniformDataManager::setMatrix2fv(UniformHandle u, int arrayCount, const float m[]) const {
    this->setMatrices<2>(u, arrayCount, m);
}

void GrUniformDataManager::setMatrix3f(UniformHandle u, const float matrix[]) const {
    this->setMatrices<3>(u, 1, matrix);
}

void GrUniformDataManager::setMatrix3fv(UniformHandle u, int arrayCount, const float m[]) const {
    this->setMatrices<3>(u, arrayCount, m);
}

void GrUniformDataManager::setMatrix4f(UniformHandle u, const float matrix[]) const {
    this->setMatrices<4>(u, 1, matrix);
}

void GrUniformDataManager::setMatrix4fv(UniformHandle u, int arrayCount, const float m[]) const {
    this->setMatrices<4>(u, arrayCount, m);
}

template<int N> struct set_uniform_matrix;

template<int N> inline void GrUniformDataManager::setMatrices(UniformHandle u,
                                                              int arrayCount,
                                                              const float matrices[]) const {
    const Uniform& uni = fUniforms[u.toIndex()];
    SkASSERT(uni.fType == kFloat2x2_GrSLType + (N - 2) ||
             uni.fType == kHalf2x2_GrSLType + (N - 2));
    SkASSERT(arrayCount > 0);
    SkASSERT(arrayCount <= uni.fArrayCount ||
             (1 == arrayCount && GrShaderVar::kNonArray == uni.fArrayCount));

    void* buffer = fUniformData.get();
    fUniformsDirty = true;

    set_uniform_matrix<N>::set(buffer, uni.fOffset, arrayCount, matrices);
}

template<int N> struct set_uniform_matrix {
    inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
        buffer = static_cast<char*>(buffer) + uniformOffset;
        for (int i = 0; i < count; ++i) {
            const float* matrix = &matrices[N * N * i];
            for (int j = 0; j < N; ++j) {
                memcpy(buffer, &matrix[j * N], N * sizeof(float));
                buffer = static_cast<char*>(buffer) + 4 * sizeof(float);
            }
        }
    }
};

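// The generic case above writes each N-float column into its own 4-float slot, so a 2x2 matrix
// occupies 32 bytes and a 3x3 occupies 48. A 4x4 matrix is already tightly packed (64 bytes), so
// the specialization below copies the whole array with a single memcpy.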
template<> struct set_uniform_matrix<4> {
    inline static void set(void* buffer, int uniformOffset, int count, const float matrices[]) {
        buffer = static_cast<char*>(buffer) + uniformOffset;
        memcpy(buffer, matrices, count * 16 * sizeof(float));
    }
};