/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "Sk4px.h"
#include "SkNx.h"
#include "SkRandom.h"
#include "Test.h"

template <int N>
static void test_Nf(skiatest::Reporter* r) {

    auto assert_nearly_eq = [&](float eps, const SkNx<N, float>& v,
                                float a, float b, float c, float d) {
        auto close = [=](float a, float b) { return fabsf(a-b) <= eps; };
        float vals[4];
        v.store(vals);
        bool ok = close(vals[0], a) && close(vals[1], b)
               && close( v[0], a) && close( v[1], b);
        REPORTER_ASSERT(r, ok);
        if (N == 4) {
            ok = close(vals[2], c) && close(vals[3], d)
              && close( v[2], c) && close( v[3], d);
            REPORTER_ASSERT(r, ok);
        }
    };
    auto assert_eq = [&](const SkNx<N, float>& v, float a, float b, float c, float d) {
        return assert_nearly_eq(0, v, a,b,c,d);
    };

    float vals[] = {3, 4, 5, 6};
    SkNx<N,float> a = SkNx<N,float>::Load(vals),
                  b(a),
                  c = a;
    SkNx<N,float> d;
    d = a;

    assert_eq(a, 3, 4, 5, 6);
    assert_eq(b, 3, 4, 5, 6);
    assert_eq(c, 3, 4, 5, 6);
    assert_eq(d, 3, 4, 5, 6);

    assert_eq(a+b, 6, 8, 10, 12);
    assert_eq(a*b, 9, 16, 25, 36);
    assert_eq(a*b-b, 6, 12, 20, 30);
    assert_eq((a*b).sqrt(), 3, 4, 5, 6);
    assert_eq(a/b, 1, 1, 1, 1);
    assert_eq(SkNx<N,float>(0)-a, -3, -4, -5, -6);

    SkNx<N,float> fours(4);

    assert_eq(fours.sqrt(), 2,2,2,2);
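    // rsqrt() and invert() are typically fast hardware approximations, so the next two
    // checks use a small tolerance rather than exact equality.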
    assert_nearly_eq(0.001f, fours.rsqrt(), 0.5, 0.5, 0.5, 0.5);

    assert_nearly_eq(0.001f, fours.invert(), 0.25, 0.25, 0.25, 0.25);

    assert_eq(SkNx<N,float>::Min(a, fours), 3, 4, 4, 4);
    assert_eq(SkNx<N,float>::Max(a, fours), 4, 4, 5, 6);

    // Test some comparisons. This is not exhaustive.
    REPORTER_ASSERT(r, (a == b).allTrue());
    REPORTER_ASSERT(r, (a+b == a*b-b).anyTrue());
    REPORTER_ASSERT(r, !(a+b == a*b-b).allTrue());
    REPORTER_ASSERT(r, !(a+b == a*b).anyTrue());
    REPORTER_ASSERT(r, !(a != b).anyTrue());
    REPORTER_ASSERT(r, (a < fours).anyTrue());
    REPORTER_ASSERT(r, (a <= fours).anyTrue());
    REPORTER_ASSERT(r, !(a > fours).allTrue());
    REPORTER_ASSERT(r, !(a >= fours).allTrue());
}

DEF_TEST(SkNf, r) {
    test_Nf<2>(r);
    test_Nf<4>(r);
}

template <int N, typename T>
void test_Ni(skiatest::Reporter* r) {
    auto assert_eq = [&](const SkNx<N,T>& v, T a, T b, T c, T d, T e, T f, T g, T h) {
        T vals[8];
        v.store(vals);

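        // Note: these switch cases intentionally fall through, so larger N also
        // checks all of the lower lanes.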
        switch (N) {
            case 8: REPORTER_ASSERT(r, vals[4] == e && vals[5] == f && vals[6] == g && vals[7] == h);
            case 4: REPORTER_ASSERT(r, vals[2] == c && vals[3] == d);
            case 2: REPORTER_ASSERT(r, vals[0] == a && vals[1] == b);
        }
        switch (N) {
            case 8: REPORTER_ASSERT(r, v[4] == e && v[5] == f &&
                                       v[6] == g && v[7] == h);
            case 4: REPORTER_ASSERT(r, v[2] == c && v[3] == d);
            case 2: REPORTER_ASSERT(r, v[0] == a && v[1] == b);
        }
    };

    T vals[] = { 1,2,3,4,5,6,7,8 };
    SkNx<N,T> a = SkNx<N,T>::Load(vals),
              b(a),
              c = a;
    SkNx<N,T> d;
    d = a;

    assert_eq(a, 1,2,3,4,5,6,7,8);
    assert_eq(b, 1,2,3,4,5,6,7,8);
    assert_eq(c, 1,2,3,4,5,6,7,8);
    assert_eq(d, 1,2,3,4,5,6,7,8);

    assert_eq(a+a, 2,4,6,8,10,12,14,16);
    assert_eq(a*a, 1,4,9,16,25,36,49,64);
    assert_eq(a*a-a, 0,2,6,12,20,30,42,56);

    assert_eq(a >> 2, 0,0,0,1,1,1,1,2);
    assert_eq(a << 1, 2,4,6,8,10,12,14,16);

    REPORTER_ASSERT(r, a[1] == 2);
}

DEF_TEST(SkNx, r) {
    test_Ni<2, uint16_t>(r);
    test_Ni<4, uint16_t>(r);
    test_Ni<8, uint16_t>(r);

    test_Ni<2, int>(r);
    test_Ni<4, int>(r);
    test_Ni<8, int>(r);
}

DEF_TEST(SkNi_min_lt, r) {
    // Exhaustively check the 8x8 bit space.
    for (int a = 0; a < (1<<8); a++) {
    for (int b = 0; b < (1<<8); b++) {
        Sk16b aw(a), bw(b);
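        // aw and bw broadcast a and b to every lane, so checking lane [0] is enough.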
        REPORTER_ASSERT(r, Sk16b::Min(aw, bw)[0] == SkTMin(a, b));
        REPORTER_ASSERT(r, !(aw < bw)[0] == !(a < b));
    }}

    // Exhausting the 16x16 bit space is kind of slow, so only do that in release builds.
#ifdef SK_DEBUG
    SkRandom rand;
    for (int i = 0; i < (1<<16); i++) {
        uint16_t a = rand.nextU() >> 16,
                 b = rand.nextU() >> 16;
        REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b))[0] == SkTMin(a, b));
    }
#else
    for (int a = 0; a < (1<<16); a++) {
    for (int b = 0; b < (1<<16); b++) {
        REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b))[0] == SkTMin(a, b));
    }}
#endif
}

DEF_TEST(SkNi_saturatedAdd, r) {
    for (int a = 0; a < (1<<8); a++) {
    for (int b = 0; b < (1<<8); b++) {
        int exact = a+b;
        if (exact > 255) { exact = 255; }
        if (exact < 0) { exact = 0; }

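        // saturatedAdd() should clamp each 8-bit lane to [0,255] instead of wrapping.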
        REPORTER_ASSERT(r, Sk16b(a).saturatedAdd(Sk16b(b))[0] == exact);
    }
    }
}

DEF_TEST(SkNi_mulHi, r) {
    // First 8 primes.
    Sk4u a{ 0x00020000, 0x00030000, 0x00050000, 0x00070000 };
    Sk4u b{ 0x000b0000, 0x000d0000, 0x00110000, 0x00130000 };

    Sk4u q{22, 39, 85, 133};
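    // mulHi() keeps the high 32 bits of each 64-bit product: (2<<16)*(11<<16) = 22<<32,
    // whose high word is 22; likewise 3*13 = 39, 5*17 = 85, and 7*19 = 133.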

    Sk4u c = a.mulHi(b);
    REPORTER_ASSERT(r, c[0] == q[0]);
    REPORTER_ASSERT(r, c[1] == q[1]);
    REPORTER_ASSERT(r, c[2] == q[2]);
    REPORTER_ASSERT(r, c[3] == q[3]);
}

DEF_TEST(Sk4px_muldiv255round, r) {
    for (int a = 0; a < (1<<8); a++) {
    for (int b = 0; b < (1<<8); b++) {
        int exact = (a*b+127)/255;
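        // (a*b+127)/255 is a*b/255 rounded to the nearest integer.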

        // Duplicate a and b 16x each.
        auto av = Sk4px::DupAlpha(a),
             bv = Sk4px::DupAlpha(b);

        // This way should always be exactly correct.
        int correct = (av * bv).div255()[0];
        REPORTER_ASSERT(r, correct == exact);

        // We're a bit more flexible on this method: correct for 0 or 255, otherwise off by <=1.
        int fast = av.approxMulDiv255(bv)[0];
        REPORTER_ASSERT(r, fast-exact >= -1 && fast-exact <= 1);
        if (a == 0 || a == 255 || b == 0 || b == 255) {
            REPORTER_ASSERT(r, fast == exact);
        }
    }
    }
}

DEF_TEST(Sk4px_widening, r) {
    SkPMColor colors[] = {
        SkPreMultiplyColor(0xff00ff00),
        SkPreMultiplyColor(0x40008000),
        SkPreMultiplyColor(0x7f020406),
        SkPreMultiplyColor(0x00000000),
    };
    auto packed = Sk4px::Load4(colors);

    auto wideLo = packed.widenLo(),
         wideHi = packed.widenHi(),
         wideLoHi = packed.widenLoHi(),
         wideLoHiAlt = wideLo + wideHi;
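    // widenLoHi() should match the sum of widenLo() and widenHi() bit-for-bit.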
    REPORTER_ASSERT(r, 0 == memcmp(&wideLoHi, &wideLoHiAlt, sizeof(wideLoHi)));
}

DEF_TEST(SkNx_abs, r) {
    auto fs = Sk4f(0.0f, -0.0f, 2.0f, -4.0f).abs();
    REPORTER_ASSERT(r, fs[0] == 0.0f);
    REPORTER_ASSERT(r, fs[1] == 0.0f);
    REPORTER_ASSERT(r, fs[2] == 2.0f);
    REPORTER_ASSERT(r, fs[3] == 4.0f);
    auto fshi = Sk2f(0.0f, -0.0f).abs();
    auto fslo = Sk2f(2.0f, -4.0f).abs();
    REPORTER_ASSERT(r, fshi[0] == 0.0f);
    REPORTER_ASSERT(r, fshi[1] == 0.0f);
    REPORTER_ASSERT(r, fslo[0] == 2.0f);
    REPORTER_ASSERT(r, fslo[1] == 4.0f);
}

DEF_TEST(Sk4i_abs, r) {
    auto is = Sk4i(0, -1, 2, -2147483647).abs();
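    // Note: INT_MIN (-2147483648) is avoided here since its absolute value is not
    // representable as an int32_t.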
    REPORTER_ASSERT(r, is[0] == 0);
    REPORTER_ASSERT(r, is[1] == 1);
    REPORTER_ASSERT(r, is[2] == 2);
    REPORTER_ASSERT(r, is[3] == 2147483647);
}

DEF_TEST(Sk4i_minmax, r) {
    auto a = Sk4i(0, 2, 4, 6);
    auto b = Sk4i(1, 1, 3, 7);
    auto min = Sk4i::Min(a, b);
    auto max = Sk4i::Max(a, b);
    for (int i = 0; i < 4; ++i) {
        REPORTER_ASSERT(r, min[i] == SkTMin(a[i], b[i]));
        REPORTER_ASSERT(r, max[i] == SkTMax(a[i], b[i]));
    }
}

DEF_TEST(SkNx_floor, r) {
    auto fs = Sk4f(0.4f, -0.4f, 0.6f, -0.6f).floor();
    REPORTER_ASSERT(r, fs[0] == 0.0f);
    REPORTER_ASSERT(r, fs[1] == -1.0f);
    REPORTER_ASSERT(r, fs[2] == 0.0f);
    REPORTER_ASSERT(r, fs[3] == -1.0f);

    auto fs2 = Sk2f(0.4f, -0.4f).floor();
    REPORTER_ASSERT(r, fs2[0] == 0.0f);
    REPORTER_ASSERT(r, fs2[1] == -1.0f);

    auto fs3 = Sk2f(0.6f, -0.6f).floor();
    REPORTER_ASSERT(r, fs3[0] == 0.0f);
    REPORTER_ASSERT(r, fs3[1] == -1.0f);
}

DEF_TEST(SkNx_shuffle, r) {
    Sk4f f4(0,10,20,30);

    Sk2f f2 = SkNx_shuffle<2,1>(f4);
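    // The template arguments pick source lanes by index, so f2 should be {20, 10}.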
    REPORTER_ASSERT(r, f2[0] == 20);
    REPORTER_ASSERT(r, f2[1] == 10);

    f4 = SkNx_shuffle<0,1,1,0>(f2);
    REPORTER_ASSERT(r, f4[0] == 20);
    REPORTER_ASSERT(r, f4[1] == 10);
    REPORTER_ASSERT(r, f4[2] == 10);
    REPORTER_ASSERT(r, f4[3] == 20);
}

DEF_TEST(SkNx_int_float, r) {
    Sk4f f(-2.3f, 1.0f, 0.45f, 0.6f);

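    // SkNx_cast<int> truncates toward zero, so -2.3f -> -2 and 0.45f/0.6f -> 0.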
    Sk4i i = SkNx_cast<int>(f);
    REPORTER_ASSERT(r, i[0] == -2);
    REPORTER_ASSERT(r, i[1] == 1);
    REPORTER_ASSERT(r, i[2] == 0);
    REPORTER_ASSERT(r, i[3] == 0);

    f = SkNx_cast<float>(i);
    REPORTER_ASSERT(r, f[0] == -2.0f);
    REPORTER_ASSERT(r, f[1] == 1.0f);
    REPORTER_ASSERT(r, f[2] == 0.0f);
    REPORTER_ASSERT(r, f[3] == 0.0f);
}
DEF_TEST(SkNx_u16_float, r) {
    {
        // u16 --> float
        auto h4 = Sk4h(15, 17, 257, 65535);
        auto f4 = SkNx_cast<float>(h4);
        REPORTER_ASSERT(r, f4[0] == 15.0f);
        REPORTER_ASSERT(r, f4[1] == 17.0f);
        REPORTER_ASSERT(r, f4[2] == 257.0f);
        REPORTER_ASSERT(r, f4[3] == 65535.0f);
    }
    {
        // float -> u16
        auto f4 = Sk4f(15, 17, 257, 65535);
        auto h4 = SkNx_cast<uint16_t>(f4);
        REPORTER_ASSERT(r, h4[0] == 15);
        REPORTER_ASSERT(r, h4[1] == 17);
        REPORTER_ASSERT(r, h4[2] == 257);
        REPORTER_ASSERT(r, h4[3] == 65535);
    }

    // Starting from any u16 value, we should get a perfect round-trip into float and back.
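    // (Every u16 fits exactly in a float's 24-bit significand, so no rounding can occur.)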
    SkRandom rand;
    for (int i = 0; i < 10000; ++i) {
        const uint16_t s16[4] {
            (uint16_t)rand.nextU16(), (uint16_t)rand.nextU16(),
            (uint16_t)rand.nextU16(), (uint16_t)rand.nextU16(),
        };
        auto u4_0 = Sk4h::Load(s16);
        auto f4 = SkNx_cast<float>(u4_0);
        auto u4_1 = SkNx_cast<uint16_t>(f4);
        uint16_t d16[4];
        u4_1.store(d16);
        REPORTER_ASSERT(r, !memcmp(s16, d16, sizeof(s16)));
    }
}

// The SSE2 implementation of SkNx_cast<uint16_t>(Sk4i) is non-trivial, so worth a test.
DEF_TEST(SkNx_int_u16, r) {
    // These are pretty hard to get wrong.
    for (int i = 0; i <= 0x7fff; i++) {
        uint16_t expected = (uint16_t)i;
        uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];

        REPORTER_ASSERT(r, expected == actual);
    }

    // A naive implementation with _mm_packs_epi32 would succeed up to 0x7fff but fail here:
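    // (_mm_packs_epi32 saturates as signed 16-bit, so values >= 0x8000 would clamp to
    // 0x7fff instead of keeping their low 16 bits.)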
    for (int i = 0x8000; (1) && i <= 0xffff; i++) {
        uint16_t expected = (uint16_t)i;
        uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];

        REPORTER_ASSERT(r, expected == actual);
    }
}

DEF_TEST(SkNx_4fLoad4Store4, r) {
    float src[] = {
        0.0f, 1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f, 7.0f,
        8.0f, 9.0f, 10.0f, 11.0f,
        12.0f, 13.0f, 14.0f, 15.0f
    };

    Sk4f a, b, c, d;
    Sk4f::Load4(src, &a, &b, &c, &d);
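    // Load4() de-interleaves (transposes) the 4x4 block: a gets every 4th float starting
    // at src[0], b starting at src[1], and so on.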
    REPORTER_ASSERT(r, 0.0f == a[0]);
    REPORTER_ASSERT(r, 4.0f == a[1]);
    REPORTER_ASSERT(r, 8.0f == a[2]);
    REPORTER_ASSERT(r, 12.0f == a[3]);
    REPORTER_ASSERT(r, 1.0f == b[0]);
    REPORTER_ASSERT(r, 5.0f == b[1]);
    REPORTER_ASSERT(r, 9.0f == b[2]);
    REPORTER_ASSERT(r, 13.0f == b[3]);
    REPORTER_ASSERT(r, 2.0f == c[0]);
    REPORTER_ASSERT(r, 6.0f == c[1]);
    REPORTER_ASSERT(r, 10.0f == c[2]);
    REPORTER_ASSERT(r, 14.0f == c[3]);
    REPORTER_ASSERT(r, 3.0f == d[0]);
    REPORTER_ASSERT(r, 7.0f == d[1]);
    REPORTER_ASSERT(r, 11.0f == d[2]);
    REPORTER_ASSERT(r, 15.0f == d[3]);

    float dst[16];
    Sk4f::Store4(dst, a, b, c, d);
    REPORTER_ASSERT(r, 0 == memcmp(dst, src, 16 * sizeof(float)));
}

DEF_TEST(SkNx_neg, r) {
    auto fs = -Sk4f(0.0f, -0.0f, 2.0f, -4.0f);
    REPORTER_ASSERT(r, fs[0] == 0.0f);
    REPORTER_ASSERT(r, fs[1] == 0.0f);
    REPORTER_ASSERT(r, fs[2] == -2.0f);
    REPORTER_ASSERT(r, fs[3] == 4.0f);
    auto fshi = -Sk2f(0.0f, -0.0f);
    auto fslo = -Sk2f(2.0f, -4.0f);
    REPORTER_ASSERT(r, fshi[0] == 0.0f);
    REPORTER_ASSERT(r, fshi[1] == 0.0f);
    REPORTER_ASSERT(r, fslo[0] == -2.0f);
    REPORTER_ASSERT(r, fslo[1] == 4.0f);
}

DEF_TEST(SkNx_thenElse, r) {
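    // thenElse(t, e) selects t in lanes where the comparison is true and e elsewhere;
    // only -4.0f is less than zero here.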
    auto fs = (Sk4f(0.0f, -0.0f, 2.0f, -4.0f) < 0).thenElse(-1, 1);
    REPORTER_ASSERT(r, fs[0] == 1);
    REPORTER_ASSERT(r, fs[1] == 1);
    REPORTER_ASSERT(r, fs[2] == 1);
    REPORTER_ASSERT(r, fs[3] == -1);
    auto fshi = (Sk2f(0.0f, -0.0f) < 0).thenElse(-1, 1);
    auto fslo = (Sk2f(2.0f, -4.0f) < 0).thenElse(-1, 1);
    REPORTER_ASSERT(r, fshi[0] == 1);
    REPORTER_ASSERT(r, fshi[1] == 1);
    REPORTER_ASSERT(r, fslo[0] == 1);
    REPORTER_ASSERT(r, fslo[1] == -1);
}

DEF_TEST(Sk4f_Load2, r) {
    float xy[8] = { 0,1,2,3,4,5,6,7 };

    Sk4f x,y;
    Sk4f::Load2(xy, &x,&y);

    REPORTER_ASSERT(r, x[0] == 0);
    REPORTER_ASSERT(r, x[1] == 2);
    REPORTER_ASSERT(r, x[2] == 4);
    REPORTER_ASSERT(r, x[3] == 6);

    REPORTER_ASSERT(r, y[0] == 1);
    REPORTER_ASSERT(r, y[1] == 3);
    REPORTER_ASSERT(r, y[2] == 5);
    REPORTER_ASSERT(r, y[3] == 7);
}

DEF_TEST(Sk2f_Load2, r) {
    float xy[4] = { 0,1,2,3 };

    Sk2f x,y;
    Sk2f::Load2(xy, &x,&y);

    REPORTER_ASSERT(r, x[0] == 0);
    REPORTER_ASSERT(r, x[1] == 2);

    REPORTER_ASSERT(r, y[0] == 1);
    REPORTER_ASSERT(r, y[1] == 3);
}

DEF_TEST(Sk2f_Store2, r) {
    Sk2f p0{0, 2};
    Sk2f p1{1, 3};
    float dst[4];
    Sk2f::Store2(dst, p0, p1);
    REPORTER_ASSERT(r, dst[0] == 0);
    REPORTER_ASSERT(r, dst[1] == 1);
    REPORTER_ASSERT(r, dst[2] == 2);
    REPORTER_ASSERT(r, dst[3] == 3);
}

DEF_TEST(Sk2f_Store3, r) {
    Sk2f p0{0, 3};
    Sk2f p1{1, 4};
    Sk2f p2{2, 5};
    float dst[6];
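    // Store3() interleaves the three vectors: {p0[0], p1[0], p2[0], p0[1], p1[1], p2[1]}.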
    Sk2f::Store3(dst, p0, p1, p2);
    REPORTER_ASSERT(r, dst[0] == 0);
    REPORTER_ASSERT(r, dst[1] == 1);
    REPORTER_ASSERT(r, dst[2] == 2);
    REPORTER_ASSERT(r, dst[3] == 3);
    REPORTER_ASSERT(r, dst[4] == 4);
    REPORTER_ASSERT(r, dst[5] == 5);
}

DEF_TEST(Sk2f_Store4, r) {
    Sk2f p0{0, 4};
    Sk2f p1{1, 5};
    Sk2f p2{2, 6};
    Sk2f p3{3, 7};

    float dst[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
    Sk2f::Store4(dst, p0, p1, p2, p3);
    REPORTER_ASSERT(r, dst[0] == 0);
    REPORTER_ASSERT(r, dst[1] == 1);
    REPORTER_ASSERT(r, dst[2] == 2);
    REPORTER_ASSERT(r, dst[3] == 3);
    REPORTER_ASSERT(r, dst[4] == 4);
    REPORTER_ASSERT(r, dst[5] == 5);
    REPORTER_ASSERT(r, dst[6] == 6);
    REPORTER_ASSERT(r, dst[7] == 7);

    // Ensure transposing to Sk4f works.
    Sk4f dst4f[2] = {{-1, -1, -1, -1}, {-1, -1, -1, -1}};
    Sk2f::Store4(dst4f, p0, p1, p2, p3);
    REPORTER_ASSERT(r, dst4f[0][0] == 0);
    REPORTER_ASSERT(r, dst4f[0][1] == 1);
    REPORTER_ASSERT(r, dst4f[0][2] == 2);
    REPORTER_ASSERT(r, dst4f[0][3] == 3);
    REPORTER_ASSERT(r, dst4f[1][0] == 4);
    REPORTER_ASSERT(r, dst4f[1][1] == 5);
    REPORTER_ASSERT(r, dst4f[1][2] == 6);
    REPORTER_ASSERT(r, dst4f[1][3] == 7);
}

DEF_TEST(Sk4f_minmax, r) {
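    // min() and max() reduce horizontally across all four lanes to a single float.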
    REPORTER_ASSERT(r, 3 == Sk4f(0,1,2,3).max());
    REPORTER_ASSERT(r, 2 == Sk4f(1,-5,2,-1).max());
    REPORTER_ASSERT(r, -1 == Sk4f(-2,-1,-6,-3).max());
    REPORTER_ASSERT(r, 3 == Sk4f(3,2,1,0).max());

    REPORTER_ASSERT(r, 0 == Sk4f(0,1,2,3).min());
    REPORTER_ASSERT(r, -5 == Sk4f(1,-5,2,-1).min());
    REPORTER_ASSERT(r, -6 == Sk4f(-2,-1,-6,-3).min());
    REPORTER_ASSERT(r, 0 == Sk4f(3,2,1,0).min());
}

DEF_TEST(SkNf_anyTrue_allTrue, r) {
    REPORTER_ASSERT(r,  (Sk2f{1,2} < Sk2f{3,4}).anyTrue());
    REPORTER_ASSERT(r,  (Sk2f{1,2} < Sk2f{3,4}).allTrue());
    REPORTER_ASSERT(r,  (Sk2f{3,2} < Sk2f{1,4}).anyTrue());
    REPORTER_ASSERT(r, !(Sk2f{3,2} < Sk2f{1,4}).allTrue());
    REPORTER_ASSERT(r, !(Sk2f{3,4} < Sk2f{1,2}).anyTrue());

    REPORTER_ASSERT(r,  (Sk4f{1,2,3,4} < Sk4f{3,4,5,6}).anyTrue());
    REPORTER_ASSERT(r,  (Sk4f{1,2,3,4} < Sk4f{3,4,5,6}).allTrue());
    REPORTER_ASSERT(r,  (Sk4f{1,2,3,4} < Sk4f{1,4,1,1}).anyTrue());
    REPORTER_ASSERT(r, !(Sk4f{1,2,3,4} < Sk4f{1,4,1,1}).allTrue());
    REPORTER_ASSERT(r, !(Sk4f{3,4,5,6} < Sk4f{1,2,3,4}).anyTrue());
}