/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "include/private/SkNx.h"
#include "include/utils/SkRandom.h"
#include "src/core/Sk4px.h"
#include "tests/Test.h"

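// Exercises the shared SkNx<N, float> API -- construction, Load()/store(), arithmetic,
// sqrt(), Min()/Max(), and comparisons -- for the 2- and 4-lane float specializations.
// When N == 2, only the first two lanes are checked.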
template <int N>
static void test_Nf(skiatest::Reporter* r) {

    auto assert_nearly_eq = [&](float eps, const SkNx<N, float>& v,
                                float a, float b, float c, float d) {
        auto close = [=](float a, float b) { return fabsf(a-b) <= eps; };
        float vals[4];
        v.store(vals);
        bool ok = close(vals[0], a) && close(vals[1], b)
               && close( v[0], a) && close( v[1], b);
        REPORTER_ASSERT(r, ok);
        if (N == 4) {
            ok = close(vals[2], c) && close(vals[3], d)
              && close( v[2], c) && close( v[3], d);
            REPORTER_ASSERT(r, ok);
        }
    };
    auto assert_eq = [&](const SkNx<N, float>& v, float a, float b, float c, float d) {
        return assert_nearly_eq(0, v, a,b,c,d);
    };

    float vals[] = {3, 4, 5, 6};
    SkNx<N,float> a = SkNx<N,float>::Load(vals),
                  b(a),
                  c = a;
    SkNx<N,float> d;
    d = a;

    assert_eq(a, 3, 4, 5, 6);
    assert_eq(b, 3, 4, 5, 6);
    assert_eq(c, 3, 4, 5, 6);
    assert_eq(d, 3, 4, 5, 6);

    assert_eq(a+b, 6, 8, 10, 12);
    assert_eq(a*b, 9, 16, 25, 36);
    assert_eq(a*b-b, 6, 12, 20, 30);
    assert_eq((a*b).sqrt(), 3, 4, 5, 6);
    assert_eq(a/b, 1, 1, 1, 1);
    assert_eq(SkNx<N,float>(0)-a, -3, -4, -5, -6);

    SkNx<N,float> fours(4);

    assert_eq(fours.sqrt(), 2,2,2,2);

    assert_eq(SkNx<N,float>::Min(a, fours), 3, 4, 4, 4);
    assert_eq(SkNx<N,float>::Max(a, fours), 4, 4, 5, 6);

    // Test some comparisons. This is not exhaustive.
    REPORTER_ASSERT(r, (a == b).allTrue());
    REPORTER_ASSERT(r, (a+b == a*b-b).anyTrue());
    REPORTER_ASSERT(r, !(a+b == a*b-b).allTrue());
    REPORTER_ASSERT(r, !(a+b == a*b).anyTrue());
    REPORTER_ASSERT(r, !(a != b).anyTrue());
    REPORTER_ASSERT(r, (a < fours).anyTrue());
    REPORTER_ASSERT(r, (a <= fours).anyTrue());
    REPORTER_ASSERT(r, !(a > fours).allTrue());
    REPORTER_ASSERT(r, !(a >= fours).allTrue());
}

DEF_TEST(SkNf, r) {
    test_Nf<2>(r);
    test_Nf<4>(r);
}

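// Exercises the integer SkNx<N,T> API (Load, copy, assignment, +, *, and bit shifts) for
// 2-, 4-, and 8-lane vectors; assert_eq below only checks the first N lanes.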
template <int N, typename T>
void test_Ni(skiatest::Reporter* r) {
    auto assert_eq = [&](const SkNx<N,T>& v, T a, T b, T c, T d, T e, T f, T g, T h) {
        T vals[8];
        v.store(vals);

        switch (N) {
            case 8:
                REPORTER_ASSERT(r, vals[4] == e && vals[5] == f && vals[6] == g && vals[7] == h);
                [[fallthrough]];
            case 4:
                REPORTER_ASSERT(r, vals[2] == c && vals[3] == d);
                [[fallthrough]];
            case 2:
                REPORTER_ASSERT(r, vals[0] == a && vals[1] == b);
        }
        switch (N) {
            case 8:
                REPORTER_ASSERT(r, v[4] == e && v[5] == f && v[6] == g && v[7] == h);
                [[fallthrough]];
            case 4:
                REPORTER_ASSERT(r, v[2] == c && v[3] == d);
                [[fallthrough]];
            case 2:
                REPORTER_ASSERT(r, v[0] == a && v[1] == b);
        }
    };

    T vals[] = { 1,2,3,4,5,6,7,8 };
    SkNx<N,T> a = SkNx<N,T>::Load(vals),
              b(a),
              c = a;
    SkNx<N,T> d;
    d = a;

    assert_eq(a, 1,2,3,4,5,6,7,8);
    assert_eq(b, 1,2,3,4,5,6,7,8);
    assert_eq(c, 1,2,3,4,5,6,7,8);
    assert_eq(d, 1,2,3,4,5,6,7,8);

    assert_eq(a+a, 2,4,6,8,10,12,14,16);
    assert_eq(a*a, 1,4,9,16,25,36,49,64);
    assert_eq(a*a-a, 0,2,6,12,20,30,42,56);

    assert_eq(a >> 2, 0,0,0,1,1,1,1,2);
    assert_eq(a << 1, 2,4,6,8,10,12,14,16);

    REPORTER_ASSERT(r, a[1] == 2);
}

DEF_TEST(SkNx, r) {
    test_Ni<2, uint16_t>(r);
    test_Ni<4, uint16_t>(r);
    test_Ni<8, uint16_t>(r);

    test_Ni<2, int>(r);
    test_Ni<4, int>(r);
    test_Ni<8, int>(r);
}

DEF_TEST(SkNi_min_lt, r) {
    // Exhaustively check the 8x8 bit space.
    for (int a = 0; a < (1<<8); a++) {
    for (int b = 0; b < (1<<8); b++) {
        Sk16b aw(a), bw(b);
        REPORTER_ASSERT(r, Sk16b::Min(aw, bw)[0] == std::min(a, b));
        REPORTER_ASSERT(r, !(aw < bw)[0] == !(a < b));
    }}

    // Exhausting the 16x16 bit space is kind of slow, so only do that in release builds.
#ifdef SK_DEBUG
    SkRandom rand;
    for (int i = 0; i < (1<<16); i++) {
        uint16_t a = rand.nextU() >> 16,
                 b = rand.nextU() >> 16;
        REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b))[0] == std::min(a, b));
    }
#else
    for (int a = 0; a < (1<<16); a++) {
    for (int b = 0; b < (1<<16); b++) {
        REPORTER_ASSERT(r, Sk16h::Min(Sk16h(a), Sk16h(b))[0] == std::min(a, b));
    }}
#endif
}

DEF_TEST(SkNi_saturatedAdd, r) {
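    // saturatedAdd() on Sk16b clamps each unsigned 8-bit sum to [0, 255]; check every
    // (a, b) pair against a scalar reference value.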
    for (int a = 0; a < (1<<8); a++) {
    for (int b = 0; b < (1<<8); b++) {
        int exact = a+b;
        if (exact > 255) { exact = 255; }
        if (exact < 0) { exact = 0; }

        REPORTER_ASSERT(r, Sk16b(a).saturatedAdd(Sk16b(b))[0] == exact);
    }
    }
}

DEF_TEST(SkNi_mulHi, r) {
    // The first 8 primes, shifted into the high 16 bits of each 32-bit lane.
    Sk4u a{ 0x00020000, 0x00030000, 0x00050000, 0x00070000 };
    Sk4u b{ 0x000b0000, 0x000d0000, 0x00110000, 0x00130000 };

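    // mulHi() returns the high 32 bits of each unsigned 64-bit lane product, so multiplying
    // values shifted up by 16 yields the plain products: {2*11, 3*13, 5*17, 7*19}.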
    Sk4u q{22, 39, 85, 133};

    Sk4u c = a.mulHi(b);
    REPORTER_ASSERT(r, c[0] == q[0]);
    REPORTER_ASSERT(r, c[1] == q[1]);
    REPORTER_ASSERT(r, c[2] == q[2]);
    REPORTER_ASSERT(r, c[3] == q[3]);
}

DEF_TEST(Sk4px_muldiv255round, r) {
    for (int a = 0; a < (1<<8); a++) {
    for (int b = 0; b < (1<<8); b++) {
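        // For a, b in [0, 255], (a*b + 127) / 255 is a*b/255 rounded to nearest
        // (ties are impossible since 255 is odd), so it serves as the exact reference.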
        int exact = (a*b+127)/255;

        // Duplicate a and b 16x each.
        Sk4px av = Sk16b(a),
              bv = Sk16b(b);

        // This way should always be exactly correct.
        int correct = (av * bv).div255()[0];
        REPORTER_ASSERT(r, correct == exact);

        // We're a bit more flexible on this method: correct for 0 or 255, otherwise off by <=1.
        int fast = av.approxMulDiv255(bv)[0];
        REPORTER_ASSERT(r, fast-exact >= -1 && fast-exact <= 1);
        if (a == 0 || a == 255 || b == 0 || b == 255) {
            REPORTER_ASSERT(r, fast == exact);
        }
    }
    }
}

DEF_TEST(SkNx_abs, r) {
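    // abs() drops the sign: -0.0f and -4.0f should come back as 0.0f and 4.0f.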
    auto fs = Sk4f(0.0f, -0.0f, 2.0f, -4.0f).abs();
    REPORTER_ASSERT(r, fs[0] == 0.0f);
    REPORTER_ASSERT(r, fs[1] == 0.0f);
    REPORTER_ASSERT(r, fs[2] == 2.0f);
    REPORTER_ASSERT(r, fs[3] == 4.0f);
    auto fshi = Sk2f(0.0f, -0.0f).abs();
    auto fslo = Sk2f(2.0f, -4.0f).abs();
    REPORTER_ASSERT(r, fshi[0] == 0.0f);
    REPORTER_ASSERT(r, fshi[1] == 0.0f);
    REPORTER_ASSERT(r, fslo[0] == 2.0f);
    REPORTER_ASSERT(r, fslo[1] == 4.0f);
}

DEF_TEST(Sk4i_abs, r) {
    auto is = Sk4i(0, -1, 2, -2147483647).abs();
    REPORTER_ASSERT(r, is[0] == 0);
    REPORTER_ASSERT(r, is[1] == 1);
    REPORTER_ASSERT(r, is[2] == 2);
    REPORTER_ASSERT(r, is[3] == 2147483647);
}

DEF_TEST(Sk4i_minmax, r) {
    auto a = Sk4i(0, 2, 4, 6);
    auto b = Sk4i(1, 1, 3, 7);
    auto min = Sk4i::Min(a, b);
    auto max = Sk4i::Max(a, b);
    for (int i = 0; i < 4; ++i) {
        REPORTER_ASSERT(r, min[i] == std::min(a[i], b[i]));
        REPORTER_ASSERT(r, max[i] == std::max(a[i], b[i]));
    }
}

DEF_TEST(SkNx_floor, r) {
    auto fs = Sk4f(0.4f, -0.4f, 0.6f, -0.6f).floor();
    REPORTER_ASSERT(r, fs[0] == 0.0f);
    REPORTER_ASSERT(r, fs[1] == -1.0f);
    REPORTER_ASSERT(r, fs[2] == 0.0f);
    REPORTER_ASSERT(r, fs[3] == -1.0f);

    auto fs2 = Sk2f(0.4f, -0.4f).floor();
    REPORTER_ASSERT(r, fs2[0] == 0.0f);
    REPORTER_ASSERT(r, fs2[1] == -1.0f);

    auto fs3 = Sk2f(0.6f, -0.6f).floor();
    REPORTER_ASSERT(r, fs3[0] == 0.0f);
    REPORTER_ASSERT(r, fs3[1] == -1.0f);
}

DEF_TEST(SkNx_shuffle, r) {
    Sk4f f4(0,10,20,30);

    Sk2f f2 = SkNx_shuffle<2,1>(f4);
    REPORTER_ASSERT(r, f2[0] == 20);
    REPORTER_ASSERT(r, f2[1] == 10);

    f4 = SkNx_shuffle<0,1,1,0>(f2);
    REPORTER_ASSERT(r, f4[0] == 20);
    REPORTER_ASSERT(r, f4[1] == 10);
    REPORTER_ASSERT(r, f4[2] == 10);
    REPORTER_ASSERT(r, f4[3] == 20);
}

DEF_TEST(SkNx_int_float, r) {
    Sk4f f(-2.3f, 1.0f, 0.45f, 0.6f);

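    // SkNx_cast<int>(Sk4f) truncates toward zero: -2.3f -> -2, 0.45f and 0.6f -> 0.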
    Sk4i i = SkNx_cast<int>(f);
    REPORTER_ASSERT(r, i[0] == -2);
    REPORTER_ASSERT(r, i[1] == 1);
    REPORTER_ASSERT(r, i[2] == 0);
    REPORTER_ASSERT(r, i[3] == 0);

    f = SkNx_cast<float>(i);
    REPORTER_ASSERT(r, f[0] == -2.0f);
    REPORTER_ASSERT(r, f[1] == 1.0f);
    REPORTER_ASSERT(r, f[2] == 0.0f);
    REPORTER_ASSERT(r, f[3] == 0.0f);
}

DEF_TEST(SkNx_u16_float, r) {
    {
        // u16 --> float
        auto h4 = Sk4h(15, 17, 257, 65535);
        auto f4 = SkNx_cast<float>(h4);
        REPORTER_ASSERT(r, f4[0] == 15.0f);
        REPORTER_ASSERT(r, f4[1] == 17.0f);
        REPORTER_ASSERT(r, f4[2] == 257.0f);
        REPORTER_ASSERT(r, f4[3] == 65535.0f);
    }
    {
        // float -> u16
        auto f4 = Sk4f(15, 17, 257, 65535);
        auto h4 = SkNx_cast<uint16_t>(f4);
        REPORTER_ASSERT(r, h4[0] == 15);
        REPORTER_ASSERT(r, h4[1] == 17);
        REPORTER_ASSERT(r, h4[2] == 257);
        REPORTER_ASSERT(r, h4[3] == 65535);
    }

    // Starting from any u16 value, we should get a perfect round-trip through float:
    // every 16-bit integer is exactly representable in a float's 24-bit significand.
    SkRandom rand;
    for (int i = 0; i < 10000; ++i) {
        const uint16_t s16[4] {
            (uint16_t)(rand.nextU() >> 16), (uint16_t)(rand.nextU() >> 16),
            (uint16_t)(rand.nextU() >> 16), (uint16_t)(rand.nextU() >> 16),
        };
        auto u4_0 = Sk4h::Load(s16);
        auto f4 = SkNx_cast<float>(u4_0);
        auto u4_1 = SkNx_cast<uint16_t>(f4);
        uint16_t d16[4];
        u4_1.store(d16);
        REPORTER_ASSERT(r, !memcmp(s16, d16, sizeof(s16)));
    }
}

// The SSE2 implementation of SkNx_cast<uint16_t>(Sk4i) is non-trivial, so worth a test.
DEF_TEST(SkNx_int_u16, r) {
    // These are pretty hard to get wrong.
    for (int i = 0; i <= 0x7fff; i++) {
        uint16_t expected = (uint16_t)i;
        uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];

        REPORTER_ASSERT(r, expected == actual);
    }

    // A naive implementation using _mm_packs_epi32 would succeed up to 0x7fff but fail here,
    // because that instruction saturates to the signed int16 range, clamping these to 0x7fff:
    for (int i = 0x8000; i <= 0xffff; i++) {
        uint16_t expected = (uint16_t)i;
        uint16_t actual = SkNx_cast<uint16_t>(Sk4i(i))[0];

        REPORTER_ASSERT(r, expected == actual);
    }
}

DEF_TEST(SkNx_4fLoad4Store4, r) {
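    // Load4() transposes 16 consecutive floats into four vectors (a gets element 0 of each
    // group of four, b element 1, and so on); Store4() is the inverse interleave.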
    float src[] = {
        0.0f, 1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f, 7.0f,
        8.0f, 9.0f, 10.0f, 11.0f,
        12.0f, 13.0f, 14.0f, 15.0f
    };

    Sk4f a, b, c, d;
    Sk4f::Load4(src, &a, &b, &c, &d);
    REPORTER_ASSERT(r, 0.0f == a[0]);
    REPORTER_ASSERT(r, 4.0f == a[1]);
    REPORTER_ASSERT(r, 8.0f == a[2]);
    REPORTER_ASSERT(r, 12.0f == a[3]);
    REPORTER_ASSERT(r, 1.0f == b[0]);
    REPORTER_ASSERT(r, 5.0f == b[1]);
    REPORTER_ASSERT(r, 9.0f == b[2]);
    REPORTER_ASSERT(r, 13.0f == b[3]);
    REPORTER_ASSERT(r, 2.0f == c[0]);
    REPORTER_ASSERT(r, 6.0f == c[1]);
    REPORTER_ASSERT(r, 10.0f == c[2]);
    REPORTER_ASSERT(r, 14.0f == c[3]);
    REPORTER_ASSERT(r, 3.0f == d[0]);
    REPORTER_ASSERT(r, 7.0f == d[1]);
    REPORTER_ASSERT(r, 11.0f == d[2]);
    REPORTER_ASSERT(r, 15.0f == d[3]);

    float dst[16];
    Sk4f::Store4(dst, a, b, c, d);
    REPORTER_ASSERT(r, 0 == memcmp(dst, src, 16 * sizeof(float)));
}

DEF_TEST(SkNx_neg, r) {
    auto fs = -Sk4f(0.0f, -0.0f, 2.0f, -4.0f);
    REPORTER_ASSERT(r, fs[0] == 0.0f);
    REPORTER_ASSERT(r, fs[1] == 0.0f);
    REPORTER_ASSERT(r, fs[2] == -2.0f);
    REPORTER_ASSERT(r, fs[3] == 4.0f);
    auto fshi = -Sk2f(0.0f, -0.0f);
    auto fslo = -Sk2f(2.0f, -4.0f);
    REPORTER_ASSERT(r, fshi[0] == 0.0f);
    REPORTER_ASSERT(r, fshi[1] == 0.0f);
    REPORTER_ASSERT(r, fslo[0] == -2.0f);
    REPORTER_ASSERT(r, fslo[1] == 4.0f);
}

DEF_TEST(SkNx_thenElse, r) {
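    // thenElse() is a lane-wise select: lanes where the comparison is true take the first
    // argument, the rest take the second. Note that -0.0f < 0 is false.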
    auto fs = (Sk4f(0.0f, -0.0f, 2.0f, -4.0f) < 0).thenElse(-1, 1);
    REPORTER_ASSERT(r, fs[0] == 1);
    REPORTER_ASSERT(r, fs[1] == 1);
    REPORTER_ASSERT(r, fs[2] == 1);
    REPORTER_ASSERT(r, fs[3] == -1);
    auto fshi = (Sk2f(0.0f, -0.0f) < 0).thenElse(-1, 1);
    auto fslo = (Sk2f(2.0f, -4.0f) < 0).thenElse(-1, 1);
    REPORTER_ASSERT(r, fshi[0] == 1);
    REPORTER_ASSERT(r, fshi[1] == 1);
    REPORTER_ASSERT(r, fslo[0] == 1);
    REPORTER_ASSERT(r, fslo[1] == -1);
}

DEF_TEST(Sk4f_Load2, r) {
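    // Load2() de-interleaves consecutive (x, y) pairs into separate x and y vectors.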
    float xy[8] = { 0,1,2,3,4,5,6,7 };

    Sk4f x,y;
    Sk4f::Load2(xy, &x,&y);

    REPORTER_ASSERT(r, x[0] == 0);
    REPORTER_ASSERT(r, x[1] == 2);
    REPORTER_ASSERT(r, x[2] == 4);
    REPORTER_ASSERT(r, x[3] == 6);

    REPORTER_ASSERT(r, y[0] == 1);
    REPORTER_ASSERT(r, y[1] == 3);
    REPORTER_ASSERT(r, y[2] == 5);
    REPORTER_ASSERT(r, y[3] == 7);
}

DEF_TEST(Sk2f_Load2, r) {
    float xy[4] = { 0,1,2,3 };

    Sk2f x,y;
    Sk2f::Load2(xy, &x,&y);

    REPORTER_ASSERT(r, x[0] == 0);
    REPORTER_ASSERT(r, x[1] == 2);

    REPORTER_ASSERT(r, y[0] == 1);
    REPORTER_ASSERT(r, y[1] == 3);
}

DEF_TEST(Sk2f_Store2, r) {
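    // Store2/3/4() interleave their arguments point by point: dst gets lane 0 of each
    // input, then lane 1 of each input.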
    Sk2f p0{0, 2};
    Sk2f p1{1, 3};
    float dst[4];
    Sk2f::Store2(dst, p0, p1);
    REPORTER_ASSERT(r, dst[0] == 0);
    REPORTER_ASSERT(r, dst[1] == 1);
    REPORTER_ASSERT(r, dst[2] == 2);
    REPORTER_ASSERT(r, dst[3] == 3);
}

DEF_TEST(Sk2f_Store3, r) {
    Sk2f p0{0, 3};
    Sk2f p1{1, 4};
    Sk2f p2{2, 5};
    float dst[6];
    Sk2f::Store3(dst, p0, p1, p2);
    REPORTER_ASSERT(r, dst[0] == 0);
    REPORTER_ASSERT(r, dst[1] == 1);
    REPORTER_ASSERT(r, dst[2] == 2);
    REPORTER_ASSERT(r, dst[3] == 3);
    REPORTER_ASSERT(r, dst[4] == 4);
    REPORTER_ASSERT(r, dst[5] == 5);
}

DEF_TEST(Sk2f_Store4, r) {
    Sk2f p0{0, 4};
    Sk2f p1{1, 5};
    Sk2f p2{2, 6};
    Sk2f p3{3, 7};

    float dst[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
    Sk2f::Store4(dst, p0, p1, p2, p3);
    REPORTER_ASSERT(r, dst[0] == 0);
    REPORTER_ASSERT(r, dst[1] == 1);
    REPORTER_ASSERT(r, dst[2] == 2);
    REPORTER_ASSERT(r, dst[3] == 3);
    REPORTER_ASSERT(r, dst[4] == 4);
    REPORTER_ASSERT(r, dst[5] == 5);
    REPORTER_ASSERT(r, dst[6] == 6);
    REPORTER_ASSERT(r, dst[7] == 7);

    // Ensure transposing to Sk4f works.
    Sk4f dst4f[2] = {{-1, -1, -1, -1}, {-1, -1, -1, -1}};
    Sk2f::Store4(dst4f, p0, p1, p2, p3);
    REPORTER_ASSERT(r, dst4f[0][0] == 0);
    REPORTER_ASSERT(r, dst4f[0][1] == 1);
    REPORTER_ASSERT(r, dst4f[0][2] == 2);
    REPORTER_ASSERT(r, dst4f[0][3] == 3);
    REPORTER_ASSERT(r, dst4f[1][0] == 4);
    REPORTER_ASSERT(r, dst4f[1][1] == 5);
    REPORTER_ASSERT(r, dst4f[1][2] == 6);
    REPORTER_ASSERT(r, dst4f[1][3] == 7);
}

DEF_TEST(Sk4f_minmax, r) {
    REPORTER_ASSERT(r, 3 == Sk4f(0,1,2,3).max());
    REPORTER_ASSERT(r, 2 == Sk4f(1,-5,2,-1).max());
    REPORTER_ASSERT(r, -1 == Sk4f(-2,-1,-6,-3).max());
    REPORTER_ASSERT(r, 3 == Sk4f(3,2,1,0).max());

    REPORTER_ASSERT(r, 0 == Sk4f(0,1,2,3).min());
    REPORTER_ASSERT(r, -5 == Sk4f(1,-5,2,-1).min());
    REPORTER_ASSERT(r, -6 == Sk4f(-2,-1,-6,-3).min());
    REPORTER_ASSERT(r, 0 == Sk4f(3,2,1,0).min());
}

DEF_TEST(SkNf_anyTrue_allTrue, r) {
    REPORTER_ASSERT(r, (Sk2f{1,2} < Sk2f{3,4}).anyTrue());
    REPORTER_ASSERT(r, (Sk2f{1,2} < Sk2f{3,4}).allTrue());
    REPORTER_ASSERT(r, (Sk2f{3,2} < Sk2f{1,4}).anyTrue());
    REPORTER_ASSERT(r, !(Sk2f{3,2} < Sk2f{1,4}).allTrue());
    REPORTER_ASSERT(r, !(Sk2f{3,4} < Sk2f{1,2}).anyTrue());

    REPORTER_ASSERT(r, (Sk4f{1,2,3,4} < Sk4f{3,4,5,6}).anyTrue());
    REPORTER_ASSERT(r, (Sk4f{1,2,3,4} < Sk4f{3,4,5,6}).allTrue());
    REPORTER_ASSERT(r, (Sk4f{1,2,3,4} < Sk4f{1,4,1,1}).anyTrue());
    REPORTER_ASSERT(r, !(Sk4f{1,2,3,4} < Sk4f{1,4,1,1}).allTrue());
    REPORTER_ASSERT(r, !(Sk4f{3,4,5,6} < Sk4f{1,2,3,4}).anyTrue());
}