mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 1 | #include "Test.h" |
| 2 | #include "Sk4x.h" |
| 3 | |
// Convenience wrappers over REPORTER_ASSERT (expects a `skiatest::Reporter* r`
// in scope, as DEF_TEST provides). `a` is parenthesized so that expressions
// with lower-precedence operators (e.g. casts, ternaries) expand safely
// before the member call.
#define ASSERT_EQ(a, b) REPORTER_ASSERT(r, (a).equal(b).allTrue())
#define ASSERT_NE(a, b) REPORTER_ASSERT(r, (a).notEqual(b).allTrue())
| 6 | |
| 7 | DEF_TEST(Sk4x_Construction, r) { |
| 8 | Sk4f uninitialized; |
| 9 | Sk4f zero(0,0,0,0); |
| 10 | Sk4f foo(1,2,3,4), |
| 11 | bar(foo), |
| 12 | baz = bar; |
| 13 | ASSERT_EQ(foo, bar); |
| 14 | ASSERT_EQ(bar, baz); |
| 15 | ASSERT_EQ(baz, foo); |
| 16 | } |
| 17 | |
// Scratch storage for the load/store test below.
// On 64-bit machines the stack starts 128-bit aligned, but not necessarily so
// on 32-bit. Putting an Sk4f member first forces the whole struct to 16-byte
// alignment, guaranteeing fs[0] sits on a 16-byte boundary (and fs+1 does not).
struct AlignedFloats {
    Sk4f forces16ByteAlignment;  // Never read; exists only for its alignment.
    float fs[5];                 // fs is aligned, fs+1 is deliberately unaligned.
};
| 22 | |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 23 | DEF_TEST(Sk4x_LoadStore, r) { |
mtklein | b116619 | 2014-11-25 11:00:38 -0800 | [diff] [blame] | 24 | AlignedFloats aligned; |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 25 | // fs will be 16-byte aligned, fs+1 not. |
mtklein | b116619 | 2014-11-25 11:00:38 -0800 | [diff] [blame] | 26 | float* fs = aligned.fs; |
| 27 | for (int i = 0; i < 5; i++) { // set to 5,6,7,8,9 |
| 28 | fs[i] = float(i+5); |
| 29 | } |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 30 | |
| 31 | Sk4f foo = Sk4f::Load(fs); |
| 32 | Sk4f bar = Sk4f::LoadAligned(fs); |
| 33 | ASSERT_EQ(foo, bar); |
| 34 | |
| 35 | foo = Sk4f::Load(fs+1); |
| 36 | ASSERT_NE(foo, bar); |
| 37 | |
| 38 | foo.storeAligned(fs); |
| 39 | bar.store(fs+1); |
| 40 | REPORTER_ASSERT(r, fs[0] == 6 && |
| 41 | fs[1] == 5 && |
| 42 | fs[2] == 6 && |
| 43 | fs[3] == 7 && |
| 44 | fs[4] == 8); |
| 45 | } |
| 46 | |
| 47 | DEF_TEST(Sk4x_Conversions, r) { |
| 48 | // Assuming IEEE floats. |
| 49 | Sk4f zerof(0,0,0,0); |
| 50 | Sk4i zeroi(0,0,0,0); |
| 51 | ASSERT_EQ(zeroi, zerof.cast<Sk4i>()); |
| 52 | ASSERT_EQ(zeroi, zerof.reinterpret<Sk4i>()); |
mtklein | ef09991 | 2014-11-12 07:27:01 -0800 | [diff] [blame] | 53 | ASSERT_EQ(zerof, zeroi.cast<Sk4f>()); |
| 54 | ASSERT_EQ(zerof, zeroi.reinterpret<Sk4f>()); |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 55 | |
| 56 | Sk4f twof(2,2,2,2); |
| 57 | Sk4i twoi(2,2,2,2); |
| 58 | ASSERT_EQ(twoi, twof.cast<Sk4i>()); |
| 59 | ASSERT_NE(twoi, twof.reinterpret<Sk4i>()); |
mtklein | ef09991 | 2014-11-12 07:27:01 -0800 | [diff] [blame] | 60 | ASSERT_EQ(twof, twoi.cast<Sk4f>()); |
| 61 | ASSERT_NE(twof, twoi.reinterpret<Sk4f>()); |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 62 | } |
| 63 | |
| 64 | DEF_TEST(Sk4x_Bits, r) { |
| 65 | ASSERT_EQ(Sk4i(0,0,0,0).bitNot(), Sk4i(-1,-1,-1,-1)); |
| 66 | |
| 67 | Sk4i a(2,3,4,5), |
| 68 | b(1,3,5,7); |
| 69 | ASSERT_EQ(Sk4i(0,3,4,5), a.bitAnd(b)); |
| 70 | ASSERT_EQ(Sk4i(3,3,5,7), a.bitOr(b)); |
| 71 | } |
| 72 | |
| 73 | DEF_TEST(Sk4x_Arith, r) { |
| 74 | ASSERT_EQ(Sk4f(4,6,8,10), Sk4f(1,2,3,4).add(Sk4f(3,4,5,6))); |
| 75 | ASSERT_EQ(Sk4f(-2,-2,-2,-2), Sk4f(1,2,3,4).subtract(Sk4f(3,4,5,6))); |
| 76 | ASSERT_EQ(Sk4f(3,8,15,24), Sk4f(1,2,3,4).multiply(Sk4f(3,4,5,6))); |
| 77 | |
| 78 | float third = 1.0f/3.0f; |
| 79 | ASSERT_EQ(Sk4f(1*third, 0.5f, 0.6f, 2*third), Sk4f(1,2,3,4).divide(Sk4f(3,4,5,6))); |
mtklein | ef09991 | 2014-11-12 07:27:01 -0800 | [diff] [blame] | 80 | |
| 81 | ASSERT_EQ(Sk4i(4,6,8,10), Sk4i(1,2,3,4).add(Sk4i(3,4,5,6))); |
| 82 | ASSERT_EQ(Sk4i(-2,-2,-2,-2), Sk4i(1,2,3,4).subtract(Sk4i(3,4,5,6))); |
| 83 | ASSERT_EQ(Sk4i(3,8,15,24), Sk4i(1,2,3,4).multiply(Sk4i(3,4,5,6))); |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 84 | } |
| 85 | |
| 86 | DEF_TEST(Sk4x_Comparison, r) { |
| 87 | ASSERT_EQ(Sk4f(1,2,3,4), Sk4f(1,2,3,4)); |
| 88 | ASSERT_NE(Sk4f(4,3,2,1), Sk4f(1,2,3,4)); |
| 89 | |
| 90 | ASSERT_EQ(Sk4i(-1,-1,0,-1), Sk4f(1,2,5,4).equal(Sk4f(1,2,3,4))); |
| 91 | |
| 92 | ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4f(1,2,3,4).lessThan(Sk4f(2,3,4,5))); |
| 93 | ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4f(1,2,3,4).lessThanEqual(Sk4f(2,3,4,5))); |
| 94 | ASSERT_EQ(Sk4i(0,0,0,0), Sk4f(1,2,3,4).greaterThan(Sk4f(2,3,4,5))); |
| 95 | ASSERT_EQ(Sk4i(0,0,0,0), Sk4f(1,2,3,4).greaterThanEqual(Sk4f(2,3,4,5))); |
mtklein | ef09991 | 2014-11-12 07:27:01 -0800 | [diff] [blame] | 96 | |
| 97 | ASSERT_EQ(Sk4i(1,2,3,4), Sk4i(1,2,3,4)); |
| 98 | ASSERT_NE(Sk4i(4,3,2,1), Sk4i(1,2,3,4)); |
| 99 | |
| 100 | ASSERT_EQ(Sk4i(-1,-1,0,-1), Sk4i(1,2,5,4).equal(Sk4i(1,2,3,4))); |
| 101 | |
| 102 | ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4i(1,2,3,4).lessThan(Sk4i(2,3,4,5))); |
| 103 | ASSERT_EQ(Sk4i(-1,-1,-1,-1), Sk4i(1,2,3,4).lessThanEqual(Sk4i(2,3,4,5))); |
| 104 | ASSERT_EQ(Sk4i(0,0,0,0), Sk4i(1,2,3,4).greaterThan(Sk4i(2,3,4,5))); |
| 105 | ASSERT_EQ(Sk4i(0,0,0,0), Sk4i(1,2,3,4).greaterThanEqual(Sk4i(2,3,4,5))); |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 106 | } |
| 107 | |
| 108 | DEF_TEST(Sk4x_MinMax, r) { |
| 109 | ASSERT_EQ(Sk4f(1,2,2,1), Sk4f::Min(Sk4f(1,2,3,4), Sk4f(4,3,2,1))); |
| 110 | ASSERT_EQ(Sk4f(4,3,3,4), Sk4f::Max(Sk4f(1,2,3,4), Sk4f(4,3,2,1))); |
mtklein | ef09991 | 2014-11-12 07:27:01 -0800 | [diff] [blame] | 111 | ASSERT_EQ(Sk4i(1,2,2,1), Sk4i::Min(Sk4i(1,2,3,4), Sk4i(4,3,2,1))); |
| 112 | ASSERT_EQ(Sk4i(4,3,3,4), Sk4i::Max(Sk4i(1,2,3,4), Sk4i(4,3,2,1))); |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 113 | } |
| 114 | |
| 115 | DEF_TEST(Sk4x_Swizzle, r) { |
| 116 | ASSERT_EQ(Sk4f(3,4,1,2), Sk4f(1,2,3,4).zwxy()); |
| 117 | ASSERT_EQ(Sk4f(1,2,5,6), Sk4f::XYAB(Sk4f(1,2,3,4), Sk4f(5,6,7,8))); |
| 118 | ASSERT_EQ(Sk4f(3,4,7,8), Sk4f::ZWCD(Sk4f(1,2,3,4), Sk4f(5,6,7,8))); |
mtklein | ef09991 | 2014-11-12 07:27:01 -0800 | [diff] [blame] | 119 | ASSERT_EQ(Sk4i(3,4,1,2), Sk4i(1,2,3,4).zwxy()); |
| 120 | ASSERT_EQ(Sk4i(1,2,5,6), Sk4i::XYAB(Sk4i(1,2,3,4), Sk4i(5,6,7,8))); |
| 121 | ASSERT_EQ(Sk4i(3,4,7,8), Sk4i::ZWCD(Sk4i(1,2,3,4), Sk4i(5,6,7,8))); |
mtklein | 5da116f | 2014-11-11 06:16:42 -0800 | [diff] [blame] | 122 | } |