Merge V8 5.3.332.45. DO NOT MERGE
Test: Manual
FPIIM-449
Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn
new file mode 100644
index 0000000..b962673
--- /dev/null
+++ b/test/unittests/BUILD.gn
@@ -0,0 +1,182 @@
+# Copyright 2016 The V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Please keep this file in sync with unittests.gyp.
+
+import("../../gni/v8.gni")
+
+v8_executable("unittests") {
+ testonly = true
+
+ sources = [
+ "base/atomic-utils-unittest.cc",
+ "base/bits-unittest.cc",
+ "base/cpu-unittest.cc",
+ "base/division-by-constant-unittest.cc",
+ "base/flags-unittest.cc",
+ "base/functional-unittest.cc",
+ "base/ieee754-unittest.cc",
+ "base/iterator-unittest.cc",
+ "base/logging-unittest.cc",
+ "base/platform/condition-variable-unittest.cc",
+ "base/platform/mutex-unittest.cc",
+ "base/platform/platform-unittest.cc",
+ "base/platform/semaphore-unittest.cc",
+ "base/platform/time-unittest.cc",
+ "base/sys-info-unittest.cc",
+ "base/utils/random-number-generator-unittest.cc",
+ "cancelable-tasks-unittest.cc",
+ "char-predicates-unittest.cc",
+ "compiler/branch-elimination-unittest.cc",
+ "compiler/checkpoint-elimination-unittest.cc",
+ "compiler/common-operator-reducer-unittest.cc",
+ "compiler/common-operator-unittest.cc",
+ "compiler/compiler-test-utils.h",
+ "compiler/control-equivalence-unittest.cc",
+ "compiler/control-flow-optimizer-unittest.cc",
+ "compiler/dead-code-elimination-unittest.cc",
+ "compiler/diamond-unittest.cc",
+ "compiler/effect-control-linearizer-unittest.cc",
+ "compiler/escape-analysis-unittest.cc",
+ "compiler/graph-reducer-unittest.cc",
+ "compiler/graph-reducer-unittest.h",
+ "compiler/graph-trimmer-unittest.cc",
+ "compiler/graph-unittest.cc",
+ "compiler/graph-unittest.h",
+ "compiler/instruction-selector-unittest.cc",
+ "compiler/instruction-selector-unittest.h",
+ "compiler/instruction-sequence-unittest.cc",
+ "compiler/instruction-sequence-unittest.h",
+ "compiler/int64-lowering-unittest.cc",
+ "compiler/js-builtin-reducer-unittest.cc",
+ "compiler/js-create-lowering-unittest.cc",
+ "compiler/js-intrinsic-lowering-unittest.cc",
+ "compiler/js-operator-unittest.cc",
+ "compiler/js-typed-lowering-unittest.cc",
+ "compiler/linkage-tail-call-unittest.cc",
+ "compiler/live-range-unittest.cc",
+ "compiler/liveness-analyzer-unittest.cc",
+ "compiler/load-elimination-unittest.cc",
+ "compiler/loop-peeling-unittest.cc",
+ "compiler/machine-operator-reducer-unittest.cc",
+ "compiler/machine-operator-unittest.cc",
+ "compiler/move-optimizer-unittest.cc",
+ "compiler/node-cache-unittest.cc",
+ "compiler/node-matchers-unittest.cc",
+ "compiler/node-properties-unittest.cc",
+ "compiler/node-test-utils.cc",
+ "compiler/node-test-utils.h",
+ "compiler/node-unittest.cc",
+ "compiler/opcodes-unittest.cc",
+ "compiler/register-allocator-unittest.cc",
+ "compiler/schedule-unittest.cc",
+ "compiler/scheduler-rpo-unittest.cc",
+ "compiler/scheduler-unittest.cc",
+ "compiler/simplified-operator-reducer-unittest.cc",
+ "compiler/simplified-operator-unittest.cc",
+ "compiler/state-values-utils-unittest.cc",
+ "compiler/tail-call-optimization-unittest.cc",
+ "compiler/typer-unittest.cc",
+ "compiler/value-numbering-reducer-unittest.cc",
+ "compiler/zone-pool-unittest.cc",
+ "counters-unittest.cc",
+ "heap/bitmap-unittest.cc",
+ "heap/gc-idle-time-handler-unittest.cc",
+ "heap/gc-tracer-unittest.cc",
+ "heap/heap-unittest.cc",
+ "heap/memory-reducer-unittest.cc",
+ "heap/scavenge-job-unittest.cc",
+ "heap/slot-set-unittest.cc",
+ "interpreter/bytecode-array-builder-unittest.cc",
+ "interpreter/bytecode-array-iterator-unittest.cc",
+ "interpreter/bytecode-array-writer-unittest.cc",
+ "interpreter/bytecode-dead-code-optimizer-unittest.cc",
+ "interpreter/bytecode-peephole-optimizer-unittest.cc",
+ "interpreter/bytecode-pipeline-unittest.cc",
+ "interpreter/bytecode-register-allocator-unittest.cc",
+ "interpreter/bytecode-register-optimizer-unittest.cc",
+ "interpreter/bytecodes-unittest.cc",
+ "interpreter/constant-array-builder-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.cc",
+ "interpreter/interpreter-assembler-unittest.h",
+ "interpreter/source-position-table-unittest.cc",
+ "libplatform/default-platform-unittest.cc",
+ "libplatform/task-queue-unittest.cc",
+ "libplatform/worker-thread-unittest.cc",
+ "locked-queue-unittest.cc",
+ "register-configuration-unittest.cc",
+ "run-all-unittests.cc",
+ "test-utils.cc",
+ "test-utils.h",
+ "wasm/asm-types-unittest.cc",
+ "wasm/ast-decoder-unittest.cc",
+ "wasm/control-transfer-unittest.cc",
+ "wasm/decoder-unittest.cc",
+ "wasm/encoder-unittest.cc",
+ "wasm/leb-helper-unittest.cc",
+ "wasm/loop-assignment-analysis-unittest.cc",
+ "wasm/module-decoder-unittest.cc",
+ "wasm/switch-logic-unittest.cc",
+ "wasm/wasm-macro-gen-unittest.cc",
+ ]
+
+ if (v8_target_cpu == "arm") {
+ sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ]
+ } else if (v8_target_cpu == "arm64") {
+ sources += [ "compiler/arm64/instruction-selector-arm64-unittest.cc" ]
+ } else if (v8_target_cpu == "x86") {
+ sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ]
+ } else if (v8_target_cpu == "mips" || v8_target_cpu == "mipsel") {
+ sources += [ "compiler/mips/instruction-selector-mips-unittest.cc" ]
+ } else if (v8_target_cpu == "mips64" || v8_target_cpu == "mips64el") {
+ sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ]
+ } else if (v8_target_cpu == "x64") {
+ sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ]
+ } else if (v8_target_cpu == "ppc" || v8_target_cpu == "ppc64") {
+ sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ]
+ } else if (v8_target_cpu == "s390" || v8_target_cpu == "s390x") {
+ sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ]
+ }
+
+ configs = [
+ "../..:external_config",
+ "../..:internal_config_base",
+ ]
+
+ # TODO(machenbach): Translate from gyp.
+ #['OS=="aix"', {
+ # 'ldflags': [ '-Wl,-bbigtoc' ],
+ #}],
+
+ deps = [
+ "../..:v8_libplatform",
+ "//build/config/sanitizers:deps",
+ "//build/win:default_exe_manifest",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+
+ if (is_component_build) {
+ # compiler-unittests can't be built against a shared library, so we
+ # need to depend on the underlying static target in that case.
+ deps += [ "../..:v8_maybe_snapshot" ]
+ } else {
+ deps += [ "../..:v8" ]
+ }
+
+ if (is_win) {
+ # This warning is benignly triggered by the U16 and U32 macros in
+ # bytecode-utils.h.
+ # C4309: 'static_cast': truncation of constant value
+ cflags = [ "/wd4309" ]
+
+ # Suppress warnings about importing locally defined symbols.
+ if (is_component_build) {
+ ldflags = [
+ "/ignore:4049",
+ "/ignore:4217",
+ ]
+ }
+ }
+}
diff --git a/test/unittests/base/ieee754-unittest.cc b/test/unittests/base/ieee754-unittest.cc
new file mode 100644
index 0000000..8c71b57
--- /dev/null
+++ b/test/unittests/base/ieee754-unittest.cc
@@ -0,0 +1,323 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/base/ieee754.h"
+#include "src/base/macros.h"
+#include "testing/gmock-support.h"
+#include "testing/gtest-support.h"
+
+using testing::BitEq;
+using testing::IsNaN;
+
+namespace v8 {
+namespace base {
+namespace ieee754 {
+
+namespace {
+
+double const kE = 2.718281828459045;
+double const kPI = 3.141592653589793;
+double const kTwo120 = 1.329227995784916e+36;
+
+} // namespace
+
+TEST(Ieee754, Atan) {
+ EXPECT_THAT(atan(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(atan(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(atan(-0.0), BitEq(-0.0));
+ EXPECT_THAT(atan(0.0), BitEq(0.0));
+ EXPECT_DOUBLE_EQ(1.5707963267948966,
+ atan(std::numeric_limits<double>::infinity()));
+ EXPECT_DOUBLE_EQ(-1.5707963267948966,
+ atan(-std::numeric_limits<double>::infinity()));
+}
+
+TEST(Ieee754, Atan2) {
+ EXPECT_THAT(atan2(std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::quiet_NaN()),
+ IsNaN());
+ EXPECT_THAT(atan2(std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::signaling_NaN()),
+ IsNaN());
+ EXPECT_THAT(atan2(std::numeric_limits<double>::signaling_NaN(),
+ std::numeric_limits<double>::quiet_NaN()),
+ IsNaN());
+ EXPECT_THAT(atan2(std::numeric_limits<double>::signaling_NaN(),
+ std::numeric_limits<double>::signaling_NaN()),
+ IsNaN());
+ EXPECT_DOUBLE_EQ(0.7853981633974483,
+ atan2(std::numeric_limits<double>::infinity(),
+ std::numeric_limits<double>::infinity()));
+ EXPECT_DOUBLE_EQ(2.356194490192345,
+ atan2(std::numeric_limits<double>::infinity(),
+ -std::numeric_limits<double>::infinity()));
+ EXPECT_DOUBLE_EQ(-0.7853981633974483,
+ atan2(-std::numeric_limits<double>::infinity(),
+ std::numeric_limits<double>::infinity()));
+ EXPECT_DOUBLE_EQ(-2.356194490192345,
+ atan2(-std::numeric_limits<double>::infinity(),
+ -std::numeric_limits<double>::infinity()));
+}
+
+TEST(Ieee754, Atanh) {
+ EXPECT_THAT(atanh(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(atanh(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(atanh(std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), atanh(1));
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), atanh(-1));
+ EXPECT_DOUBLE_EQ(0.54930614433405478, atanh(0.5));
+}
+
+TEST(Ieee754, Cos) {
+ // Test values mentioned in the ECMAScript spec.
+ EXPECT_THAT(cos(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(cos(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(cos(std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_THAT(cos(-std::numeric_limits<double>::infinity()), IsNaN());
+
+ // Tests for cos for |x| < pi/4
+ EXPECT_EQ(1.0, 1 / cos(-0.0));
+ EXPECT_EQ(1.0, 1 / cos(0.0));
+ // cos(x) = 1 for |x| < 2^-27
+ EXPECT_EQ(1, cos(2.3283064365386963e-10));
+ EXPECT_EQ(1, cos(-2.3283064365386963e-10));
+ // Test KERNELCOS for |x| < 0.3.
+ // cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+ EXPECT_EQ(0.9876883405951378, cos(0.15707963267948966));
+ // Test KERNELCOS for x ~= 0.78125
+ EXPECT_EQ(0.7100335477927638, cos(0.7812504768371582));
+ EXPECT_EQ(0.7100338835660797, cos(0.78125));
+ // Test KERNELCOS for |x| > 0.3.
+ // cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+ EXPECT_EQ(0.9238795325112867, cos(0.39269908169872414));
+ // Test KERNELTAN for |x| < 0.67434.
+ EXPECT_EQ(0.9238795325112867, cos(-0.39269908169872414));
+
+ // Tests for cos.
+ EXPECT_EQ(1, cos(3.725290298461914e-9));
+ // Cover different code paths in KERNELCOS.
+ EXPECT_EQ(0.9689124217106447, cos(0.25));
+ EXPECT_EQ(0.8775825618903728, cos(0.5));
+ EXPECT_EQ(0.7073882691671998, cos(0.785));
+ // Test that cos(Math.PI/2) != 0 since Math.PI is not exact.
+ EXPECT_EQ(6.123233995736766e-17, cos(1.5707963267948966));
+ // Test cos for various phases.
+ EXPECT_EQ(0.7071067811865474, cos(7.0 / 4 * kPI));
+ EXPECT_EQ(0.7071067811865477, cos(9.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865467, cos(11.0 / 4 * kPI));
+ EXPECT_EQ(-0.7071067811865471, cos(13.0 / 4 * kPI));
+ EXPECT_EQ(0.9367521275331447, cos(1000000.0));
+ EXPECT_EQ(-3.435757038074824e-12, cos(1048575.0 / 2 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(-0.9258790228548379e0, cos(kTwo120));
+ EXPECT_EQ(-0.9258790228548379e0, cos(-kTwo120));
+}
+
+TEST(Ieee754, Exp) {
+ EXPECT_THAT(exp(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(exp(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_EQ(0.0, exp(-std::numeric_limits<double>::infinity()));
+ EXPECT_EQ(0.0, exp(-1000));
+ EXPECT_EQ(0.0, exp(-745.1332191019412));
+ EXPECT_EQ(2.2250738585072626e-308, exp(-708.39641853226408));
+ EXPECT_EQ(3.307553003638408e-308, exp(-708.0));
+ EXPECT_EQ(4.9406564584124654e-324, exp(-7.45133219101941108420e+02));
+ EXPECT_EQ(0.36787944117144233, exp(-1.0));
+ EXPECT_EQ(1.0, exp(-0.0));
+ EXPECT_EQ(1.0, exp(0.0));
+ EXPECT_EQ(1.0, exp(2.2250738585072014e-308));
+
+ // Test that exp(x) is monotonic near 1.
+ EXPECT_GE(exp(1.0), exp(0.9999999999999999));
+ EXPECT_LE(exp(1.0), exp(1.0000000000000002));
+
+ // Test that we produce the correctly rounded result for 1.
+ EXPECT_EQ(kE, exp(1.0));
+
+ EXPECT_EQ(7.38905609893065e0, exp(2.0));
+ EXPECT_EQ(1.7976931348622732e308, exp(7.09782712893383973096e+02));
+ EXPECT_EQ(2.6881171418161356e+43, exp(100.0));
+ EXPECT_EQ(8.218407461554972e+307, exp(709.0));
+ EXPECT_EQ(1.7968190737295725e308, exp(709.7822265625e0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), exp(709.7827128933841e0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), exp(710.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), exp(1000.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(),
+ exp(std::numeric_limits<double>::infinity()));
+}
+
+TEST(Ieee754, Expm1) {
+ EXPECT_THAT(expm1(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(expm1(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_EQ(-1.0, expm1(-std::numeric_limits<double>::infinity()));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(),
+ expm1(std::numeric_limits<double>::infinity()));
+ EXPECT_EQ(0.0, expm1(-0.0));
+ EXPECT_EQ(0.0, expm1(0.0));
+ EXPECT_EQ(1.718281828459045, expm1(1.0));
+ EXPECT_EQ(2.6881171418161356e+43, expm1(100.0));
+ EXPECT_EQ(8.218407461554972e+307, expm1(709.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), expm1(710.0));
+}
+
+TEST(Ieee754, Log) {
+ EXPECT_THAT(log(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(log(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(log(-std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_THAT(log(-1.0), IsNaN());
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), log(-0.0));
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), log(0.0));
+ EXPECT_EQ(0.0, log(1.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(),
+ log(std::numeric_limits<double>::infinity()));
+
+ // Test that log(E) produces the correctly rounded result.
+ EXPECT_EQ(1.0, log(kE));
+}
+
+TEST(Ieee754, Log1p) {
+ EXPECT_THAT(log1p(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(log1p(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(log1p(-std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), log1p(-1.0));
+ EXPECT_EQ(0.0, log1p(0.0));
+ EXPECT_EQ(-0.0, log1p(-0.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(),
+ log1p(std::numeric_limits<double>::infinity()));
+ EXPECT_EQ(6.9756137364252422e-03, log1p(0.007));
+ EXPECT_EQ(709.782712893384, log1p(1.7976931348623157e308));
+ EXPECT_EQ(2.7755575615628914e-17, log1p(2.7755575615628914e-17));
+ EXPECT_EQ(9.313225741817976e-10, log1p(9.313225746154785e-10));
+ EXPECT_EQ(-0.2876820724517809, log1p(-0.25));
+ EXPECT_EQ(0.22314355131420976, log1p(0.25));
+ EXPECT_EQ(2.3978952727983707, log1p(10));
+ EXPECT_EQ(36.841361487904734, log1p(10e15));
+ EXPECT_EQ(37.08337388996168, log1p(12738099905822720));
+ EXPECT_EQ(37.08336444902049, log1p(12737979646738432));
+ EXPECT_EQ(1.3862943611198906, log1p(3));
+ EXPECT_EQ(1.3862945995384413, log1p(3 + 9.5367431640625e-7));
+ EXPECT_EQ(0.5596157879354227, log1p(0.75));
+ EXPECT_EQ(0.8109302162163288, log1p(1.25));
+}
+
+TEST(Ieee754, Log2) {
+ EXPECT_THAT(log2(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(log2(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(log2(-std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_THAT(log2(-1.0), IsNaN());
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), log2(0.0));
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), log2(-0.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(),
+ log2(std::numeric_limits<double>::infinity()));
+}
+
+TEST(Ieee754, Log10) {
+ EXPECT_THAT(log10(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(log10(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(log10(-std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_THAT(log10(-1.0), IsNaN());
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), log10(0.0));
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), log10(-0.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(),
+ log10(std::numeric_limits<double>::infinity()));
+ EXPECT_EQ(3.0, log10(1000.0));
+ EXPECT_EQ(14.0, log10(100000000000000)); // log10(10 ^ 14)
+ EXPECT_EQ(3.7389561269540406, log10(5482.2158));
+ EXPECT_EQ(14.661551142893833, log10(458723662312872.125782332587));
+ EXPECT_EQ(-0.9083828622192334, log10(0.12348583358871));
+ EXPECT_EQ(5.0, log10(100000.0));
+}
+
+TEST(Ieee754, Cbrt) {
+ EXPECT_THAT(cbrt(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(cbrt(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_EQ(std::numeric_limits<double>::infinity(),
+ cbrt(std::numeric_limits<double>::infinity()));
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(),
+ cbrt(-std::numeric_limits<double>::infinity()));
+ EXPECT_EQ(1.4422495703074083, cbrt(3));
+ EXPECT_EQ(100, cbrt(100 * 100 * 100));
+ EXPECT_EQ(46.415888336127786, cbrt(100000));
+}
+
+TEST(Ieee754, Sin) {
+ // Test values mentioned in the ECMAScript spec.
+ EXPECT_THAT(sin(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(sin(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(sin(std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_THAT(sin(-std::numeric_limits<double>::infinity()), IsNaN());
+
+ // Tests for sin for |x| < pi/4
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), 1 / sin(-0.0));
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), 1 / sin(0.0));
+ // sin(x) = x for |x| < 2^-27
+ EXPECT_EQ(2.3283064365386963e-10, sin(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, sin(-2.3283064365386963e-10));
+ // sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+ EXPECT_EQ(0.3826834323650898, sin(0.39269908169872414));
+ EXPECT_EQ(-0.3826834323650898, sin(-0.39269908169872414));
+
+ // Tests for sin.
+ EXPECT_EQ(0.479425538604203, sin(0.5));
+ EXPECT_EQ(-0.479425538604203, sin(-0.5));
+ EXPECT_EQ(1, sin(kPI / 2.0));
+ EXPECT_EQ(-1, sin(-kPI / 2.0));
+ // Test that sin(Math.PI) != 0 since Math.PI is not exact.
+ EXPECT_EQ(1.2246467991473532e-16, sin(kPI));
+ EXPECT_EQ(-7.047032979958965e-14, sin(2200.0 * kPI));
+ // Test sin for various phases.
+ EXPECT_EQ(-0.7071067811865477, sin(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865474, sin(9.0 / 4.0 * kPI));
+ EXPECT_EQ(0.7071067811865483, sin(11.0 / 4.0 * kPI));
+ EXPECT_EQ(-0.7071067811865479, sin(13.0 / 4.0 * kPI));
+ EXPECT_EQ(-3.2103381051568376e-11, sin(1048576.0 / 4 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(0.377820109360752e0, sin(kTwo120));
+ EXPECT_EQ(-0.377820109360752e0, sin(-kTwo120));
+}
+
+TEST(Ieee754, Tan) {
+ // Test values mentioned in the ECMAScript spec.
+ EXPECT_THAT(tan(std::numeric_limits<double>::quiet_NaN()), IsNaN());
+ EXPECT_THAT(tan(std::numeric_limits<double>::signaling_NaN()), IsNaN());
+ EXPECT_THAT(tan(std::numeric_limits<double>::infinity()), IsNaN());
+ EXPECT_THAT(tan(-std::numeric_limits<double>::infinity()), IsNaN());
+
+ // Tests for tan for |x| < pi/4
+ EXPECT_EQ(std::numeric_limits<double>::infinity(), 1 / tan(0.0));
+ EXPECT_EQ(-std::numeric_limits<double>::infinity(), 1 / tan(-0.0));
+ // tan(x) = x for |x| < 2^-28
+ EXPECT_EQ(2.3283064365386963e-10, tan(2.3283064365386963e-10));
+ EXPECT_EQ(-2.3283064365386963e-10, tan(-2.3283064365386963e-10));
+ // Test KERNELTAN for |x| > 0.67434.
+ EXPECT_EQ(0.8211418015898941, tan(11.0 / 16.0));
+ EXPECT_EQ(-0.8211418015898941, tan(-11.0 / 16.0));
+ EXPECT_EQ(0.41421356237309503, tan(0.39269908169872414));
+ // crbug/427468
+ EXPECT_EQ(0.7993357819992383, tan(0.6743358));
+
+ // Tests for tan.
+ EXPECT_EQ(3.725290298461914e-9, tan(3.725290298461914e-9));
+ // Test that tan(PI/2) != Infinity since PI is not exact.
+ EXPECT_EQ(1.633123935319537e16, tan(kPI / 2));
+ // Cover different code paths in KERNELTAN (tangent and cotangent)
+ EXPECT_EQ(0.5463024898437905, tan(0.5));
+ EXPECT_EQ(2.0000000000000027, tan(1.107148717794091));
+ EXPECT_EQ(-1.0000000000000004, tan(7.0 / 4.0 * kPI));
+ EXPECT_EQ(0.9999999999999994, tan(9.0 / 4.0 * kPI));
+ EXPECT_EQ(-6.420676210313675e-11, tan(1048576.0 / 2.0 * kPI));
+ EXPECT_EQ(2.910566692924059e11, tan(1048575.0 / 2.0 * kPI));
+
+ // Test Payne-Hanek reduction.
+ EXPECT_EQ(-0.40806638884180424e0, tan(kTwo120));
+ EXPECT_EQ(0.40806638884180424e0, tan(-kTwo120));
+}
+
+} // namespace ieee754
+} // namespace base
+} // namespace v8
diff --git a/test/unittests/base/platform/time-unittest.cc b/test/unittests/base/platform/time-unittest.cc
index 784fbf8..9aa609f 100644
--- a/test/unittests/base/platform/time-unittest.cc
+++ b/test/unittests/base/platform/time-unittest.cc
@@ -15,6 +15,8 @@
#include "src/base/win32-headers.h"
#endif
+#include <vector>
+
#include "src/base/platform/elapsed-timer.h"
#include "src/base/platform/platform.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -185,7 +187,7 @@
// Disable on windows until it is implemented.
-#if V8_OS_ANDROID || V8_OS_WIN
+#if V8_OS_ANDROID
#define MAYBE_ThreadNow DISABLED_ThreadNow
#else
#define MAYBE_ThreadNow ThreadNow
@@ -210,5 +212,50 @@
}
}
+
+#if V8_OS_WIN
+TEST(TimeTicks, TimerPerformance) {
+ // Verify that various timer mechanisms can always complete quickly.
+ // Note: This is a somewhat arbitrary test.
+ const int kLoops = 10000;
+
+ typedef TimeTicks (*TestFunc)();
+ struct TestCase {
+ TestFunc func;
+ const char *description;
+ };
+ // Cheating a bit here: assumes sizeof(TimeTicks) == sizeof(Time)
+ // in order to create a single test case list.
+ static_assert(sizeof(TimeTicks) == sizeof(Time),
+ "TimeTicks and Time must be the same size");
+ std::vector<TestCase> cases;
+ cases.push_back({reinterpret_cast<TestFunc>(&Time::Now), "Time::Now"});
+ cases.push_back({&TimeTicks::Now, "TimeTicks::Now"});
+
+ if (ThreadTicks::IsSupported()) {
+ ThreadTicks::WaitUntilInitialized();
+ cases.push_back(
+ {reinterpret_cast<TestFunc>(&ThreadTicks::Now), "ThreadTicks::Now"});
+ }
+
+ for (const auto& test_case : cases) {
+ TimeTicks start = TimeTicks::Now();
+ for (int index = 0; index < kLoops; index++)
+ test_case.func();
+ TimeTicks stop = TimeTicks::Now();
+ // Turning off the check for acceptable delays. Without this check,
+ // the test really doesn't do much other than measure. But the
+ // measurements are still useful for testing timers on various platforms.
+ // The reason to remove the check is that the tests run on many
+ // buildbots, some of which are VMs. These machines can run horribly
+ // slowly, and there is really no value in checking against a max timer.
+ // const int kMaxTime = 35; // Maximum acceptable milliseconds for test.
+ // EXPECT_LT((stop - start).InMilliseconds(), kMaxTime);
+ printf("%s: %1.2fus per call\n", test_case.description,
+ (stop - start).InMillisecondsF() * 1000 / kLoops);
+ }
+}
+#endif // V8_OS_WIN
+
} // namespace base
} // namespace v8
diff --git a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index b088d8e..fa03039 100644
--- a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1392,8 +1392,8 @@
EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
ASSERT_EQ(3U, s[0]->InputCount());
- ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(0U, s[0]->OutputCount());
}
}
@@ -1403,6 +1403,39 @@
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
+TEST_F(InstructionSelectorMemoryAccessTest, LoadWithShiftedIndex) {
+ TRACED_FORRANGE(int, immediate_shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Return(m.Load(MachineType::Int32(), m.Parameter(0), index));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmLdr, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorMemoryAccessTest, StoreWithShiftedIndex) {
+ TRACED_FORRANGE(int, immediate_shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), MachineType::Int32());
+ Node* const index =
+ m.Word32Shl(m.Parameter(1), m.Int32Constant(immediate_shift));
+ m.Store(MachineRepresentation::kWord32, m.Parameter(0), index,
+ m.Parameter(2), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmStr, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
// -----------------------------------------------------------------------------
// Conversions.
@@ -2228,7 +2261,7 @@
MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
- Stream s = m.Build(MLS);
+ Stream s = m.Build(ARMv7);
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArmMls, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -2324,7 +2357,7 @@
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
- Stream s = m.Build(MLS, SUDIV);
+ Stream s = m.Build(ARMv7, SUDIV);
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -2530,7 +2563,7 @@
StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
- Stream s = m.Build(MLS, SUDIV);
+ Stream s = m.Build(ARMv7, SUDIV);
ASSERT_EQ(2U, s.size());
EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -3026,6 +3059,36 @@
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float32Neg) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float32Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float32Neg().op(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Neg) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float64Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float64Neg().op(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 1c638b2..5fc210b 100644
--- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -184,8 +184,11 @@
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
kArm64Add32, MachineType::Int32()},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
- kArm64Sub32, MachineType::Int32()}};
-
+ kArm64Sub32, MachineType::Int32()},
+ {&RawMachineAssembler::Int64AddWithOverflow, "Int64AddWithOverflow",
+ kArm64Add, MachineType::Int64()},
+ {&RawMachineAssembler::Int64SubWithOverflow, "Int64SubWithOverflow",
+ kArm64Sub, MachineType::Int64()}};
// ARM64 shift instructions.
const Shift kShiftInstructions[] = {
@@ -1178,7 +1181,6 @@
}
}
-
TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
@@ -1261,6 +1263,91 @@
}
}
+TEST_F(InstructionSelectorTest, Word32EqualZeroAndBranchWithOneBitMask) {
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(mask), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 31) {
+ uint32_t mask = 1 << bit;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ m.Branch(
+ m.Word32NotEqual(m.Word32And(m.Int32Constant(mask), m.Parameter(0)),
+ m.Int32Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64EqualZeroAndBranchWithOneBitMask) {
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = V8_UINT64_C(1) << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ m.Branch(m.Word64Equal(m.Word64And(m.Int64Constant(mask), m.Parameter(0)),
+ m.Int64Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+
+ TRACED_FORRANGE(int, bit, 0, 63) {
+ uint64_t mask = V8_UINT64_C(1) << bit;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ m.Branch(
+ m.Word64NotEqual(m.Word64And(m.Int64Constant(mask), m.Parameter(0)),
+ m.Int64Constant(0)),
+ &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(4U, s[0]->InputCount());
+ EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
+ }
+}
TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
{
@@ -1298,6 +1385,75 @@
}
}
+TEST_F(InstructionSelectorTest, EqualZeroAndBranch) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word32Equal(p0, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word32NotEqual(p0, m.Int32Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word64Equal(p0, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
+ Node* p0 = m.Parameter(0);
+ m.Branch(m.Word64NotEqual(p0, m.Int64Constant(0)), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int64Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int64Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64CompareAndBranch, s[0]->arch_opcode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ }
+}
// -----------------------------------------------------------------------------
// Add and subtract instructions with overflow.
@@ -1453,6 +1609,29 @@
}
}
+TEST_P(InstructionSelectorOvfAddSubTest, RORShift) {
+ // ADD and SUB do not support ROR shifts; make sure we do not try
+ // to merge them into the ADD/SUB instruction.
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ auto rotate = &RawMachineAssembler::Word64Ror;
+ ArchOpcode rotate_opcode = kArm64Ror;
+ if (type == MachineType::Int32()) {
+ rotate = &RawMachineAssembler::Word32Ror;
+ rotate_opcode = kArm64Ror32;
+ }
+ TRACED_FORRANGE(int32_t, imm, -32, 63) {
+ StreamBuilder m(this, type, type, type);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = (m.*rotate)(p1, m.Int32Constant(imm));
+ m.Return((m.*dpi.constructor)(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(rotate_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(dpi.arch_opcode, s[1]->arch_opcode());
+ }
+}
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
InstructionSelectorOvfAddSubTest,
@@ -2846,6 +3025,7 @@
struct IntegerCmp {
MachInst2 mi;
FlagsCondition cond;
+ FlagsCondition commuted_cond;
};
@@ -2858,19 +3038,24 @@
const IntegerCmp kIntegerCmpInstructions[] = {
{{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
MachineType::Int32()},
+ kEqual,
kEqual},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
MachineType::Int32()},
- kSignedLessThan},
+ kSignedLessThan,
+ kSignedGreaterThan},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
kArm64Cmp32, MachineType::Int32()},
- kSignedLessThanOrEqual},
+ kSignedLessThanOrEqual,
+ kSignedGreaterThanOrEqual},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
MachineType::Uint32()},
- kUnsignedLessThan},
+ kUnsignedLessThan,
+ kUnsignedGreaterThan},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
kArm64Cmp32, MachineType::Uint32()},
- kUnsignedLessThanOrEqual}};
+ kUnsignedLessThanOrEqual,
+ kUnsignedGreaterThanOrEqual}};
} // namespace
@@ -2907,6 +3092,156 @@
}
}
+TEST_F(InstructionSelectorTest, CmpWithImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
+ // is tested elsewhere.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), p0), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ ASSERT_LE(2U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnWithImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+ // kEqual and kNotEqual trigger the cbz/cbnz optimization, which
+ // is tested elsewhere.
+ if (cmp.cond == kEqual || cmp.cond == kNotEqual) continue;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ RawMachineLabel a, b;
+ m.Branch((m.*cmp.mi.constructor)(m.Int32Constant(imm), sub), &a, &b);
+ m.Bind(&a);
+ m.Return(m.Int32Constant(1));
+ m.Bind(&b);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ ASSERT_LE(2U, s[0]->InputCount());
+ EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmpSignedExtendByteOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
+ m.Return((m.*cmp.mi.constructor)(extend, m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnSignedExtendByteOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ Node* extend = m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24));
+ m.Return((m.*cmp.mi.constructor)(extend, sub));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmpShiftByImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return((m.*cmp.mi.constructor)(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ m.Parameter(0)));
+ Stream s = m.Build();
+ // Cmp does not support ROR shifts.
+ if (shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+ }
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, CmnShiftByImmediateOnLeft) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Only test relevant shifted operands.
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
+
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* sub = m.Int32Sub(m.Int32Constant(0), m.Parameter(0));
+ m.Return((m.*cmp.mi.constructor)(
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+ sub));
+ Stream s = m.Build();
+ // Cmn does not support ROR shifts.
+ if (shift.mi.arch_opcode == kArm64Ror32) {
+ ASSERT_EQ(2U, s.size());
+ continue;
+ }
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+ }
+}
+
// -----------------------------------------------------------------------------
// Miscellaneous
@@ -3574,6 +3909,36 @@
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+TEST_F(InstructionSelectorTest, Float32Neg) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float32Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float32Neg().op(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Neg) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ // Don't use m.Float64Neg() as that generates an explicit sub.
+ Node* const n = m.AddNode(m.machine()->Float64Neg().op(), m.Parameter(0));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/checkpoint-elimination-unittest.cc b/test/unittests/compiler/checkpoint-elimination-unittest.cc
new file mode 100644
index 0000000..a201fc9
--- /dev/null
+++ b/test/unittests/compiler/checkpoint-elimination-unittest.cc
@@ -0,0 +1,59 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/checkpoint-elimination.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CheckpointEliminationTest : public GraphTest {
+ public:
+ CheckpointEliminationTest() : GraphTest() {}
+ ~CheckpointEliminationTest() override {}
+
+ protected:
+ Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
+ CheckpointElimination reducer(editor);
+ return reducer.Reduce(node);
+ }
+
+ Reduction Reduce(Node* node) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ return Reduce(&editor, node);
+ }
+};
+
+namespace {
+
+const Operator kOpNoWrite(0, Operator::kNoWrite, "OpNoWrite", 0, 1, 0, 0, 1, 0);
+
+} // namespace
+
+// -----------------------------------------------------------------------------
+// Checkpoint
+
+TEST_F(CheckpointEliminationTest, CheckpointChain) {
+ Node* const control = graph()->start();
+ Node* frame_state = EmptyFrameState();
+ Node* checkpoint1 = graph()->NewNode(common()->Checkpoint(), frame_state,
+ graph()->start(), control);
+ Node* effect_link = graph()->NewNode(&kOpNoWrite, checkpoint1);
+ Node* checkpoint2 = graph()->NewNode(common()->Checkpoint(), frame_state,
+ effect_link, control);
+ Reduction r = Reduce(checkpoint2);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(effect_link, r.replacement());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/coalesced-live-ranges-unittest.cc b/test/unittests/compiler/coalesced-live-ranges-unittest.cc
deleted file mode 100644
index fe8fac4..0000000
--- a/test/unittests/compiler/coalesced-live-ranges-unittest.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/coalesced-live-ranges.h"
-#include "test/unittests/compiler/live-range-builder.h"
-#include "test/unittests/test-utils.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-
-class CoalescedLiveRangesTest : public TestWithZone {
- public:
- CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
- bool HasNoConflicts(const LiveRange* range);
- bool ConflictsPreciselyWith(const LiveRange* range, int id);
- bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
-
- CoalescedLiveRanges& ranges() { return ranges_; }
- const CoalescedLiveRanges& ranges() const { return ranges_; }
- bool AllocationsAreValid() const;
- void RemoveConflicts(LiveRange* range);
-
- private:
- typedef ZoneSet<int> LiveRangeIDs;
- bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
- CoalescedLiveRanges ranges_;
-};
-
-
-bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
- int id) {
- LiveRangeIDs set(zone());
- set.insert(id);
- return IsRangeConflictingWith(range, set);
-}
-
-
-bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
- int id1, int id2) {
- LiveRangeIDs set(zone());
- set.insert(id1);
- set.insert(id2);
- return IsRangeConflictingWith(range, set);
-}
-
-
-bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
- LiveRangeIDs set(zone());
- return IsRangeConflictingWith(range, set);
-}
-
-
-void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
- auto conflicts = ranges().GetConflicts(range);
- LiveRangeIDs seen(zone());
- for (auto c = conflicts.Current(); c != nullptr;
- c = conflicts.RemoveCurrentAndGetNext()) {
- int id = c->TopLevel()->vreg();
- EXPECT_FALSE(seen.count(id) > 0);
- seen.insert(c->TopLevel()->vreg());
- }
-}
-
-
-bool CoalescedLiveRangesTest::AllocationsAreValid() const {
- return ranges().VerifyAllocationsAreValidForTesting();
-}
-
-
-bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
- const LiveRangeIDs& ids) {
- LiveRangeIDs found_ids(zone());
-
- auto conflicts = ranges().GetConflicts(range);
- for (auto conflict = conflicts.Current(); conflict != nullptr;
- conflict = conflicts.GetNext()) {
- found_ids.insert(conflict->TopLevel()->vreg());
- }
- return found_ids == ids;
-}
-
-
-TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ASSERT_TRUE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- ASSERT_TRUE(HasNoConflicts(range));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
- ASSERT_TRUE(HasNoConflicts(query));
- query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- LiveRange* query =
- TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
- ASSERT_TRUE(HasNoConflicts(query));
- query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- ASSERT_FALSE(ranges().empty());
- ASSERT_TRUE(AllocationsAreValid());
- ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
- range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
- ranges().AllocateRange(range);
- ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
- range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
- ranges().AllocateRange(range);
- query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
- range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
- ranges().AllocateRange(range);
- query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
- ranges().AllocateRange(range);
- LiveRange* query =
- TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
- ASSERT_TRUE(HasNoConflicts(query));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
- LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
- ranges().AllocateRange(range);
- LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
- LiveRange* range =
- TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
- ranges().AllocateRange(range);
- range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
- ranges().AllocateRange(range);
- LiveRange* query =
- TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
- RemoveConflicts(query);
- query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
- ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
-}
-
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/test/unittests/compiler/common-operator-unittest.cc b/test/unittests/compiler/common-operator-unittest.cc
index 0a55a2e..52f99a5 100644
--- a/test/unittests/compiler/common-operator-unittest.cc
+++ b/test/unittests/compiler/common-operator-unittest.cc
@@ -362,15 +362,26 @@
TEST_F(CommonOperatorTest, BeginRegion) {
- const Operator* op = common()->BeginRegion();
- EXPECT_EQ(1, op->EffectInputCount());
- EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ControlOutputCount());
- EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(0, op->ValueOutputCount());
+ {
+ const Operator* op =
+ common()->BeginRegion(RegionObservability::kObservable);
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
+ }
+ {
+ const Operator* op =
+ common()->BeginRegion(RegionObservability::kNotObservable);
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
+ }
}
-
TEST_F(CommonOperatorTest, FinishRegion) {
const Operator* op = common()->FinishRegion();
EXPECT_EQ(1, op->ValueInputCount());
@@ -381,6 +392,19 @@
EXPECT_EQ(1, op->ValueOutputCount());
}
+TEST_F(CommonOperatorTest, Projection) {
+ TRACED_FORRANGE(size_t, index, 0, 3) {
+ const Operator* op = common()->Projection(index);
+ EXPECT_EQ(index, ProjectionIndexOf(op));
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/escape-analysis-unittest.cc b/test/unittests/compiler/escape-analysis-unittest.cc
index 4c17ef2..9b584a2 100644
--- a/test/unittests/compiler/escape-analysis-unittest.cc
+++ b/test/unittests/compiler/escape-analysis-unittest.cc
@@ -48,7 +48,8 @@
effect = effect_;
}
- return effect_ = graph()->NewNode(common()->BeginRegion(), effect);
+ return effect_ = graph()->NewNode(
+ common()->BeginRegion(RegionObservability::kObservable), effect);
}
Node* FinishRegion(Node* value, Node* effect = nullptr) {
diff --git a/test/unittests/compiler/graph-unittest.h b/test/unittests/compiler/graph-unittest.h
index 31bae6d..d4248e4 100644
--- a/test/unittests/compiler/graph-unittest.h
+++ b/test/unittests/compiler/graph-unittest.h
@@ -48,6 +48,9 @@
Node* EmptyFrameState();
+ Matcher<Node*> IsBooleanConstant(bool value) {
+ return value ? IsTrueConstant() : IsFalseConstant();
+ }
Matcher<Node*> IsFalseConstant();
Matcher<Node*> IsTrueConstant();
Matcher<Node*> IsUndefinedConstant();
diff --git a/test/unittests/compiler/instruction-selector-unittest.cc b/test/unittests/compiler/instruction-selector-unittest.cc
index 69ae768..936a94e 100644
--- a/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/test/unittests/compiler/instruction-selector-unittest.cc
@@ -45,9 +45,8 @@
selector.SelectInstructions();
if (FLAG_trace_turbo) {
OFStream out(stdout);
- PrintableInstructionSequence printable = {
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
- &sequence};
+ PrintableInstructionSequence printable = {RegisterConfiguration::Turbofan(),
+ &sequence};
out << "=== Code sequence after instruction selection ===" << std::endl
<< printable;
}
@@ -94,12 +93,12 @@
}
for (auto i : s.virtual_registers_) {
int const virtual_register = i.second;
- if (sequence.IsFloat(virtual_register)) {
+ if (sequence.IsFP(virtual_register)) {
EXPECT_FALSE(sequence.IsReference(virtual_register));
s.doubles_.insert(virtual_register);
}
if (sequence.IsReference(virtual_register)) {
- EXPECT_FALSE(sequence.IsFloat(virtual_register));
+ EXPECT_FALSE(sequence.IsFP(virtual_register));
s.references_.insert(virtual_register);
}
}
@@ -333,7 +332,8 @@
Node* p2 = m2.Parameter(0);
m2.Return(m2.AddNode(
m2.machine()->Load(MachineType::Int32()), p2, m2.Int32Constant(0),
- m2.AddNode(m2.common()->BeginRegion(), m2.graph()->start())));
+ m2.AddNode(m2.common()->BeginRegion(RegionObservability::kObservable),
+ m2.graph()->start())));
Stream s2 = m2.Build(kAllInstructions);
EXPECT_LE(3U, s1.size());
ASSERT_EQ(s1.size(), s2.size());
@@ -480,7 +480,7 @@
EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(4))); // This should be a context.
// We inserted 0 here.
EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5)));
- EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined());
+ EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined(isolate()));
EXPECT_EQ(MachineType::AnyTagged(),
desc_before->GetType(0)); // function is always
// tagged/any.
diff --git a/test/unittests/compiler/instruction-sequence-unittest.cc b/test/unittests/compiler/instruction-sequence-unittest.cc
index 9360ca4..619e0aa 100644
--- a/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -67,8 +67,11 @@
if (config_.is_empty()) {
config_.Reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_,
- num_double_registers_, num_double_registers_, allocatable_codes,
- allocatable_double_codes, general_register_names_,
+ num_double_registers_, allocatable_codes, allocatable_double_codes,
+ kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
+ : RegisterConfiguration::COMBINE,
+ general_register_names_,
+ double_register_names_, // float register names
double_register_names_));
}
return config_.get();
diff --git a/test/unittests/compiler/int64-lowering-unittest.cc b/test/unittests/compiler/int64-lowering-unittest.cc
index 8bc02c5..804c399 100644
--- a/test/unittests/compiler/int64-lowering-unittest.cc
+++ b/test/unittests/compiler/int64-lowering-unittest.cc
@@ -133,6 +133,7 @@
MachineRepresentation::kWord64);
Capture<Node*> high_word_load;
+#if defined(V8_TARGET_LITTLE_ENDIAN)
Matcher<Node*> high_word_load_matcher =
IsLoad(MachineType::Int32(), IsInt32Constant(base),
IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)), start(),
@@ -146,6 +147,21 @@
start()),
AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
start(), start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ Matcher<Node*> high_word_load_matcher =
+ IsLoad(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Constant(index), start(), start());
+
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn2(
+ IsLoad(MachineType::Int32(), IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(0x4)),
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher),
+ start()),
+ AllOf(CaptureEq(&high_word_load), high_word_load_matcher), start(),
+ start()));
+#endif
}
TEST_F(Int64LoweringTest, Int64Store) {
@@ -177,6 +193,7 @@
const StoreRepresentation rep(MachineRepresentation::kWord32,
kNoWriteBarrier);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
EXPECT_THAT(
graph()->end()->InputAt(1),
IsReturn(
@@ -189,6 +206,20 @@
IsInt32Constant(high_word_value(0)), start(), start()),
start()),
start()));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+ EXPECT_THAT(
+ graph()->end()->InputAt(1),
+ IsReturn(
+ IsInt32Constant(return_value),
+ IsStore(
+ rep, IsInt32Constant(base),
+ IsInt32Add(IsInt32Constant(index), IsInt32Constant(4)),
+ IsInt32Constant(low_word_value(0)),
+ IsStore(rep, IsInt32Constant(base), IsInt32Constant(index),
+ IsInt32Constant(high_word_value(0)), start(), start()),
+ start()),
+ start()));
+#endif
}
TEST_F(Int64LoweringTest, Int64And) {
@@ -526,12 +557,13 @@
IsStore(StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier),
AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
- IsInt32Constant(0), IsInt32Constant(low_word_value(0)),
+ IsInt32Constant(Int64Lowering::kLowerWordOffset),
+ IsInt32Constant(low_word_value(0)),
IsStore(StoreRepresentation(MachineRepresentation::kWord32,
WriteBarrierKind::kNoWriteBarrier),
AllOf(CaptureEq(&stack_slot_capture), stack_slot_matcher),
- IsInt32Constant(4), IsInt32Constant(high_word_value(0)),
- start(), start()),
+ IsInt32Constant(Int64Lowering::kHigherWordOffset),
+ IsInt32Constant(high_word_value(0)), start(), start()),
start());
EXPECT_THAT(
@@ -563,11 +595,11 @@
graph()->end()->InputAt(1),
IsReturn2(IsLoad(MachineType::Int32(),
AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
- IsInt32Constant(0),
+ IsInt32Constant(Int64Lowering::kLowerWordOffset),
AllOf(CaptureEq(&store), store_matcher), start()),
IsLoad(MachineType::Int32(),
AllOf(CaptureEq(&stack_slot), stack_slot_matcher),
- IsInt32Constant(0x4),
+ IsInt32Constant(Int64Lowering::kHigherWordOffset),
AllOf(CaptureEq(&store), store_matcher), start()),
start(), start()));
}
diff --git a/test/unittests/compiler/js-builtin-reducer-unittest.cc b/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 0f8eed7..9a1378a 100644
--- a/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -49,6 +49,19 @@
return HeapConstant(f);
}
+ Node* StringFunction(const char* name) {
+ Handle<Object> m =
+ JSObject::GetProperty(
+ isolate()->global_object(),
+ isolate()->factory()->NewStringFromAsciiChecked("String"))
+ .ToHandleChecked();
+ Handle<JSFunction> f = Handle<JSFunction>::cast(
+ Object::GetProperty(
+ m, isolate()->factory()->NewStringFromAsciiChecked(name))
+ .ToHandleChecked());
+ return HeapConstant(f);
+ }
+
JSOperatorBuilder* javascript() { return &javascript_; }
private:
@@ -74,10 +87,519 @@
// -----------------------------------------------------------------------------
+// Math.abs
+
+TEST_F(JSBuiltinReducerTest, MathAbsWithNumber) {
+ Node* function = MathFunction("abs");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAbs(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAbsWithPlainPrimitive) {
+ Node* function = MathFunction("abs");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAbs(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atan
+
+TEST_F(JSBuiltinReducerTest, MathAtanWithNumber) {
+ Node* function = MathFunction("atan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAtanWithPlainPrimitive) {
+ Node* function = MathFunction("atan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.atan2
+
+TEST_F(JSBuiltinReducerTest, MathAtan2WithNumber) {
+ Node* function = MathFunction("atan2");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ TRACED_FOREACH(Type*, t1, kNumberTypes) {
+ Node* p1 = Parameter(t1, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan2(p0, p1));
+ }
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathAtan2WithPlainPrimitive) {
+ Node* function = MathFunction("atan2");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberAtan2(IsPlainPrimitiveToNumber(p0),
+ IsPlainPrimitiveToNumber(p1)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.ceil
+
+TEST_F(JSBuiltinReducerTest, MathCeilWithNumber) {
+ Node* function = MathFunction("ceil");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCeil(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCeilWithPlainPrimitive) {
+ Node* function = MathFunction("ceil");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCeil(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.clz32
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithUnsigned32) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberClz32(p0));
+}
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithNumber) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Number(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberClz32(IsNumberToUint32(p0)));
+}
+
+TEST_F(JSBuiltinReducerTest, MathClz32WithPlainPrimitive) {
+ Node* function = MathFunction("clz32");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberClz32(IsNumberToUint32(IsPlainPrimitiveToNumber(p0))));
+}
+
+// -----------------------------------------------------------------------------
+// Math.cos
+
+TEST_F(JSBuiltinReducerTest, MathCosWithNumber) {
+ Node* function = MathFunction("cos");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCos(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathCosWithPlainPrimitive) {
+ Node* function = MathFunction("cos");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberCos(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.exp
+
+TEST_F(JSBuiltinReducerTest, MathExpWithNumber) {
+ Node* function = MathFunction("exp");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberExp(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathExpWithPlainPrimitive) {
+ Node* function = MathFunction("exp");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberExp(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.floor
+
+TEST_F(JSBuiltinReducerTest, MathFloorWithNumber) {
+ Node* function = MathFunction("floor");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFloor(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathFloorWithPlainPrimitive) {
+ Node* function = MathFunction("floor");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFloor(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.fround
+
+TEST_F(JSBuiltinReducerTest, MathFroundWithNumber) {
+ Node* function = MathFunction("fround");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFround(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathFroundWithPlainPrimitive) {
+ Node* function = MathFunction("fround");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberFround(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+TEST_F(JSBuiltinReducerTest, MathImulWithUnsigned32) {
+ Node* function = MathFunction("imul");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Unsigned32(), 0);
+ Node* p1 = Parameter(Type::Unsigned32(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberImul(p0, p1));
+}
+
+TEST_F(JSBuiltinReducerTest, MathImulWithNumber) {
+ Node* function = MathFunction("imul");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::Number(), 0);
+ Node* p1 = Parameter(Type::Number(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
+}
+
+TEST_F(JSBuiltinReducerTest, MathImulWithPlainPrimitive) {
+ Node* function = MathFunction("imul");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* p1 = Parameter(Type::PlainPrimitive(), 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberImul(IsNumberToUint32(IsPlainPrimitiveToNumber(p0)),
+ IsNumberToUint32(IsPlainPrimitiveToNumber(p1))));
+}
+
+// -----------------------------------------------------------------------------
+// Math.log
+
+TEST_F(JSBuiltinReducerTest, MathLogWithNumber) {
+ Node* function = MathFunction("log");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathLogWithPlainPrimitive) {
+ Node* function = MathFunction("log");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.log1p
+
+TEST_F(JSBuiltinReducerTest, MathLog1pWithNumber) {
+ Node* function = MathFunction("log1p");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog1p(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathLog1pWithPlainPrimitive) {
+ Node* function = MathFunction("log1p");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberLog1p(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
// Math.max
-
-TEST_F(JSBuiltinReducerTest, MathMax0) {
+TEST_F(JSBuiltinReducerTest, MathMaxWithNoArguments) {
Node* function = MathFunction("max");
Node* effect = graph()->start();
@@ -86,15 +608,14 @@
Node* frame_state = graph()->start();
Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
UndefinedConstant(), context, frame_state,
- frame_state, effect, control);
+ effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
}
-
-TEST_F(JSBuiltinReducerTest, MathMax1) {
+TEST_F(JSBuiltinReducerTest, MathMaxWithNumber) {
Node* function = MathFunction("max");
Node* effect = graph()->start();
@@ -105,7 +626,7 @@
Node* p0 = Parameter(t0, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
UndefinedConstant(), p0, context, frame_state,
- frame_state, effect, control);
+ effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
@@ -113,8 +634,24 @@
}
}
+TEST_F(JSBuiltinReducerTest, MathMaxWithPlainPrimitive) {
+ Node* function = MathFunction("max");
-TEST_F(JSBuiltinReducerTest, MathMax2) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsPlainPrimitiveToNumber(p0));
+}
+
+TEST_F(JSBuiltinReducerTest, MathMaxWithIntegral32) {
Node* function = MathFunction("max");
Node* effect = graph()->start();
@@ -127,7 +664,7 @@
Node* p1 = Parameter(t1, 1);
Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
+ frame_state, effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
@@ -137,41 +674,27 @@
}
}
-
// -----------------------------------------------------------------------------
-// Math.imul
+// Math.min
-
-TEST_F(JSBuiltinReducerTest, MathImul) {
- Node* function = MathFunction("imul");
+TEST_F(JSBuiltinReducerTest, MathMinWithNoArguments) {
+ Node* function = MathFunction("min");
Node* effect = graph()->start();
Node* control = graph()->start();
Node* context = UndefinedConstant();
Node* frame_state = graph()->start();
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- TRACED_FOREACH(Type*, t1, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
- Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
- UndefinedConstant(), p0, p1, context,
- frame_state, frame_state, effect, control);
- Reduction r = Reduce(call);
+ Node* call = graph()->NewNode(javascript()->CallFunction(2), function,
+ UndefinedConstant(), context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsNumberImul(IsNumberToUint32(p0), IsNumberToUint32(p1)));
- }
- }
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(V8_INFINITY));
}
-
-// -----------------------------------------------------------------------------
-// Math.fround
-
-
-TEST_F(JSBuiltinReducerTest, MathFround) {
- Node* function = MathFunction("fround");
+TEST_F(JSBuiltinReducerTest, MathMinWithNumber) {
+ Node* function = MathFunction("min");
Node* effect = graph()->start();
Node* control = graph()->start();
@@ -181,14 +704,289 @@
Node* p0 = Parameter(t0, 0);
Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
UndefinedConstant(), p0, context, frame_state,
- frame_state, effect, control);
+ effect, control);
Reduction r = Reduce(call);
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
+ EXPECT_THAT(r.replacement(), p0);
}
}
+TEST_F(JSBuiltinReducerTest, MathMinWithPlainPrimitive) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsPlainPrimitiveToNumber(p0));
+}
+
+TEST_F(JSBuiltinReducerTest, MathMinWithIntegral32) {
+ Node* function = MathFunction("min");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ TRACED_FOREACH(Type*, t1, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* call = graph()->NewNode(javascript()->CallFunction(4), function,
+ UndefinedConstant(), p0, p1, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kNone,
+ IsNumberLessThan(p1, p0), p1, p0));
+ }
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Math.round
+
+TEST_F(JSBuiltinReducerTest, MathRoundWithNumber) {
+ Node* function = MathFunction("round");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberRound(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathRoundWithPlainPrimitive) {
+ Node* function = MathFunction("round");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberRound(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sin
+
+TEST_F(JSBuiltinReducerTest, MathSinWithNumber) {
+ Node* function = MathFunction("sin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSin(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSinWithPlainPrimitive) {
+ Node* function = MathFunction("sin");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSin(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+TEST_F(JSBuiltinReducerTest, MathSqrtWithNumber) {
+ Node* function = MathFunction("sqrt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSqrt(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathSqrtWithPlainPrimitive) {
+ Node* function = MathFunction("sqrt");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberSqrt(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.tan
+
+TEST_F(JSBuiltinReducerTest, MathTanWithNumber) {
+ Node* function = MathFunction("tan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTan(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTanWithPlainPrimitive) {
+ Node* function = MathFunction("tan");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTan(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// Math.trunc
+
+TEST_F(JSBuiltinReducerTest, MathTruncWithNumber) {
+ Node* function = MathFunction("trunc");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTrunc(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, MathTruncWithPlainPrimitive) {
+ Node* function = MathFunction("trunc");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberTrunc(IsPlainPrimitiveToNumber(p0)));
+}
+
+// -----------------------------------------------------------------------------
+// String.fromCharCode
+
+TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithNumber) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsStringFromCharCode(p0));
+ }
+}
+
+TEST_F(JSBuiltinReducerTest, StringFromCharCodeWithPlainPrimitive) {
+ Node* function = StringFunction("fromCharCode");
+
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ Node* p0 = Parameter(Type::PlainPrimitive(), 0);
+ Node* call = graph()->NewNode(javascript()->CallFunction(3), function,
+ UndefinedConstant(), p0, context, frame_state,
+ effect, control);
+ Reduction r = Reduce(call);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStringFromCharCode(IsPlainPrimitiveToNumber(p0)));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/js-create-lowering-unittest.cc b/test/unittests/compiler/js-create-lowering-unittest.cc
index 5d95d0d..6e6245d 100644
--- a/test/unittests/compiler/js-create-lowering-unittest.cc
+++ b/test/unittests/compiler/js-create-lowering-unittest.cc
@@ -139,7 +139,7 @@
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Handle<SharedFunctionInfo> shared(isolate()->number_function()->shared());
Reduction r =
Reduce(graph()->NewNode(javascript()->CreateClosure(shared, NOT_TENURED),
context, effect, control));
diff --git a/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
index 7c2f459..7024559 100644
--- a/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
+++ b/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -43,13 +43,6 @@
return reducer.Reduce(node);
}
- Node* EmptyFrameState() {
- MachineOperatorBuilder machine(zone());
- JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
- &machine);
- return jsgraph.EmptyFrameState();
- }
-
JSOperatorBuilder* javascript() { return &javascript_; }
private:
@@ -58,27 +51,6 @@
// -----------------------------------------------------------------------------
-// %_ConstructDouble
-
-
-TEST_F(JSIntrinsicLoweringTest, InlineOptimizedConstructDouble) {
- Node* const input0 = Parameter(0);
- Node* const input1 = Parameter(1);
- Node* const context = Parameter(2);
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->CallRuntime(Runtime::kInlineConstructDouble, 2), input0,
- input1, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64InsertHighWord32(
- IsFloat64InsertLowWord32(
- IsNumberConstant(BitEq(0.0)), input1),
- input0));
-}
-
-
-// -----------------------------------------------------------------------------
// %_DoubleLo
diff --git a/test/unittests/compiler/js-operator-unittest.cc b/test/unittests/compiler/js-operator-unittest.cc
index 28df6a9..d5f30ef 100644
--- a/test/unittests/compiler/js-operator-unittest.cc
+++ b/test/unittests/compiler/js-operator-unittest.cc
@@ -40,22 +40,12 @@
control_input_count, value_output_count, effect_output_count, \
control_output_count \
}
- SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
- SHARED(StrictEqual, Operator::kPure, 2, 0, 0, 0, 1, 0, 0),
- SHARED(StrictNotEqual, Operator::kPure, 2, 0, 0, 0, 1, 0, 0),
- SHARED(LessThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
- SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToString, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
SHARED(ToObject, Operator::kFoldable, 1, 1, 1, 1, 1, 1, 2),
SHARED(Create, Operator::kEliminatable, 2, 1, 1, 0, 1, 1, 0),
- SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0, 0),
- SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
#undef SHARED
diff --git a/test/unittests/compiler/js-typed-lowering-unittest.cc b/test/unittests/compiler/js-typed-lowering-unittest.cc
index 904d5f7..3a8e778 100644
--- a/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -87,8 +87,9 @@
// TODO(titzer): mock the GraphReducer here for better unit testing.
GraphReducer graph_reducer(zone(), graph());
JSTypedLowering reducer(&graph_reducer, &deps_,
- JSTypedLowering::kDeoptimizationEnabled, &jsgraph,
- zone());
+ JSTypedLowering::kDeoptimizationEnabled |
+ JSTypedLowering::kTypeFeedbackEnabled,
+ &jsgraph, zone());
return reducer.Reduce(node);
}
@@ -320,8 +321,7 @@
Reduce(graph()->NewNode(javascript()->ToNumber(), input, context,
EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsToNumber(input, IsNumberConstant(BitEq(0.0)),
- graph()->start(), control));
+ EXPECT_THAT(r.replacement(), IsPlainPrimitiveToNumber(input));
}
@@ -384,8 +384,9 @@
Node* const context = UndefinedConstant();
TRACED_FOREACH(Type*, type, kJSTypes) {
Node* const lhs = Parameter(type);
- Reduction r = Reduce(
- graph()->NewNode(javascript()->StrictEqual(), lhs, the_hole, context));
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->StrictEqual(CompareOperationHints::Any()), lhs, the_hole,
+ context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -396,8 +397,9 @@
Node* const lhs = Parameter(Type::Unique(), 0);
Node* const rhs = Parameter(Type::Unique(), 1);
Node* const context = Parameter(Type::Any(), 2);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, rhs, context));
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->StrictEqual(CompareOperationHints::Any()),
+ lhs, rhs, context));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsReferenceEqual(Type::Unique(), lhs, rhs));
}
@@ -602,9 +604,9 @@
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadProperty(feedback), base, key, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
+ base, key, vector, context,
+ EmptyFrameState(), effect, control));
Matcher<Node*> offset_matcher =
element_size == 1
@@ -643,9 +645,9 @@
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- javascript()->LoadProperty(feedback), base, key, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction r = Reduce(graph()->NewNode(javascript()->LoadProperty(feedback),
+ base, key, vector, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -684,8 +686,7 @@
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
@@ -725,11 +726,14 @@
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
+ // TODO(mstarzinger): Once the effect-control-linearizer provides a frame
+ // state we can get rid of this checkpoint again. The reducer won't care.
+ Node* checkpoint = graph()->NewNode(common()->Checkpoint(),
+ EmptyFrameState(), effect, control);
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), checkpoint, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
@@ -738,7 +742,7 @@
: IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
Matcher<Node*> value_matcher =
- IsToNumber(value, context, effect, control);
+ IsToNumber(value, context, checkpoint, control);
Matcher<Node*> effect_matcher = value_matcher;
ASSERT_TRUE(r.Changed());
@@ -778,8 +782,7 @@
VectorSlotPair feedback;
const Operator* op = javascript()->StoreProperty(language_mode, feedback);
Node* node = graph()->NewNode(op, base, key, value, vector, context,
- EmptyFrameState(), EmptyFrameState(),
- effect, control);
+ EmptyFrameState(), effect, control);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -805,9 +808,9 @@
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction const r = Reduce(graph()->NewNode(
- javascript()->LoadNamed(name, feedback), receiver, vector, context,
- EmptyFrameState(), EmptyFrameState(), effect, control));
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->LoadNamed(name, feedback), receiver,
+ vector, context, EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
receiver, effect, control));
@@ -838,6 +841,52 @@
lhs, rhs, context, frame_state0, effect, control));
}
+TEST_F(JSTypedLoweringTest, JSAddSmis) {
+ BinaryOperationHints const hints(BinaryOperationHints::kSignedSmall,
+ BinaryOperationHints::kSignedSmall,
+ BinaryOperationHints::kSignedSmall);
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Node* lhs = Parameter(Type::Number(), 0);
+ Node* rhs = Parameter(Type::Number(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state0 = EmptyFrameState();
+ Node* frame_state1 = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->Add(hints), lhs, rhs, context,
+ frame_state0, frame_state1, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberAdd(BinaryOperationHints::kSignedSmall, lhs,
+ rhs, effect, control));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// JSSubtract
+
+TEST_F(JSTypedLoweringTest, JSSubtractSmis) {
+ BinaryOperationHints const hints(BinaryOperationHints::kSignedSmall,
+ BinaryOperationHints::kSignedSmall,
+ BinaryOperationHints::kSignedSmall);
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Node* lhs = Parameter(Type::Number(), 0);
+ Node* rhs = Parameter(Type::Number(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state0 = EmptyFrameState();
+ Node* frame_state1 = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Subtract(hints), lhs,
+ rhs, context, frame_state0,
+ frame_state1, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSpeculativeNumberSubtract(BinaryOperationHints::kSignedSmall,
+ lhs, rhs, effect, control));
+ }
+}
// -----------------------------------------------------------------------------
// JSInstanceOf
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 8b65e04..05156ed 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/machine-operator-reducer.h"
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
+#include "src/base/ieee754.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/typer.h"
#include "src/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -16,6 +17,7 @@
using testing::BitEq;
using testing::Capture;
using testing::CaptureEq;
+using testing::NanSensitiveDoubleEq;
namespace v8 {
namespace internal {
@@ -848,8 +850,24 @@
// -----------------------------------------------------------------------------
-// Word32Shl
+// Word32Shr
+TEST_F(MachineOperatorReducerTest, Word32ShrWithWord32And) {
+ Node* const p0 = Parameter(0);
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ uint32_t mask = (1 << shift) - 1;
+ Node* node = graph()->NewNode(
+ machine()->Word32Shr(),
+ graph()->NewNode(machine()->Word32And(), p0, Int32Constant(mask)),
+ Int32Constant(shift));
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Word32Shl
TEST_F(MachineOperatorReducerTest, Word32ShlWithZeroShift) {
Node* p0 = Parameter(0);
@@ -1266,28 +1284,31 @@
TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+ Node* control = graph()->start();
Node* p0 = Parameter(0);
{
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
- Int32Constant(0), p0);
+ Int32Constant(0), p0, control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
{
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
- Int32Constant(0));
+ Int32Constant(0), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
@@ -1295,18 +1316,20 @@
TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+ Node* control = graph()->start();
TRACED_FOREACH(int32_t, x, kInt32Values) {
TRACED_FOREACH(int32_t, y, kInt32Values) {
int32_t z;
Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
- Int32Constant(x), Int32Constant(y));
+ Int32Constant(x), Int32Constant(y), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(z));
}
@@ -1319,33 +1342,36 @@
TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+ Node* control = graph()->start();
Node* p0 = Parameter(0);
- Node* add =
- graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+ Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(), p0,
+ Int32Constant(0), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(0));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(p0, r.replacement());
}
TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+ Node* control = graph()->start();
TRACED_FOREACH(int32_t, x, kInt32Values) {
TRACED_FOREACH(int32_t, y, kInt32Values) {
int32_t z;
Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
- Int32Constant(x), Int32Constant(y));
+ Int32Constant(x), Int32Constant(y), control);
- Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+ Reduction r =
+ Reduce(graph()->NewNode(common()->Projection(1), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
- r = Reduce(graph()->NewNode(common()->Projection(0), add));
+ r = Reduce(graph()->NewNode(common()->Projection(0), add, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Constant(z));
}
@@ -1399,8 +1425,133 @@
// -----------------------------------------------------------------------------
-// Float64InsertLowWord32
+// Float64Atan
+TEST_F(MachineOperatorReducerTest, Float64AtanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atan(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Atan2
+
+TEST_F(MachineOperatorReducerTest, Float64Atan2WithConstant) {
+ TRACED_FOREACH(double, y, kFloat64Values) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Atan2(), Float64Constant(y), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::atan2(y, x))));
+ }
+ }
+}
+
+TEST_F(MachineOperatorReducerTest, Float64Atan2WithNaN) {
+ Node* const p0 = Parameter(0);
+ Node* const nan = Float64Constant(std::numeric_limits<double>::quiet_NaN());
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), p0, nan));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(nan, r.replacement());
+ }
+ {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Atan2(), nan, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(nan, r.replacement());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Cos
+
+TEST_F(MachineOperatorReducerTest, Float64CosWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Cos(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::cos(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Exp
+
+TEST_F(MachineOperatorReducerTest, Float64ExpWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Exp(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::exp(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Log
+
+TEST_F(MachineOperatorReducerTest, Float64LogWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Log(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::log(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Log1p
+
+TEST_F(MachineOperatorReducerTest, Float64Log1pWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Log1p(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::log1p(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Sin
+
+TEST_F(MachineOperatorReducerTest, Float64SinWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Sin(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::sin(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64Tan
+
+TEST_F(MachineOperatorReducerTest, Float64TanWithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64Tan(), Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(NanSensitiveDoubleEq(base::ieee754::tan(x))));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Float64InsertLowWord32
TEST_F(MachineOperatorReducerTest, Float64InsertLowWord32WithConstant) {
TRACED_FOREACH(double, x, kFloat64Values) {
diff --git a/test/unittests/compiler/machine-operator-unittest.cc b/test/unittests/compiler/machine-operator-unittest.cc
index 59eb484..4367705 100644
--- a/test/unittests/compiler/machine-operator-unittest.cc
+++ b/test/unittests/compiler/machine-operator-unittest.cc
@@ -208,9 +208,7 @@
PURE(Word64Ror, 2, 0, 1), // --
PURE(Word64Equal, 2, 0, 1), // --
PURE(Int32Add, 2, 0, 1), // --
- PURE(Int32AddWithOverflow, 2, 0, 2), // --
PURE(Int32Sub, 2, 0, 1), // --
- PURE(Int32SubWithOverflow, 2, 0, 2), // --
PURE(Int32Mul, 2, 0, 1), // --
PURE(Int32MulHigh, 2, 0, 1), // --
PURE(Int32Div, 2, 1, 1), // --
@@ -327,6 +325,8 @@
OPTIONAL_ENTRY(Float64RoundDown, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTruncate, 1, 0, 1), // --
OPTIONAL_ENTRY(Float64RoundTiesAway, 1, 0, 1), // --
+ OPTIONAL_ENTRY(Float32Neg, 1, 0, 1), // --
+ OPTIONAL_ENTRY(Float64Neg, 1, 0, 1), // --
#undef OPTIONAL_ENTRY
};
} // namespace
diff --git a/test/unittests/compiler/move-optimizer-unittest.cc b/test/unittests/compiler/move-optimizer-unittest.cc
index 5ccd0c6..4c69384 100644
--- a/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/test/unittests/compiler/move-optimizer-unittest.cc
@@ -106,11 +106,9 @@
TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
int first_reg_index =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(0);
+ RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(0);
int second_reg_index =
- RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
- ->GetAllocatableGeneralCode(1);
+ RegisterConfiguration::Turbofan()->GetAllocatableGeneralCode(1);
StartBlock();
auto first_instr = EmitNop();
diff --git a/test/unittests/compiler/node-test-utils.cc b/test/unittests/compiler/node-test-utils.cc
index 6adacc1..e700080 100644
--- a/test/unittests/compiler/node-test-utils.cc
+++ b/test/unittests/compiler/node-test-utils.cc
@@ -800,6 +800,40 @@
const Matcher<Node*> rhs_matcher_;
};
+class IsSpeculativeBinopMatcher final : public NodeMatcher {
+ public:
+ IsSpeculativeBinopMatcher(
+ IrOpcode::Value opcode,
+ const Matcher<BinaryOperationHints::Hint>& hint_matcher,
+ const Matcher<Node*>& lhs_matcher, const Matcher<Node*>& rhs_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(opcode),
+ lhs_matcher_(lhs_matcher),
+ rhs_matcher_(rhs_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ // TODO(bmeurer): The type parameter is currently ignored.
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
+ rhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Type*> type_matcher_;
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
class IsAllocateMatcher final : public NodeMatcher {
public:
@@ -2029,6 +2063,25 @@
new IsReferenceEqualMatcher(type_matcher, lhs_matcher, rhs_matcher));
}
+Matcher<Node*> IsSpeculativeNumberAdd(
+ const Matcher<BinaryOperationHints::Hint>& hint_matcher,
+ const Matcher<Node*>& lhs_matcher, const Matcher<Node*>& rhs_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsSpeculativeBinopMatcher(
+ IrOpcode::kSpeculativeNumberAdd, hint_matcher, lhs_matcher, rhs_matcher,
+ effect_matcher, control_matcher));
+}
+
+Matcher<Node*> IsSpeculativeNumberSubtract(
+ const Matcher<BinaryOperationHints::Hint>& hint_matcher,
+ const Matcher<Node*>& lhs_matcher, const Matcher<Node*>& rhs_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsSpeculativeBinopMatcher(
+ IrOpcode::kSpeculativeNumberSubtract, hint_matcher, lhs_matcher,
+ rhs_matcher, effect_matcher, control_matcher));
+}
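+// Aside (not part of the patch): IsSpeculativeBinopMatcher and the two factory
+// functions above follow gmock's standard custom-matcher recipe: implement
+// MatcherInterface<T>, do the comparison in MatchAndExplain, and wrap the
+// heap-allocated instance with MakeMatcher so it composes with EXPECT_THAT.
+// A minimal standalone sketch of that recipe; IsEvenMatcher, IsEven and the
+// test name are invented for illustration.
+//
+//   #include <ostream>
+//   #include "testing/gmock/include/gmock/gmock.h"
+//   #include "testing/gtest/include/gtest/gtest.h"
+//
+//   class IsEvenMatcher final : public ::testing::MatcherInterface<int> {
+//    public:
+//     bool MatchAndExplain(
+//         int n, ::testing::MatchResultListener* listener) const override {
+//       *listener << "whose remainder mod 2 is " << (n % 2);
+//       return n % 2 == 0;
+//     }
+//     void DescribeTo(std::ostream* os) const override { *os << "is even"; }
+//   };
+//
+//   ::testing::Matcher<int> IsEven() {
+//     return ::testing::MakeMatcher(new IsEvenMatcher());
+//   }
+//
+//   TEST(CustomMatcherSketch, ComposesWithExpectThat) {
+//     EXPECT_THAT(42, IsEven());
+//     EXPECT_THAT(7, ::testing::Not(IsEven()));
+//   }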
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
@@ -2197,6 +2250,7 @@
IS_BINOP_MATCHER(NumberShiftRight)
IS_BINOP_MATCHER(NumberShiftRightLogical)
IS_BINOP_MATCHER(NumberImul)
+IS_BINOP_MATCHER(NumberAtan2)
IS_BINOP_MATCHER(Word32And)
IS_BINOP_MATCHER(Word32Or)
IS_BINOP_MATCHER(Word32Xor)
@@ -2256,10 +2310,32 @@
IS_UNOP_MATCHER(Float64RoundTiesAway)
IS_UNOP_MATCHER(Float64ExtractLowWord32)
IS_UNOP_MATCHER(Float64ExtractHighWord32)
+IS_UNOP_MATCHER(NumberAbs)
+IS_UNOP_MATCHER(NumberAtan)
+IS_UNOP_MATCHER(NumberAtanh)
+IS_UNOP_MATCHER(NumberCeil)
+IS_UNOP_MATCHER(NumberClz32)
+IS_UNOP_MATCHER(NumberCbrt)
+IS_UNOP_MATCHER(NumberCos)
+IS_UNOP_MATCHER(NumberExp)
+IS_UNOP_MATCHER(NumberExpm1)
+IS_UNOP_MATCHER(NumberFloor)
+IS_UNOP_MATCHER(NumberFround)
+IS_UNOP_MATCHER(NumberLog)
+IS_UNOP_MATCHER(NumberLog1p)
+IS_UNOP_MATCHER(NumberLog10)
+IS_UNOP_MATCHER(NumberLog2)
+IS_UNOP_MATCHER(NumberRound)
+IS_UNOP_MATCHER(NumberSin)
+IS_UNOP_MATCHER(NumberSqrt)
+IS_UNOP_MATCHER(NumberTan)
+IS_UNOP_MATCHER(NumberTrunc)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
+IS_UNOP_MATCHER(PlainPrimitiveToNumber)
IS_UNOP_MATCHER(ObjectIsReceiver)
IS_UNOP_MATCHER(ObjectIsSmi)
+IS_UNOP_MATCHER(StringFromCharCode)
IS_UNOP_MATCHER(Word32Clz)
IS_UNOP_MATCHER(Word32Ctz)
IS_UNOP_MATCHER(Word32Popcnt)
diff --git a/test/unittests/compiler/node-test-utils.h b/test/unittests/compiler/node-test-utils.h
index 4979bd5..60a0895 100644
--- a/test/unittests/compiler/node-test-utils.h
+++ b/test/unittests/compiler/node-test-utils.h
@@ -6,6 +6,7 @@
#define V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
#include "src/compiler/machine-operator.h"
+#include "src/compiler/type-hints.h"
#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -199,6 +200,18 @@
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsSpeculativeNumberAdd(
+ const Matcher<BinaryOperationHints::Hint>& hint_matcher,
+ const Matcher<Node*>& lhs_matcher, const Matcher<Node*>& rhs_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsSpeculativeNumberSubtract(
+ const Matcher<BinaryOperationHints::Hint>& hint_matcher,
+ const Matcher<Node*>& lhs_matcher, const Matcher<Node*>& rhs_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsNumberSubtract(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberMultiply(const Matcher<Node*>& lhs_matcher,
@@ -211,6 +224,29 @@
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberImul(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberAbs(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtan(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberAtan2(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberAtanh(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCeil(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberClz32(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCos(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberExp(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberExpm1(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberFloor(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberFround(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog1p(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog2(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberLog10(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberRound(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberCbrt(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSin(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberSqrt(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTan(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsNumberTrunc(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsStringFromCharCode(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
@@ -361,6 +397,7 @@
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
Matcher<Node*> IsLoadFramePointer();
Matcher<Node*> IsLoadParentFramePointer();
+Matcher<Node*> IsPlainPrimitiveToNumber(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsInt32PairAdd(const Matcher<Node*>& a_matcher,
const Matcher<Node*>& b_matcher,
diff --git a/test/unittests/compiler/register-allocator-unittest.cc b/test/unittests/compiler/register-allocator-unittest.cc
index c5ff90f..71a726f 100644
--- a/test/unittests/compiler/register-allocator-unittest.cc
+++ b/test/unittests/compiler/register-allocator-unittest.cc
@@ -678,8 +678,7 @@
Allocate();
// TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
- // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
- // Expand the test once greedy is back online with this facility.
+ // so only var3 is spilled in deferred blocks.
const int var3_reg = 2;
const int var3_slot = 2;
diff --git a/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index eec39ab..f84b9bf 100644
--- a/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -32,7 +32,8 @@
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
&machine);
- SimplifiedOperatorReducer reducer(&jsgraph);
+ GraphReducer graph_reducer(zone(), graph());
+ SimplifiedOperatorReducer reducer(&graph_reducer, &jsgraph);
return reducer.Reduce(node);
}
@@ -91,26 +92,6 @@
1866841746, 2032089723, 2147483647};
-const uint32_t kUint32Values[] = {
- 0x0, 0x5, 0x8, 0xc, 0xd, 0x26,
- 0x28, 0x29, 0x30, 0x34, 0x3e, 0x42,
- 0x50, 0x5b, 0x63, 0x71, 0x77, 0x7c,
- 0x83, 0x88, 0x96, 0x9c, 0xa3, 0xfa,
- 0x7a7, 0x165d, 0x234d, 0x3acb, 0x43a5, 0x4573,
- 0x5b4f, 0x5f14, 0x6996, 0x6c6e, 0x7289, 0x7b9a,
- 0x7bc9, 0x86bb, 0xa839, 0xaa41, 0xb03b, 0xc942,
- 0xce68, 0xcf4c, 0xd3ad, 0xdea3, 0xe90c, 0xed86,
- 0xfba5, 0x172dcc6, 0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
- 0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
- 0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
- 0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
- 0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
- 0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
- 0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
- 0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
- 0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
-
-
const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(),
bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
@@ -314,26 +295,6 @@
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
- }
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant) {
- TRACED_FOREACH(double, nan, kNaNs) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToInt32(), NumberConstant(nan)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
- }
-}
-
-
// -----------------------------------------------------------------------------
// ChangeTaggedToUint32
@@ -360,41 +321,6 @@
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
- TRACED_FOREACH(double, n, kFloat64Values) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
- }
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant) {
- TRACED_FOREACH(double, nan, kNaNs) {
- Reduction reduction = Reduce(graph()->NewNode(
- simplified()->ChangeTaggedToUint32(), NumberConstant(nan)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// ChangeUint32ToTagged
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
- TRACED_FOREACH(uint32_t, n, kUint32Values) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
- Int32Constant(bit_cast<int32_t>(n))));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsNumberConstant(BitEq(FastUI2D(n))));
- }
-}
-
// -----------------------------------------------------------------------------
// TruncateTaggedToWord32
@@ -417,6 +343,116 @@
}
}
+// -----------------------------------------------------------------------------
+// CheckTaggedPointer
+
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithChangeBitToTagged) {
+ Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = graph()->NewNode(simplified()->ChangeBitToTagged(), param0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedPointer(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedPointerWithHeapConstant) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Handle<HeapObject> kHeapObjects[] = {
+ factory()->empty_string(), factory()->null_value(),
+ factory()->species_symbol(), factory()->undefined_value()};
+ TRACED_FOREACH(Handle<HeapObject>, object, kHeapObjects) {
+ Node* value = HeapConstant(object);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedPointer(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+ }
+}
+
+// -----------------------------------------------------------------------------
+// CheckTaggedSigned
+
+TEST_F(SimplifiedOperatorReducerTest,
+ CheckTaggedSignedWithChangeInt31ToTaggedSigned) {
+ Node* param0 = Parameter(0);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value =
+ graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedSigned(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+TEST_F(SimplifiedOperatorReducerTest, CheckTaggedSignedWithNumberConstant) {
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* value = NumberConstant(1.0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->CheckTaggedSigned(), value, effect, control));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+}
+
+// -----------------------------------------------------------------------------
+// NumberAbs
+
+TEST_F(SimplifiedOperatorReducerTest, NumberAbsWithNumberConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->NumberAbs(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsNumberConstant(std::fabs(n)));
+ }
+}
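+// Aside (not part of the patch): std::fabs is well defined for the tricky
+// doubles that kFloat64Values typically contains (it maps -0.0 to +0.0 and
+// keeps NaN as NaN), so folding NumberAbs over a NumberConstant is safe for
+// every input. A standalone sketch; only <cmath>, <limits> and gtest are
+// assumed, and the test name is invented.
+//
+//   #include <cmath>
+//   #include <limits>
+//   #include "testing/gtest/include/gtest/gtest.h"
+//
+//   TEST(FabsFoldingSketch, EdgeCases) {
+//     EXPECT_EQ(0.0, std::fabs(-0.0));
+//     EXPECT_FALSE(std::signbit(std::fabs(-0.0)));
+//     EXPECT_TRUE(
+//         std::isnan(std::fabs(std::numeric_limits<double>::quiet_NaN())));
+//     EXPECT_EQ(std::numeric_limits<double>::infinity(),
+//               std::fabs(-std::numeric_limits<double>::infinity()));
+//   }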
+
+// -----------------------------------------------------------------------------
+// ObjectIsSmi
+
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithChangeBitToTagged) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ObjectIsSmi(),
+ graph()->NewNode(simplified()->ChangeBitToTagged(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+}
+
+TEST_F(SimplifiedOperatorReducerTest,
+ ObjectIsSmiWithChangeInt31ToTaggedSigned) {
+ Node* param0 = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ObjectIsSmi(),
+ graph()->NewNode(simplified()->ChangeInt31ToTaggedSigned(), param0)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsTrueConstant());
+}
+
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithHeapConstant) {
+ Handle<HeapObject> kHeapObjects[] = {
+ factory()->empty_string(), factory()->null_value(),
+ factory()->species_symbol(), factory()->undefined_value()};
+ TRACED_FOREACH(Handle<HeapObject>, o, kHeapObjects) {
+ Reduction reduction =
+ Reduce(graph()->NewNode(simplified()->ObjectIsSmi(), HeapConstant(o)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+ }
+}
+
+TEST_F(SimplifiedOperatorReducerTest, ObjectIsSmiWithNumberConstant) {
+ TRACED_FOREACH(double, n, kFloat64Values) {
+ Reduction reduction = Reduce(
+ graph()->NewNode(simplified()->ObjectIsSmi(), NumberConstant(n)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsBooleanConstant(IsSmiDouble(n)));
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/simplified-operator-unittest.cc b/test/unittests/compiler/simplified-operator-unittest.cc
index ba404a9..3343c8f 100644
--- a/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/test/unittests/compiler/simplified-operator-unittest.cc
@@ -156,7 +156,8 @@
const Operator* op = simplified.LoadBuffer(access);
EXPECT_EQ(IrOpcode::kLoadBuffer, op->opcode());
- EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ op->properties());
EXPECT_EQ(access, BufferAccessOf(op));
EXPECT_EQ(3, op->ValueInputCount());
@@ -176,7 +177,8 @@
const Operator* op = simplified.StoreBuffer(access);
EXPECT_EQ(IrOpcode::kStoreBuffer, op->opcode());
- EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ op->properties());
EXPECT_EQ(access, BufferAccessOf(op));
EXPECT_EQ(4, op->ValueInputCount());
@@ -258,7 +260,8 @@
const Operator* op = simplified.LoadElement(access);
EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
- EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
+ op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(2, op->ValueInputCount());
@@ -278,7 +281,8 @@
const Operator* op = simplified.StoreElement(access);
EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
- EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+ EXPECT_EQ(Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,
+ op->properties());
EXPECT_EQ(access, ElementAccessOf(op));
EXPECT_EQ(3, op->ValueInputCount());
diff --git a/test/unittests/compiler/typer-unittest.cc b/test/unittests/compiler/typer-unittest.cc
index 9d664a6..61e00a5 100644
--- a/test/unittests/compiler/typer-unittest.cc
+++ b/test/unittests/compiler/typer-unittest.cc
@@ -290,44 +290,51 @@
TEST_F(TyperTest, TypeJSLessThan) {
- TestBinaryCompareOp(javascript_.LessThan(), std::less<double>());
+ TestBinaryCompareOp(javascript_.LessThan(CompareOperationHints::Any()),
+ std::less<double>());
}
TEST_F(TyperTest, TypeJSLessThanOrEqual) {
- TestBinaryCompareOp(javascript_.LessThanOrEqual(), std::less_equal<double>());
+ TestBinaryCompareOp(javascript_.LessThanOrEqual(CompareOperationHints::Any()),
+ std::less_equal<double>());
}
TEST_F(TyperTest, TypeJSGreaterThan) {
- TestBinaryCompareOp(javascript_.GreaterThan(), std::greater<double>());
+ TestBinaryCompareOp(javascript_.GreaterThan(CompareOperationHints::Any()),
+ std::greater<double>());
}
TEST_F(TyperTest, TypeJSGreaterThanOrEqual) {
- TestBinaryCompareOp(javascript_.GreaterThanOrEqual(),
- std::greater_equal<double>());
+ TestBinaryCompareOp(
+ javascript_.GreaterThanOrEqual(CompareOperationHints::Any()),
+ std::greater_equal<double>());
}
TEST_F(TyperTest, TypeJSEqual) {
- TestBinaryCompareOp(javascript_.Equal(), std::equal_to<double>());
+ TestBinaryCompareOp(javascript_.Equal(CompareOperationHints::Any()),
+ std::equal_to<double>());
}
TEST_F(TyperTest, TypeJSNotEqual) {
- TestBinaryCompareOp(javascript_.NotEqual(), std::not_equal_to<double>());
+ TestBinaryCompareOp(javascript_.NotEqual(CompareOperationHints::Any()),
+ std::not_equal_to<double>());
}
// For numbers there's no difference between strict and non-strict equality.
TEST_F(TyperTest, TypeJSStrictEqual) {
- TestBinaryCompareOp(javascript_.StrictEqual(), std::equal_to<double>());
+ TestBinaryCompareOp(javascript_.StrictEqual(CompareOperationHints::Any()),
+ std::equal_to<double>());
}
TEST_F(TyperTest, TypeJSStrictNotEqual) {
- TestBinaryCompareOp(javascript_.StrictNotEqual(),
+ TestBinaryCompareOp(javascript_.StrictNotEqual(CompareOperationHints::Any()),
std::not_equal_to<double>());
}
@@ -335,10 +342,9 @@
//------------------------------------------------------------------------------
// Monotonicity
-
-#define TEST_BINARY_MONOTONICITY(name) \
- TEST_F(TyperTest, Monotonicity_##name) { \
- TestBinaryMonotonicity(javascript_.name()); \
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name(CompareOperationHints::Any())); \
}
TEST_BINARY_MONOTONICITY(Equal)
TEST_BINARY_MONOTONICITY(NotEqual)
diff --git a/test/unittests/heap/slot-set-unittest.cc b/test/unittests/heap/slot-set-unittest.cc
index 26a26f0..cfb1f1f 100644
--- a/test/unittests/heap/slot-set-unittest.cc
+++ b/test/unittests/heap/slot-set-unittest.cc
@@ -142,23 +142,29 @@
TEST(TypedSlotSet, Iterate) {
TypedSlotSet set(0);
const int kDelta = 10000001;
+ const int kHostDelta = 50001;
int added = 0;
- for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset; i += kDelta) {
+ uint32_t j = 0;
+ for (uint32_t i = 0; i < TypedSlotSet::kMaxOffset;
+ i += kDelta, j += kHostDelta) {
SlotType type = static_cast<SlotType>(i % NUMBER_OF_SLOT_TYPES);
- set.Insert(type, i);
+ set.Insert(type, j, i);
++added;
}
int iterated = 0;
- set.Iterate([&iterated, kDelta](SlotType type, Address addr) {
+ set.Iterate([&iterated, kDelta, kHostDelta](SlotType type, Address host_addr,
+ Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
+ uint32_t j = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(host_addr));
EXPECT_EQ(i % NUMBER_OF_SLOT_TYPES, static_cast<uint32_t>(type));
EXPECT_EQ(0, i % kDelta);
+ EXPECT_EQ(0, j % kHostDelta);
++iterated;
return i % 2 == 0 ? KEEP_SLOT : REMOVE_SLOT;
});
EXPECT_EQ(added, iterated);
iterated = 0;
- set.Iterate([&iterated](SlotType type, Address addr) {
+ set.Iterate([&iterated](SlotType type, Address host_addr, Address addr) {
uint32_t i = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr));
EXPECT_EQ(0, i % 2);
++iterated;
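// Aside (not part of the patch): the TypedSlotSet::Iterate calls above use a
// visitor that returns KEEP_SLOT or REMOVE_SLOT so the set can filter entries
// in place while it is being walked. The same pattern in plain C++ looks
// roughly like the sketch below; SlotList, KeepOrRemove and the test name are
// invented for illustration, and only the standard library and gtest are
// assumed.
//
//   #include <cstdint>
//   #include <vector>
//   #include "testing/gtest/include/gtest/gtest.h"
//
//   enum KeepOrRemove { KEEP_ENTRY, REMOVE_ENTRY };
//
//   class SlotList {
//    public:
//     void Insert(uint32_t offset) { offsets_.push_back(offset); }
//
//     // Callback signature: KeepOrRemove(uint32_t offset).
//     template <typename Callback>
//     int Iterate(Callback callback) {
//       std::vector<uint32_t> kept;
//       int visited = 0;
//       for (uint32_t offset : offsets_) {
//         ++visited;
//         if (callback(offset) == KEEP_ENTRY) kept.push_back(offset);
//       }
//       offsets_.swap(kept);
//       return visited;
//     }
//
//    private:
//     std::vector<uint32_t> offsets_;
//   };
//
//   TEST(SlotListSketch, IterateFiltersEntries) {
//     SlotList list;
//     for (uint32_t i = 0; i < 10; i++) list.Insert(i);
//     // First pass visits everything and drops the odd offsets.
//     EXPECT_EQ(10, list.Iterate([](uint32_t offset) {
//       return offset % 2 == 0 ? KEEP_ENTRY : REMOVE_ENTRY;
//     }));
//     // Second pass only sees the entries that were kept.
//     EXPECT_EQ(5, list.Iterate([](uint32_t) { return KEEP_ENTRY; }));
//   }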
diff --git a/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index a569c94..7bbef45 100644
--- a/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -6,6 +6,7 @@
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-register-allocator.h"
#include "test/unittests/test-utils.h"
@@ -47,7 +48,7 @@
.LoadLiteral(factory->NewStringFromStaticChars("A constant"))
.StoreAccumulatorInRegister(reg)
.LoadUndefined()
- .StoreAccumulatorInRegister(reg)
+ .Debugger() // Prevent peephole optimization LdaNull, Star -> LdrNull.
.LoadNull()
.StoreAccumulatorInRegister(reg)
.LoadTheHole()
@@ -57,11 +58,12 @@
.LoadFalse()
.StoreAccumulatorInRegister(wide);
+ // Emit Ldar and Star taking care to foil the register optimizer.
builder.StackCheck(0)
.LoadAccumulatorWithRegister(other)
+ .BinaryOperation(Token::ADD, reg)
.StoreAccumulatorInRegister(reg)
- .LoadNull()
- .StoreAccumulatorInRegister(reg);
+ .LoadNull();
// Emit register-register transfer.
builder.MoveRegister(reg, other);
@@ -69,8 +71,8 @@
// Emit global load / store operations.
Handle<String> name = factory->NewStringFromStaticChars("var_name");
- builder.LoadGlobal(name, 1, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(1, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(1, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1, LanguageMode::SLOPPY)
.StoreGlobal(name, 1, LanguageMode::STRICT);
@@ -169,26 +171,34 @@
// Emit control flow. Return must be the last instruction.
BytecodeLabel start;
builder.Bind(&start);
- // Short jumps with Imm8 operands
- builder.Jump(&start)
- .JumpIfNull(&start)
- .JumpIfUndefined(&start)
- .JumpIfNotHole(&start);
+ {
+ // Short jumps with Imm8 operands
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ }
// Longer jumps with constant operands
BytecodeLabel end[8];
- builder.Jump(&end[0])
- .LoadTrue()
- .JumpIfTrue(&end[1])
- .LoadTrue()
- .JumpIfFalse(&end[2])
- .LoadLiteral(Smi::FromInt(0))
- .JumpIfTrue(&end[3])
- .LoadLiteral(Smi::FromInt(0))
- .JumpIfFalse(&end[4])
- .JumpIfNull(&end[5])
- .JumpIfUndefined(&end[6])
- .JumpIfNotHole(&end[7]);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&end[0])
+ .Bind(&after_jump)
+ .LoadTrue()
+ .JumpIfTrue(&end[1])
+ .LoadTrue()
+ .JumpIfFalse(&end[2])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfTrue(&end[3])
+ .LoadLiteral(Smi::FromInt(0))
+ .JumpIfFalse(&end[4])
+ .JumpIfNull(&end[5])
+ .JumpIfUndefined(&end[6])
+ .JumpIfNotHole(&end[7]);
+ }
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
@@ -207,20 +217,26 @@
builder.LoadTrue();
}
// Longer jumps requiring Constant operand
- builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
- &start);
- // Perform an operation that returns boolean value to
- // generate JumpIfTrue/False
- builder.CompareOperation(Token::Value::EQ, reg)
- .JumpIfTrue(&start)
- .CompareOperation(Token::Value::EQ, reg)
- .JumpIfFalse(&start);
- // Perform an operation that returns a non-boolean operation to
- // generate JumpIfToBooleanTrue/False.
- builder.BinaryOperation(Token::Value::ADD, reg)
- .JumpIfTrue(&start)
- .BinaryOperation(Token::Value::ADD, reg)
- .JumpIfFalse(&start);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ // Perform an operation that returns a boolean value to
+ // generate JumpIfTrue/False
+ builder.CompareOperation(Token::Value::EQ, reg)
+ .JumpIfTrue(&start)
+ .CompareOperation(Token::Value::EQ, reg)
+ .JumpIfFalse(&start);
+ // Perform an operation that returns a non-boolean value to
+ // generate JumpIfToBooleanTrue/False.
+ builder.BinaryOperation(Token::Value::ADD, reg)
+ .JumpIfTrue(&start)
+ .BinaryOperation(Token::Value::ADD, reg)
+ .JumpIfFalse(&start);
+ }
// Emit stack check bytecode.
builder.StackCheck(0);
@@ -228,9 +244,9 @@
// Emit throw and re-throw in its own basic block so that the rest of the
// code isn't omitted due to being dead.
BytecodeLabel after_throw;
- builder.Jump(&after_throw).Throw().Bind(&after_throw);
+ builder.Throw().Bind(&after_throw);
BytecodeLabel after_rethrow;
- builder.Jump(&after_rethrow).ReThrow().Bind(&after_rethrow);
+ builder.ReThrow().Bind(&after_rethrow);
builder.ForInPrepare(reg)
.ForInDone(reg, reg)
@@ -250,14 +266,14 @@
Handle<String> wide_name = factory->NewStringFromStaticChars("var_wide_name");
// Emit wide global load / store operations.
- builder.LoadGlobal(name, 1024, TypeofMode::NOT_INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
- .LoadGlobal(name, 1024, TypeofMode::INSIDE_TYPEOF)
+ builder.LoadGlobal(1024, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(1024, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(1024, TypeofMode::INSIDE_TYPEOF)
.StoreGlobal(name, 1024, LanguageMode::SLOPPY)
.StoreGlobal(wide_name, 1, LanguageMode::STRICT);
// Emit extra wide global load.
- builder.LoadGlobal(name, 1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
+ builder.LoadGlobal(1024 * 1024, TypeofMode::NOT_INSIDE_TYPEOF);
// Emit wide load / store property operations.
builder.LoadNamedProperty(reg, wide_name, 0)
@@ -276,6 +292,19 @@
.StoreLookupSlot(wide_name, LanguageMode::SLOPPY)
.StoreLookupSlot(wide_name, LanguageMode::STRICT);
+ // Emit loads which will be transformed to Ldr equivalents by the peephole
+ // optimizer.
+ builder.LoadNamedProperty(reg, name, 0)
+ .StoreAccumulatorInRegister(reg)
+ .LoadKeyedProperty(reg, 0)
+ .StoreAccumulatorInRegister(reg)
+ .LoadContextSlot(reg, 1)
+ .StoreAccumulatorInRegister(reg)
+ .LoadGlobal(0, TypeofMode::NOT_INSIDE_TYPEOF)
+ .StoreAccumulatorInRegister(reg)
+ .LoadUndefined()
+ .StoreAccumulatorInRegister(reg);
+
// CreateClosureWide
Handle<SharedFunctionInfo> shared_info2 = factory->NewSharedFunctionInfo(
factory->NewStringFromStaticChars("function_b"), MaybeHandle<Code>(),
@@ -289,14 +318,22 @@
.CreateObjectLiteral(factory->NewFixedArray(2), 0, 0);
// Longer jumps requiring ConstantWide operand
- builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start).JumpIfNotHole(
- &start);
+ {
+ BytecodeLabel after_jump;
+ builder.Jump(&start)
+ .Bind(&after_jump)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start)
+ .JumpIfNotHole(&start);
+ }
+
// Perform an operation that returns boolean value to
// generate JumpIfTrue/False
builder.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&start)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfFalse(&start);
+
// Perform an operation that returns a non-boolean operation to
// generate JumpIfToBooleanTrue/False.
builder.BinaryOperation(Token::Value::ADD, reg)
@@ -349,6 +386,21 @@
// Insert entry for nop bytecode as this often gets optimized out.
scorecard[Bytecodes::ToByte(Bytecode::kNop)] = 1;
+ if (!FLAG_ignition_peephole) {
+ // Insert entries for bytecodes only emitted by peephole optimizer.
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrNamedProperty)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrKeyedProperty)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrGlobal)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrContextSlot)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLdrUndefined)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kLogicalNot)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJump)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrue)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalse)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfTrueConstant)] = 1;
+ scorecard[Bytecodes::ToByte(Bytecode::kJumpIfFalseConstant)] = 1;
+ }
+
// Check return occurs at the end and only once in the BytecodeArray.
CHECK_EQ(final_bytecode, Bytecode::kReturn);
CHECK_EQ(scorecard[Bytecodes::ToByte(final_bytecode)], 1);
@@ -370,9 +422,20 @@
BytecodeArrayBuilder builder(isolate(), zone(), 0, contexts, locals);
BytecodeRegisterAllocator temporaries(
zone(), builder.temporary_register_allocator());
+ for (int i = 0; i < locals + contexts; i++) {
+ builder.LoadLiteral(Smi::FromInt(0));
+ builder.StoreAccumulatorInRegister(Register(i));
+ }
for (int i = 0; i < temps; i++) {
+ builder.LoadLiteral(Smi::FromInt(0));
builder.StoreAccumulatorInRegister(temporaries.NewRegister());
}
+ if (temps > 0) {
+ // Ensure temporaries are used so they are not optimized away by the
+ // register optimizer.
+ builder.New(Register(locals + contexts), Register(locals + contexts),
+ static_cast<size_t>(temps));
+ }
builder.Return();
Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
@@ -398,6 +461,7 @@
TEST_F(BytecodeArrayBuilderTest, Parameters) {
BytecodeArrayBuilder builder(isolate(), zone(), 10, 0, 0);
+
Register param0(builder.Parameter(0));
Register param9(builder.Parameter(9));
CHECK_EQ(param9.index() - param0.index(), 9);
@@ -429,6 +493,7 @@
TEST_F(BytecodeArrayBuilderTest, Constants) {
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
+
Factory* factory = isolate()->factory();
Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(3.14);
Handle<HeapObject> heap_num_2 = factory->NewHeapNumber(5.2);
@@ -447,16 +512,24 @@
CHECK_EQ(array->constant_pool()->length(), 3);
}
+static Bytecode PeepholeToBoolean(Bytecode jump_bytecode) {
+ return FLAG_ignition_peephole
+ ? Bytecodes::GetJumpWithoutToBoolean(jump_bytecode)
+ : jump_bytecode;
+}
TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
static const int kFarJumpDistance = 256;
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+
Register reg(0);
BytecodeLabel far0, far1, far2, far3, far4;
BytecodeLabel near0, near1, near2, near3, near4;
+ BytecodeLabel after_jump0, after_jump1;
builder.Jump(&near0)
+ .Bind(&after_jump0)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&near1)
.CompareOperation(Token::Value::EQ, reg)
@@ -471,6 +544,7 @@
.Bind(&near3)
.Bind(&near4)
.Jump(&far0)
+ .Bind(&after_jump1)
.CompareOperation(Token::Value::EQ, reg)
.JumpIfTrue(&far1)
.CompareOperation(Token::Value::EQ, reg)
@@ -496,14 +570,16 @@
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.GetImmediateOperand(0), 14);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.GetImmediateOperand(0), 10);
iterator.Advance();
@@ -529,7 +605,8 @@
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrueConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 4));
iterator.Advance();
@@ -537,7 +614,8 @@
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalseConstant));
CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
Smi::FromInt(kFarJumpDistance - 8));
iterator.Advance();
@@ -563,6 +641,7 @@
TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 1);
+
Register reg(0);
BytecodeLabel label0, label1, label2, label3, label4;
@@ -581,7 +660,8 @@
.BinaryOperation(Token::Value::ADD, reg)
.JumpIfFalse(&label4);
for (int i = 0; i < 63; i++) {
- builder.Jump(&label4);
+ BytecodeLabel after_jump;
+ builder.Jump(&label4).Bind(&after_jump);
}
// Add padding to force wide backwards jumps.
@@ -594,6 +674,8 @@
builder.CompareOperation(Token::Value::EQ, reg).JumpIfFalse(&label2);
builder.CompareOperation(Token::Value::EQ, reg).JumpIfTrue(&label1);
builder.Jump(&label0);
+ BytecodeLabel end;
+ builder.Bind(&end);
builder.Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray();
@@ -603,13 +685,15 @@
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetImmediateOperand(0), -2);
iterator.Advance();
@@ -650,13 +734,15 @@
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanFalse));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
CHECK_EQ(iterator.GetImmediateOperand(0), -409);
iterator.Advance();
// Ignore compare operation.
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.current_bytecode(),
+ PeepholeToBoolean(Bytecode::kJumpIfToBooleanTrue));
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kDouble);
CHECK_EQ(iterator.GetImmediateOperand(0), -419);
iterator.Advance();
@@ -675,9 +761,15 @@
// Labels can only have 1 forward reference, but
// can be referred to multiple times once bound.
- BytecodeLabel label;
+ BytecodeLabel label, after_jump0, after_jump1;
- builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label).Return();
+ builder.Jump(&label)
+ .Bind(&label)
+ .Jump(&label)
+ .Bind(&after_jump0)
+ .Jump(&label)
+ .Bind(&after_jump1)
+ .Return();
Handle<BytecodeArray> array = builder.ToBytecodeArray();
BytecodeArrayIterator iterator(array);
@@ -701,8 +793,13 @@
BytecodeArrayBuilder builder(isolate(), zone(), 0, 0, 0);
for (int i = 0; i < kRepeats; i++) {
- BytecodeLabel label;
- builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label);
+ BytecodeLabel label, after_jump0, after_jump1;
+ builder.Jump(&label)
+ .Bind(&label)
+ .Jump(&label)
+ .Bind(&after_jump0)
+ .Jump(&label)
+ .Bind(&after_jump1);
}
builder.Return();
diff --git a/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
index aa9effe..6b7374e 100644
--- a/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
+++ b/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -47,14 +47,16 @@
.LoadLiteral(smi_1)
.StoreAccumulatorInRegister(reg_1)
.LoadAccumulatorWithRegister(reg_0)
+ .BinaryOperation(Token::Value::ADD, reg_0)
.StoreAccumulatorInRegister(reg_1)
.LoadNamedProperty(reg_1, name, feedback_slot)
+ .BinaryOperation(Token::Value::ADD, reg_0)
.StoreAccumulatorInRegister(param)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, param, 1, reg_0)
.ForInPrepare(reg_0)
.CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
.Debugger()
- .LoadGlobal(name, 0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(0x10000000, TypeofMode::NOT_INSIDE_TYPEOF)
.Return();
// Test iterator sees the expected output from the builder.
@@ -155,6 +157,15 @@
offset += Bytecodes::Size(Bytecode::kLdar, OperandScale::kSingle);
iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
+ iterator.Advance();
+
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
@@ -164,14 +175,23 @@
offset += Bytecodes::Size(Bytecode::kStar, OperandScale::kSingle);
iterator.Advance();
- CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadIC);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaNamedProperty);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
CHECK_EQ(iterator.GetIndexOperand(1), name_index);
CHECK_EQ(iterator.GetIndexOperand(2), feedback_slot);
CHECK(!iterator.done());
- offset += Bytecodes::Size(Bytecode::kLoadIC, OperandScale::kSingle);
+ offset += Bytecodes::Size(Bytecode::kLdaNamedProperty, OperandScale::kSingle);
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kAdd);
+ CHECK_EQ(iterator.current_offset(), offset);
+ CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK_EQ(iterator.GetRegisterOperandRange(0), 1);
+ CHECK(!iterator.done());
+ offset += Bytecodes::Size(Bytecode::kAdd, OperandScale::kSingle);
iterator.Advance();
CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
@@ -209,8 +229,7 @@
CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kSingle);
- CHECK_EQ(static_cast<Runtime::FunctionId>(iterator.GetRuntimeIdOperand(0)),
- Runtime::kLoadIC_Miss);
+ CHECK_EQ(iterator.GetRuntimeIdOperand(0), Runtime::kLoadIC_Miss);
CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
CHECK_EQ(iterator.GetRegisterCountOperand(2), 1);
CHECK(!iterator.done());
@@ -227,8 +246,8 @@
CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaGlobal);
CHECK_EQ(iterator.current_offset(), offset);
CHECK_EQ(iterator.current_operand_scale(), OperandScale::kQuadruple);
- CHECK_EQ(iterator.current_bytecode_size(), 10);
- CHECK_EQ(iterator.GetIndexOperand(1), 0x10000000);
+ CHECK_EQ(iterator.current_bytecode_size(), 6);
+ CHECK_EQ(iterator.GetIndexOperand(0), 0x10000000);
offset += Bytecodes::Size(Bytecode::kLdaGlobal, OperandScale::kQuadruple) +
kPrefixByteSize;
iterator.Advance();
diff --git a/test/unittests/interpreter/bytecode-array-writer-unittest.cc b/test/unittests/interpreter/bytecode-array-writer-unittest.cc
index a1b4910..90a91ce 100644
--- a/test/unittests/interpreter/bytecode-array-writer-unittest.cc
+++ b/test/unittests/interpreter/bytecode-array-writer-unittest.cc
@@ -4,7 +4,11 @@
#include "src/v8.h"
+#include "src/api.h"
+#include "src/factory.h"
#include "src/interpreter/bytecode-array-writer.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/constant-array-builder.h"
#include "src/interpreter/source-position-table.h"
#include "src/isolate.h"
#include "src/utils.h"
@@ -18,39 +22,45 @@
class BytecodeArrayWriterUnittest : public TestWithIsolateAndZone {
public:
BytecodeArrayWriterUnittest()
- : source_position_builder_(isolate(), zone()),
- bytecode_array_writer_(zone(), &source_position_builder_) {}
+ : constant_array_builder_(isolate(), zone()),
+ bytecode_array_writer_(isolate(), zone(), &constant_array_builder_) {}
~BytecodeArrayWriterUnittest() override {}
void Write(BytecodeNode* node, const BytecodeSourceInfo& info);
void Write(Bytecode bytecode,
const BytecodeSourceInfo& info = BytecodeSourceInfo());
- void Write(Bytecode bytecode, uint32_t operand0, OperandScale operand_scale,
+ void Write(Bytecode bytecode, uint32_t operand0,
const BytecodeSourceInfo& info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- OperandScale operand_scale,
const BytecodeSourceInfo& info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, OperandScale operand_scale,
+ uint32_t operand2,
const BytecodeSourceInfo& info = BytecodeSourceInfo());
void Write(Bytecode bytecode, uint32_t operand0, uint32_t operand1,
- uint32_t operand2, uint32_t operand3, OperandScale operand_scale,
+ uint32_t operand2, uint32_t operand3,
const BytecodeSourceInfo& info = BytecodeSourceInfo());
- SourcePositionTableBuilder* source_position_builder() {
- return &source_position_builder_;
- }
+ void WriteJump(Bytecode bytecode, BytecodeLabel* label,
+ const BytecodeSourceInfo& info = BytecodeSourceInfo());
+
BytecodeArrayWriter* writer() { return &bytecode_array_writer_; }
+ ZoneVector<unsigned char>* bytecodes() { return writer()->bytecodes(); }
+ SourcePositionTableBuilder* source_position_table_builder() {
+ return writer()->source_position_table_builder();
+ }
+ int max_register_count() { return writer()->max_register_count(); }
private:
- SourcePositionTableBuilder source_position_builder_;
+ ConstantArrayBuilder constant_array_builder_;
BytecodeArrayWriter bytecode_array_writer_;
};
void BytecodeArrayWriterUnittest::Write(BytecodeNode* node,
const BytecodeSourceInfo& info) {
if (info.is_valid()) {
- node->source_info().Update(info);
+ node->source_info().Clone(info);
}
writer()->Write(node);
}
@@ -62,72 +72,76 @@
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
- OperandScale operand_scale,
const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0, operand_scale);
+ BytecodeNode node(bytecode, operand0);
Write(&node, info);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1,
- OperandScale operand_scale,
const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0, operand1, operand_scale);
+ BytecodeNode node(bytecode, operand0, operand1);
Write(&node, info);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
- OperandScale operand_scale,
const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0, operand1, operand2, operand_scale);
+ BytecodeNode node(bytecode, operand0, operand1, operand2);
Write(&node, info);
}
void BytecodeArrayWriterUnittest::Write(Bytecode bytecode, uint32_t operand0,
uint32_t operand1, uint32_t operand2,
uint32_t operand3,
- OperandScale operand_scale,
const BytecodeSourceInfo& info) {
- BytecodeNode node(bytecode, operand0, operand1, operand2, operand3,
- operand_scale);
+ BytecodeNode node(bytecode, operand0, operand1, operand2, operand3);
Write(&node, info);
}
+void BytecodeArrayWriterUnittest::WriteJump(Bytecode bytecode,
+ BytecodeLabel* label,
+ const BytecodeSourceInfo& info) {
+ BytecodeNode node(bytecode, 0);
+ if (info.is_valid()) {
+ node.source_info().Clone(info);
+ }
+ writer()->WriteJump(&node, label);
+}
+
TEST_F(BytecodeArrayWriterUnittest, SimpleExample) {
- CHECK_EQ(writer()->bytecodes()->size(), 0);
+ CHECK_EQ(bytecodes()->size(), 0);
Write(Bytecode::kStackCheck, {10, false});
- CHECK_EQ(writer()->bytecodes()->size(), 1);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 0);
+ CHECK_EQ(bytecodes()->size(), 1);
+ CHECK_EQ(max_register_count(), 0);
- Write(Bytecode::kLdaSmi, 0xff, OperandScale::kSingle, {55, true});
- CHECK_EQ(writer()->bytecodes()->size(), 3);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 0);
+ Write(Bytecode::kLdaSmi, 127, {55, true});
+ CHECK_EQ(bytecodes()->size(), 3);
+ CHECK_EQ(max_register_count(), 0);
- Write(Bytecode::kLdar, Register(1).ToOperand(), OperandScale::kDouble);
- CHECK_EQ(writer()->bytecodes()->size(), 7);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
+ Write(Bytecode::kLdar, Register(200).ToOperand());
+ CHECK_EQ(bytecodes()->size(), 7);
+ CHECK_EQ(max_register_count(), 201);
Write(Bytecode::kReturn, {70, true});
- CHECK_EQ(writer()->bytecodes()->size(), 8);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
+ CHECK_EQ(bytecodes()->size(), 8);
+ CHECK_EQ(max_register_count(), 201);
- static const uint8_t bytes[] = {B(StackCheck), B(LdaSmi), U8(0xff), B(Wide),
- B(Ldar), R16(1), B(Return)};
- CHECK_EQ(writer()->bytecodes()->size(), arraysize(bytes));
+ static const uint8_t bytes[] = {B(StackCheck), B(LdaSmi), U8(127), B(Wide),
+ B(Ldar), R16(200), B(Return)};
+ CHECK_EQ(bytecodes()->size(), arraysize(bytes));
for (size_t i = 0; i < arraysize(bytes); ++i) {
- CHECK_EQ(writer()->bytecodes()->at(i), bytes[i]);
+ CHECK_EQ(bytecodes()->at(i), bytes[i]);
}
- CHECK_EQ(writer()->FlushForOffset(), arraysize(bytes));
- writer()->FlushBasicBlock();
- CHECK_EQ(writer()->bytecodes()->size(), arraysize(bytes));
+ writer()->ToBytecodeArray(0, 0, factory()->empty_fixed_array());
+ CHECK_EQ(bytecodes()->size(), arraysize(bytes));
PositionTableEntry expected_positions[] = {
{0, 10, false}, {1, 55, true}, {7, 70, true}};
Handle<ByteArray> source_positions =
- source_position_builder()->ToSourcePositionTable();
+ source_position_table_builder()->ToSourcePositionTable();
SourcePositionTableIterator source_iterator(*source_positions);
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
@@ -173,50 +187,57 @@
{0, 30, false}, {1, 42, true}, {3, 42, false}, {5, 68, true},
{17, 63, true}, {31, 54, false}, {36, 85, true}, {44, 85, true}};
+ BytecodeLabel back_jump, jump_for_in, jump_end_1, jump_end_2, jump_end_3;
+
#define R(i) static_cast<uint32_t>(Register(i).ToOperand())
Write(Bytecode::kStackCheck, {30, false});
- Write(Bytecode::kLdaConstant, U8(0), OperandScale::kSingle, {42, true});
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 0 * kPointerSize);
- Write(Bytecode::kStar, R(1), OperandScale::kSingle, {42, false});
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
- Write(Bytecode::kJumpIfUndefined, U8(38), OperandScale::kSingle, {68, true});
- Write(Bytecode::kJumpIfNull, U8(36), OperandScale::kSingle);
+ Write(Bytecode::kLdaConstant, U8(0), {42, true});
+ CHECK_EQ(max_register_count(), 0);
+ Write(Bytecode::kStar, R(1), {42, false});
+ CHECK_EQ(max_register_count(), 2);
+ WriteJump(Bytecode::kJumpIfUndefined, &jump_end_1, {68, true});
+ WriteJump(Bytecode::kJumpIfNull, &jump_end_2);
Write(Bytecode::kToObject);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 2 * kPointerSize);
- Write(Bytecode::kStar, R(3), OperandScale::kSingle);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 4 * kPointerSize);
- Write(Bytecode::kForInPrepare, R(4), OperandScale::kSingle);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 7 * kPointerSize);
+ CHECK_EQ(max_register_count(), 2);
+ Write(Bytecode::kStar, R(3));
+ CHECK_EQ(max_register_count(), 4);
+ Write(Bytecode::kForInPrepare, R(4));
+ CHECK_EQ(max_register_count(), 7);
Write(Bytecode::kLdaZero);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 7 * kPointerSize);
- Write(Bytecode::kStar, R(7), OperandScale::kSingle);
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 8 * kPointerSize);
- Write(Bytecode::kForInDone, R(7), R(6), OperandScale::kSingle, {63, true});
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 8 * kPointerSize);
- Write(Bytecode::kJumpIfTrue, U8(23), OperandScale::kSingle);
- Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1), OperandScale::kSingle);
- Write(Bytecode::kJumpIfUndefined, U8(10), OperandScale::kSingle);
- Write(Bytecode::kStar, R(0), OperandScale::kSingle);
+ CHECK_EQ(max_register_count(), 7);
+ Write(Bytecode::kStar, R(7));
+ CHECK_EQ(max_register_count(), 8);
+ writer()->BindLabel(&back_jump);
+ Write(Bytecode::kForInDone, R(7), R(6), {63, true});
+ CHECK_EQ(max_register_count(), 8);
+ WriteJump(Bytecode::kJumpIfTrue, &jump_end_3);
+ Write(Bytecode::kForInNext, R(3), R(7), R(4), U8(1));
+ WriteJump(Bytecode::kJumpIfUndefined, &jump_for_in);
+ Write(Bytecode::kStar, R(0));
Write(Bytecode::kStackCheck, {54, false});
- Write(Bytecode::kLdar, R(0), OperandScale::kSingle);
- Write(Bytecode::kStar, R(2), OperandScale::kSingle);
+ Write(Bytecode::kLdar, R(0));
+ Write(Bytecode::kStar, R(2));
Write(Bytecode::kReturn, {85, true});
- Write(Bytecode::kForInStep, R(7), OperandScale::kSingle);
- Write(Bytecode::kStar, R(7), OperandScale::kSingle);
- Write(Bytecode::kJump, U8(-24), OperandScale::kSingle);
+ writer()->BindLabel(&jump_for_in);
+ Write(Bytecode::kForInStep, R(7));
+ Write(Bytecode::kStar, R(7));
+ WriteJump(Bytecode::kJump, &back_jump);
+ writer()->BindLabel(&jump_end_1);
+ writer()->BindLabel(&jump_end_2);
+ writer()->BindLabel(&jump_end_3);
Write(Bytecode::kLdaUndefined);
Write(Bytecode::kReturn, {85, true});
- CHECK_EQ(writer()->GetMaximumFrameSizeUsed(), 8 * kPointerSize);
+ CHECK_EQ(max_register_count(), 8);
#undef R
- CHECK_EQ(writer()->bytecodes()->size(), arraysize(expected_bytes));
+ CHECK_EQ(bytecodes()->size(), arraysize(expected_bytes));
for (size_t i = 0; i < arraysize(expected_bytes); ++i) {
- CHECK_EQ(static_cast<int>(writer()->bytecodes()->at(i)),
+ CHECK_EQ(static_cast<int>(bytecodes()->at(i)),
static_cast<int>(expected_bytes[i]));
}
Handle<ByteArray> source_positions =
- source_position_builder()->ToSourcePositionTable();
+ source_position_table_builder()->ToSourcePositionTable();
SourcePositionTableIterator source_iterator(*source_positions);
for (size_t i = 0; i < arraysize(expected_positions); ++i) {
const PositionTableEntry& expected = expected_positions[i];
diff --git a/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc b/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
new file mode 100644
index 0000000..915c23d
--- /dev/null
+++ b/test/unittests/interpreter/bytecode-dead-code-optimizer-unittest.cc
@@ -0,0 +1,149 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-dead-code-optimizer.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeDeadCodeOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodeDeadCodeOptimizerTest() : dead_code_optimizer_(this) {}
+ ~BytecodeDeadCodeOptimizerTest() override {}
+
+ void Write(BytecodeNode* node) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ BytecodeDeadCodeOptimizer* optimizer() { return &dead_code_optimizer_; }
+
+ int write_count() const { return write_count_; }
+ const BytecodeNode& last_written() const { return last_written_; }
+
+ private:
+ BytecodeDeadCodeOptimizer dead_code_optimizer_;
+
+ int write_count_ = 0;
+ BytecodeNode last_written_;
+};
+
+TEST_F(BytecodeDeadCodeOptimizerTest, LiveCodeKept) {
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(jump, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReturnEliminated) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterThrowEliminated) {
+ BytecodeNode thrw(Bytecode::kThrow);
+ optimizer()->Write(&thrw);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(thrw, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(thrw, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterReThrowEliminated) {
+ BytecodeNode rethrow(Bytecode::kReThrow);
+ optimizer()->Write(&rethrow);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(rethrow, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(rethrow, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeAfterJumpEliminated) {
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(jump, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(jump, last_written());
+}
+
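+// A conditional jump encountered while in dead code is itself elided and does
+// not make the bytecodes that follow it live again.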
+TEST_F(BytecodeDeadCodeOptimizerTest, DeadCodeStillDeadAfterConditionalJump) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJumpIfTrue, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+}
+
+TEST_F(BytecodeDeadCodeOptimizerTest, CodeLiveAfterLabelBind) {
+ BytecodeNode ret(Bytecode::kReturn);
+ optimizer()->Write(&ret);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(ret, last_written());
+
+ BytecodeLabel target;
+ optimizer()->BindLabel(&target);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(add, last_written());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc b/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
index cf4a920..671bdf8 100644
--- a/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
+++ b/test/unittests/interpreter/bytecode-peephole-optimizer-unittest.cc
@@ -5,6 +5,7 @@
#include "src/v8.h"
#include "src/factory.h"
+#include "src/interpreter/bytecode-label.h"
#include "src/interpreter/bytecode-peephole-optimizer.h"
#include "src/interpreter/constant-array-builder.h"
#include "src/objects-inl.h"
@@ -23,23 +24,31 @@
peephole_optimizer_(&constant_array_builder_, this) {}
~BytecodePeepholeOptimizerTest() override {}
- size_t FlushForOffset() override {
- flush_for_offset_count_++;
- return 0;
- };
-
- void FlushBasicBlock() override { flush_basic_block_count_++; }
-
void Write(BytecodeNode* node) override {
write_count_++;
last_written_.Clone(node);
}
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ write_count_++;
+ last_written_.Clone(node);
+ }
+
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
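+ // Stands in for the removed FlushBasicBlock(): converting to a bytecode
+ // array forces the optimizer to emit anything it is still holding back.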
+ void Flush() {
+ optimizer()->ToBytecodeArray(0, 0, factory()->empty_fixed_array());
+ }
+
BytecodePeepholeOptimizer* optimizer() { return &peephole_optimizer_; }
ConstantArrayBuilder* constant_array() { return &constant_array_builder_; }
- int flush_for_offset_count() const { return flush_for_offset_count_; }
- int flush_basic_block_count() const { return flush_basic_block_count_; }
int write_count() const { return write_count_; }
const BytecodeNode& last_written() const { return last_written_; }
@@ -47,96 +56,98 @@
ConstantArrayBuilder constant_array_builder_;
BytecodePeepholeOptimizer peephole_optimizer_;
- int flush_for_offset_count_ = 0;
- int flush_basic_block_count_ = 0;
int write_count_ = 0;
BytecodeNode last_written_;
};
// Sanity tests.
-TEST_F(BytecodePeepholeOptimizerTest, FlushForOffsetPassThrough) {
- CHECK_EQ(flush_for_offset_count(), 0);
- CHECK_EQ(optimizer()->FlushForOffset(), 0);
- CHECK_EQ(flush_for_offset_count(), 1);
+TEST_F(BytecodePeepholeOptimizerTest, FlushOnJump) {
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ CHECK_EQ(write_count(), 0);
+
+ BytecodeLabel target;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &target);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(jump, last_written());
}
-TEST_F(BytecodePeepholeOptimizerTest, FlushForOffsetRightSize) {
- BytecodeNode node(Bytecode::kAdd, Register(0).ToOperand(),
- OperandScale::kQuadruple);
- optimizer()->Write(&node);
- CHECK_EQ(optimizer()->FlushForOffset(), node.Size());
- CHECK_EQ(flush_for_offset_count(), 1);
+TEST_F(BytecodePeepholeOptimizerTest, FlushOnBind) {
CHECK_EQ(write_count(), 0);
-}
-TEST_F(BytecodePeepholeOptimizerTest, FlushForOffsetNop) {
- BytecodeNode node(Bytecode::kNop);
- optimizer()->Write(&node);
- CHECK_EQ(optimizer()->FlushForOffset(), 0);
- CHECK_EQ(flush_for_offset_count(), 1);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
CHECK_EQ(write_count(), 0);
-}
-TEST_F(BytecodePeepholeOptimizerTest, FlushForOffsetNopExpression) {
- BytecodeNode node(Bytecode::kNop);
- node.source_info().Update({3, false});
- optimizer()->Write(&node);
- CHECK_EQ(optimizer()->FlushForOffset(), 0);
- CHECK_EQ(flush_for_offset_count(), 1);
- CHECK_EQ(write_count(), 0);
-}
-
-TEST_F(BytecodePeepholeOptimizerTest, FlushForOffsetNopStatement) {
- BytecodeNode node(Bytecode::kNop);
- node.source_info().Update({3, true});
- optimizer()->Write(&node);
- CHECK_EQ(optimizer()->FlushForOffset(), node.Size());
- CHECK_EQ(flush_for_offset_count(), 1);
- CHECK_EQ(write_count(), 0);
-}
-
-TEST_F(BytecodePeepholeOptimizerTest, FlushBasicBlockPassThrough) {
- CHECK_EQ(flush_basic_block_count(), 0);
- optimizer()->FlushBasicBlock();
- CHECK_EQ(flush_basic_block_count(), 1);
- CHECK_EQ(write_count(), 0);
-}
-
-TEST_F(BytecodePeepholeOptimizerTest, WriteOneFlushBasicBlock) {
- BytecodeNode node(Bytecode::kAdd, Register(0).ToOperand(),
- OperandScale::kQuadruple);
- optimizer()->Write(&node);
- CHECK_EQ(write_count(), 0);
- optimizer()->FlushBasicBlock();
+ BytecodeLabel target;
+ optimizer()->BindLabel(&target);
CHECK_EQ(write_count(), 1);
- CHECK_EQ(node, last_written());
+ CHECK_EQ(add, last_written());
+}
+
+// Nop elimination tests.
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideEmptyNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, ElideExpressionNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ nop.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(add, last_written());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, KeepStatementNop) {
+ BytecodeNode nop(Bytecode::kNop);
+ nop.source_info().MakeStatementPosition(3);
+ optimizer()->Write(&nop);
+ BytecodeNode add(Bytecode::kAdd, Register(0).ToOperand());
+ add.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&add);
+ Flush();
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(add, last_written());
}
// Tests covering BytecodePeepholeOptimizer::UpdateCurrentBytecode().
TEST_F(BytecodePeepholeOptimizerTest, KeepJumpIfToBooleanTrue) {
BytecodeNode first(Bytecode::kLdaNull);
- BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3, OperandScale::kSingle);
+ BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written(), second);
}
TEST_F(BytecodePeepholeOptimizerTest, ElideJumpIfToBooleanTrue) {
BytecodeNode first(Bytecode::kLdaTrue);
- BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3, OperandScale::kSingle);
+ BytecodeNode second(Bytecode::kJumpIfToBooleanTrue, 3);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written().bytecode(), Bytecode::kJumpIfTrue);
CHECK_EQ(last_written().operand(0), second.operand(0));
@@ -150,7 +161,7 @@
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written(), second);
}
@@ -163,95 +174,81 @@
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written().bytecode(), Bytecode::kLogicalNot);
}
// Tests covering BytecodePeepholeOptimizer::CanElideCurrent().
-TEST_F(BytecodePeepholeOptimizerTest, LdarRxLdarRy) {
- BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
- BytecodeNode second(Bytecode::kLdar, Register(1).ToOperand(),
- OperandScale::kSingle);
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRy) {
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(1).ToOperand());
optimizer()->Write(&first);
- optimizer()->FlushForOffset(); // Prevent CanElideLast removing |first|.
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written(), second);
}
-TEST_F(BytecodePeepholeOptimizerTest, LdarRxLdarRx) {
- BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
- BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRx) {
+ BytecodeLabel label;
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
- optimizer()->FlushForOffset(); // Prevent CanElideLast removing |first|.
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
}
-TEST_F(BytecodePeepholeOptimizerTest, LdarRxLdarRxStatement) {
- BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
- BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
- second.source_info().Update({0, true});
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatement) {
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ second.source_info().MakeStatementPosition(0);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
- optimizer()->FlushForOffset(); // Prevent CanElideLast removing |first|.
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written().bytecode(), Bytecode::kNop);
CHECK_EQ(last_written().source_info(), second.source_info());
}
-TEST_F(BytecodePeepholeOptimizerTest, LdarRxLdarRxStatementStarRy) {
- BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
- BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
- BytecodeNode third(Bytecode::kStar, Register(3).ToOperand(),
- OperandScale::kSingle);
- second.source_info().Update({0, true});
+TEST_F(BytecodePeepholeOptimizerTest, StarRxLdarRxStatementStarRy) {
+ BytecodeLabel label;
+ BytecodeNode first(Bytecode::kStar, Register(0).ToOperand());
+ BytecodeNode second(Bytecode::kLdar, Register(0).ToOperand());
+ BytecodeNode third(Bytecode::kStar, Register(3).ToOperand());
+ second.source_info().MakeStatementPosition(0);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
- optimizer()->FlushForOffset(); // Prevent CanElideLast removing |first|.
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
optimizer()->Write(&third);
CHECK_EQ(write_count(), 1);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
- // Source position should move |second| to |third| when |second| is elided.
- third.source_info().Update(second.source_info());
CHECK_EQ(last_written(), third);
}
TEST_F(BytecodePeepholeOptimizerTest, LdarToName) {
- BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand(),
- OperandScale::kSingle);
+ BytecodeNode first(Bytecode::kLdar, Register(0).ToOperand());
BytecodeNode second(Bytecode::kToName);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written(), second);
}
@@ -264,7 +261,7 @@
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
}
@@ -276,7 +273,7 @@
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
}
@@ -284,50 +281,34 @@
Handle<Object> word =
isolate()->factory()->NewStringFromStaticChars("optimizing");
size_t index = constant_array()->Insert(word);
- BytecodeNode first(Bytecode::kLdaConstant, static_cast<uint32_t>(index),
- OperandScale::kSingle);
+ BytecodeNode first(Bytecode::kLdaConstant, static_cast<uint32_t>(index));
BytecodeNode second(Bytecode::kToName);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
}
TEST_F(BytecodePeepholeOptimizerTest, LdaConstantNumberToName) {
Handle<Object> word = isolate()->factory()->NewNumber(0.380);
size_t index = constant_array()->Insert(word);
- BytecodeNode first(Bytecode::kLdaConstant, static_cast<uint32_t>(index),
- OperandScale::kSingle);
+ BytecodeNode first(Bytecode::kLdaConstant, static_cast<uint32_t>(index));
BytecodeNode second(Bytecode::kToName);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 2);
CHECK_EQ(last_written(), second);
}
// Tests covering BytecodePeepholeOptimizer::CanElideLast().
-TEST_F(BytecodePeepholeOptimizerTest, LdaTrueLdaFalseNotDiscardable) {
- BytecodeNode first(Bytecode::kLdaTrue);
- BytecodeNode second(Bytecode::kLdaFalse);
- optimizer()->Write(&first);
- optimizer()->FlushForOffset(); // Prevent discarding of |first|.
- CHECK_EQ(write_count(), 0);
- optimizer()->Write(&second);
- CHECK_EQ(write_count(), 1);
- CHECK_EQ(last_written(), first);
- optimizer()->FlushBasicBlock();
- CHECK_EQ(write_count(), 2);
- CHECK_EQ(last_written(), second);
-}
-
TEST_F(BytecodePeepholeOptimizerTest, LdaTrueLdaFalse) {
BytecodeNode first(Bytecode::kLdaTrue);
BytecodeNode second(Bytecode::kLdaFalse);
@@ -335,23 +316,24 @@
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 0);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), second);
}
TEST_F(BytecodePeepholeOptimizerTest, LdaTrueStatementLdaFalse) {
BytecodeNode first(Bytecode::kLdaTrue);
- first.source_info().Update({3, false});
+ first.source_info().MakeExpressionPosition(3);
BytecodeNode second(Bytecode::kLdaFalse);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 0);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
- second.source_info().Update(first.source_info());
CHECK_EQ(last_written(), second);
+ CHECK(second.source_info().is_expression());
+ CHECK_EQ(second.source_info().source_position(), 3);
}
TEST_F(BytecodePeepholeOptimizerTest, NopStackCheck) {
@@ -361,25 +343,152 @@
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 0);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
CHECK_EQ(last_written(), second);
}
TEST_F(BytecodePeepholeOptimizerTest, NopStatementStackCheck) {
BytecodeNode first(Bytecode::kNop);
- first.source_info().Update({3, false});
+ first.source_info().MakeExpressionPosition(3);
BytecodeNode second(Bytecode::kStackCheck);
optimizer()->Write(&first);
CHECK_EQ(write_count(), 0);
optimizer()->Write(&second);
CHECK_EQ(write_count(), 0);
- optimizer()->FlushBasicBlock();
+ Flush();
CHECK_EQ(write_count(), 1);
- second.source_info().Update(first.source_info());
+ second.source_info().MakeExpressionPosition(
+ first.source_info().source_position());
CHECK_EQ(last_written(), second);
}
+// Tests covering BytecodePeepholeOptimizer::UpdateLastAndCurrentBytecodes().
+
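+// Each case below writes an accumulator load followed by a Star and expects
+// the peephole optimizer to merge the pair into the corresponding Ldr*
+// bytecode, which stores straight into the register; a Ldar of that register
+// is then emitted before the next bytecode so the accumulator still holds the
+// loaded value.
+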
+TEST_F(BytecodePeepholeOptimizerTest, MergeLoadICStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(31).ToOperand()), 32, 33,
+ static_cast<uint32_t>(Register(256).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaNamedProperty, operands[0], operands[1],
+ operands[2]);
+ BytecodeNode second(Bytecode::kStar, operands[3]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrNamedProperty);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaKeyedPropertyStar) {
+ const uint32_t operands[] = {static_cast<uint32_t>(Register(31).ToOperand()),
+ 9999997,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaKeyedProperty, operands[0], operands[1]);
+ BytecodeNode second(Bytecode::kStar, operands[2]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrKeyedProperty);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaGlobalStar) {
+ const uint32_t operands[] = {19191,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaGlobal, operands[0]);
+ BytecodeNode second(Bytecode::kStar, operands[1]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrGlobal);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaContextSlotStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(200000).ToOperand()), 55005500,
+ static_cast<uint32_t>(Register(1).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaContextSlot, operands[0], operands[1]);
+ BytecodeNode second(Bytecode::kStar, operands[2]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrContextSlot);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
+TEST_F(BytecodePeepholeOptimizerTest, MergeLdaUndefinedStar) {
+ const uint32_t operands[] = {
+ static_cast<uint32_t>(Register(100000).ToOperand())};
+ const int expected_operand_count = static_cast<int>(arraysize(operands));
+
+ BytecodeNode first(Bytecode::kLdaUndefined);
+ BytecodeNode second(Bytecode::kStar, operands[0]);
+ BytecodeNode third(Bytecode::kReturn);
+ optimizer()->Write(&first);
+ optimizer()->Write(&second);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdrUndefined);
+ CHECK_EQ(last_written().operand_count(), expected_operand_count);
+ for (int i = 0; i < expected_operand_count; ++i) {
+ CHECK_EQ(last_written().operand(i), operands[i]);
+ }
+ optimizer()->Write(&third);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(last_written().bytecode(), Bytecode::kLdar);
+ CHECK_EQ(last_written().operand(0), operands[expected_operand_count - 1]);
+ Flush();
+ CHECK_EQ(last_written().bytecode(), third.bytecode());
+}
+
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/test/unittests/interpreter/bytecode-pipeline-unittest.cc b/test/unittests/interpreter/bytecode-pipeline-unittest.cc
index f12391c..663b7e5 100644
--- a/test/unittests/interpreter/bytecode-pipeline-unittest.cc
+++ b/test/unittests/interpreter/bytecode-pipeline-unittest.cc
@@ -24,7 +24,7 @@
CHECK_EQ(x.is_statement(), false);
CHECK_EQ(x.is_valid(), false);
- x.Update({1, true});
+ x.MakeStatementPosition(1);
BytecodeSourceInfo y(1, true);
CHECK(x == y);
CHECK(!(x != y));
@@ -33,20 +33,20 @@
CHECK(!(x == y));
CHECK(x != y);
- y.Update({2, false});
+ y.MakeStatementPosition(1);
CHECK_EQ(y.source_position(), 1);
CHECK_EQ(y.is_statement(), true);
- y.Update({2, true});
+ y.MakeStatementPosition(2);
CHECK_EQ(y.source_position(), 2);
CHECK_EQ(y.is_statement(), true);
y.set_invalid();
- y.Update({3, false});
+ y.MakeExpressionPosition(3);
CHECK_EQ(y.source_position(), 3);
CHECK_EQ(y.is_statement(), false);
- y.Update({3, true});
+ y.MakeStatementPosition(3);
CHECK_EQ(y.source_position(), 3);
CHECK_EQ(y.is_statement(), true);
}
@@ -61,100 +61,88 @@
BytecodeNode node(Bytecode::kLdaZero);
CHECK_EQ(node.bytecode(), Bytecode::kLdaZero);
CHECK_EQ(node.operand_count(), 0);
- CHECK_EQ(node.operand_scale(), OperandScale::kSingle);
CHECK(!node.source_info().is_valid());
- CHECK_EQ(node.Size(), 1);
}
TEST_F(BytecodeNodeTest, Constructor2) {
uint32_t operands[] = {0x11};
- BytecodeNode node(Bytecode::kJumpIfTrue, operands[0], OperandScale::kDouble);
+ BytecodeNode node(Bytecode::kJumpIfTrue, operands[0]);
CHECK_EQ(node.bytecode(), Bytecode::kJumpIfTrue);
CHECK_EQ(node.operand_count(), 1);
CHECK_EQ(node.operand(0), operands[0]);
- CHECK_EQ(node.operand_scale(), OperandScale::kDouble);
CHECK(!node.source_info().is_valid());
- CHECK_EQ(node.Size(), 4);
}
TEST_F(BytecodeNodeTest, Constructor3) {
- uint32_t operands[] = {0x11, 0x22};
- BytecodeNode node(Bytecode::kLdaGlobal, operands[0], operands[1],
- OperandScale::kQuadruple);
+ uint32_t operands[] = {0x11};
+ BytecodeNode node(Bytecode::kLdaGlobal, operands[0]);
CHECK_EQ(node.bytecode(), Bytecode::kLdaGlobal);
- CHECK_EQ(node.operand_count(), 2);
+ CHECK_EQ(node.operand_count(), 1);
CHECK_EQ(node.operand(0), operands[0]);
- CHECK_EQ(node.operand(1), operands[1]);
- CHECK_EQ(node.operand_scale(), OperandScale::kQuadruple);
CHECK(!node.source_info().is_valid());
- CHECK_EQ(node.Size(), 10);
}
TEST_F(BytecodeNodeTest, Constructor4) {
uint32_t operands[] = {0x11, 0x22, 0x33};
- BytecodeNode node(Bytecode::kLoadIC, operands[0], operands[1], operands[2],
- OperandScale::kSingle);
+ BytecodeNode node(Bytecode::kLdaNamedProperty, operands[0], operands[1],
+ operands[2]);
CHECK_EQ(node.operand_count(), 3);
- CHECK_EQ(node.bytecode(), Bytecode::kLoadIC);
+ CHECK_EQ(node.bytecode(), Bytecode::kLdaNamedProperty);
CHECK_EQ(node.operand(0), operands[0]);
CHECK_EQ(node.operand(1), operands[1]);
CHECK_EQ(node.operand(2), operands[2]);
- CHECK_EQ(node.operand_scale(), OperandScale::kSingle);
CHECK(!node.source_info().is_valid());
- CHECK_EQ(node.Size(), 4);
}
TEST_F(BytecodeNodeTest, Constructor5) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], OperandScale::kDouble);
+ operands[3]);
CHECK_EQ(node.operand_count(), 4);
CHECK_EQ(node.bytecode(), Bytecode::kForInNext);
CHECK_EQ(node.operand(0), operands[0]);
CHECK_EQ(node.operand(1), operands[1]);
CHECK_EQ(node.operand(2), operands[2]);
CHECK_EQ(node.operand(3), operands[3]);
- CHECK_EQ(node.operand_scale(), OperandScale::kDouble);
CHECK(!node.source_info().is_valid());
- CHECK_EQ(node.Size(), 10);
}
TEST_F(BytecodeNodeTest, Equality) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], OperandScale::kDouble);
+ operands[3]);
CHECK_EQ(node, node);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
- operands[2], operands[3], OperandScale::kDouble);
+ operands[2], operands[3]);
CHECK_EQ(node, other);
}
TEST_F(BytecodeNodeTest, EqualityWithSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], OperandScale::kDouble);
- node.source_info().Update({3, true});
+ operands[3]);
+ node.source_info().MakeStatementPosition(3);
CHECK_EQ(node, node);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
- operands[2], operands[3], OperandScale::kDouble);
- other.source_info().Update({3, true});
+ operands[2], operands[3]);
+ other.source_info().MakeStatementPosition(3);
CHECK_EQ(node, other);
}
TEST_F(BytecodeNodeTest, NoEqualityWithDifferentSourceInfo) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], OperandScale::kDouble);
- node.source_info().Update({3, true});
+ operands[3]);
+ node.source_info().MakeStatementPosition(3);
BytecodeNode other(Bytecode::kForInNext, operands[0], operands[1],
- operands[2], operands[3], OperandScale::kDouble);
+ operands[2], operands[3]);
CHECK_NE(node, other);
}
TEST_F(BytecodeNodeTest, Clone) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], OperandScale::kDouble);
+ operands[3]);
BytecodeNode clone;
clone.Clone(&node);
CHECK_EQ(clone, node);
@@ -163,33 +151,32 @@
TEST_F(BytecodeNodeTest, SetBytecode0) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], OperandScale::kDouble);
+ operands[3]);
BytecodeSourceInfo source_info(77, false);
- node.source_info().Update(source_info);
+ node.source_info().Clone(source_info);
+ CHECK_EQ(node.source_info(), source_info);
BytecodeNode clone;
clone.Clone(&node);
clone.set_bytecode(Bytecode::kNop);
CHECK_EQ(clone.bytecode(), Bytecode::kNop);
CHECK_EQ(clone.operand_count(), 0);
- CHECK_EQ(clone.operand_scale(), OperandScale::kSingle);
CHECK_EQ(clone.source_info(), source_info);
}
TEST_F(BytecodeNodeTest, SetBytecode1) {
uint32_t operands[] = {0x71, 0xa5, 0x5a, 0xfc};
BytecodeNode node(Bytecode::kForInNext, operands[0], operands[1], operands[2],
- operands[3], OperandScale::kDouble);
+ operands[3]);
BytecodeSourceInfo source_info(77, false);
- node.source_info().Update(source_info);
+ node.source_info().Clone(source_info);
BytecodeNode clone;
clone.Clone(&node);
- clone.set_bytecode(Bytecode::kJump, 0x01aabbcc, OperandScale::kQuadruple);
+ clone.set_bytecode(Bytecode::kJump, 0x01aabbcc);
CHECK_EQ(clone.bytecode(), Bytecode::kJump);
CHECK_EQ(clone.operand_count(), 1);
CHECK_EQ(clone.operand(0), 0x01aabbcc);
- CHECK_EQ(clone.operand_scale(), OperandScale::kQuadruple);
CHECK_EQ(clone.source_info(), source_info);
}
diff --git a/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc b/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
new file mode 100644
index 0000000..795bee8
--- /dev/null
+++ b/test/unittests/interpreter/bytecode-register-optimizer-unittest.cc
@@ -0,0 +1,219 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/interpreter/bytecode-label.h"
+#include "src/interpreter/bytecode-register-optimizer.h"
+#include "src/objects-inl.h"
+#include "src/objects.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeRegisterOptimizerTest : public BytecodePipelineStage,
+ public TestWithIsolateAndZone {
+ public:
+ BytecodeRegisterOptimizerTest() {}
+ ~BytecodeRegisterOptimizerTest() override { delete register_allocator_; }
+
+ void Initialize(int number_of_parameters, int number_of_locals) {
+ register_allocator_ =
+ new TemporaryRegisterAllocator(zone(), number_of_locals);
+ register_optimizer_ = new (zone()) BytecodeRegisterOptimizer(
+ zone(), register_allocator_, number_of_parameters, this);
+ }
+
+ void Write(BytecodeNode* node) override { output_.push_back(*node); }
+ void WriteJump(BytecodeNode* node, BytecodeLabel* label) override {
+ output_.push_back(*node);
+ }
+ void BindLabel(BytecodeLabel* label) override {}
+ void BindLabel(const BytecodeLabel& target, BytecodeLabel* label) override {}
+ Handle<BytecodeArray> ToBytecodeArray(
+ int fixed_register_count, int parameter_count,
+ Handle<FixedArray> handle_table) override {
+ return Handle<BytecodeArray>();
+ }
+
+ TemporaryRegisterAllocator* allocator() { return register_allocator_; }
+ BytecodeRegisterOptimizer* optimizer() { return register_optimizer_; }
+
+ Register NewTemporary() {
+ return Register(allocator()->BorrowTemporaryRegister());
+ }
+
+ void KillTemporary(Register reg) {
+ allocator()->ReturnTemporaryRegister(reg.index());
+ }
+
+ size_t write_count() const { return output_.size(); }
+ const BytecodeNode& last_written() const { return output_.back(); }
+ const std::vector<BytecodeNode>* output() { return &output_; }
+
+ private:
+ TemporaryRegisterAllocator* register_allocator_;
+ BytecodeRegisterOptimizer* register_optimizer_;
+
+ std::vector<BytecodeNode> output_;
+};
+
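+// The fixture above acts as the stage downstream of the register optimizer:
+// every node it forwards is appended to output_, while NewTemporary() and
+// KillTemporary() borrow and return registers from the temporary allocator.
+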
+// Sanity tests.
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNop) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNopExpression) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ node.source_info().MakeExpressionPosition(3);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, WriteNopStatement) {
+ Initialize(1, 1);
+ BytecodeNode node(Bytecode::kNop);
+ node.source_info().MakeStatementPosition(3);
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(node, last_written());
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForJump) {
+ Initialize(1, 1);
+ Register temp = NewTemporary();
+ BytecodeNode node(Bytecode::kStar, temp.ToOperand());
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 0);
+ BytecodeLabel label;
+ BytecodeNode jump(Bytecode::kJump, 0);
+ optimizer()->WriteJump(&jump, &label);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kJump);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryMaterializedForBind) {
+ Initialize(1, 1);
+ Register temp = NewTemporary();
+ BytecodeNode node(Bytecode::kStar, temp.ToOperand());
+ optimizer()->Write(&node);
+ CHECK_EQ(write_count(), 0);
+ BytecodeLabel label;
+ optimizer()->BindLabel(&label);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(0).operand(0), temp.ToOperand());
+}
+
+// Basic Register Optimizations
+
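+// Stores to temporaries are deferred and may be dropped entirely if the value
+// is never observably used, while stores to local registers are materialized
+// immediately (as the Mov in StoresToLocalsImmediate), as the cases below
+// check.
+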
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotEmitted) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 0);
+ Register temp = NewTemporary();
+ BytecodeNode node1(Bytecode::kStar, NewTemporary().ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 0);
+ KillTemporary(temp);
+ CHECK_EQ(write_count(), 0);
+ BytecodeNode node2(Bytecode::kReturn);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 2);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kReturn);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, StoresToLocalsImmediate) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ BytecodeNode node0(Bytecode::kLdar, parameter.ToOperand());
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 0);
+ Register local = Register(0);
+ BytecodeNode node1(Bytecode::kStar, local.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kMov);
+ CHECK_EQ(output()->at(0).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(0).operand(1), local.ToOperand());
+
+ BytecodeNode node2(Bytecode::kReturn);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 3);
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kLdar);
+ CHECK_EQ(output()->at(1).operand(0), local.ToOperand());
+ CHECK_EQ(output()->at(2).bytecode(), Bytecode::kReturn);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, TemporaryNotMaterializedForInput) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node0(Bytecode::kMov, parameter.ToOperand(), temp0.ToOperand());
+ optimizer()->Write(&node0);
+ BytecodeNode node1(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
+ optimizer()->Write(&node1);
+ CHECK_EQ(write_count(), 0);
+ BytecodeNode node2(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 1);
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 1);
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kCallJSRuntime);
+ CHECK_EQ(output()->at(0).operand(0), 0);
+ CHECK_EQ(output()->at(0).operand(1), parameter.ToOperand());
+ CHECK_EQ(output()->at(0).operand(2), 1);
+}
+
+TEST_F(BytecodeRegisterOptimizerTest, RangeOfTemporariesMaterializedForInput) {
+ Initialize(3, 1);
+ Register parameter = Register::FromParameterIndex(1, 3);
+ Register temp0 = NewTemporary();
+ Register temp1 = NewTemporary();
+ BytecodeNode node0(Bytecode::kLdaSmi, 3);
+ optimizer()->Write(&node0);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node1(Bytecode::kStar, temp0.ToOperand());
+ optimizer()->Write(&node1);
+ BytecodeNode node2(Bytecode::kMov, parameter.ToOperand(), temp1.ToOperand());
+ optimizer()->Write(&node2);
+ CHECK_EQ(write_count(), 1);
+ BytecodeNode node3(Bytecode::kCallJSRuntime, 0, temp0.ToOperand(), 2);
+ optimizer()->Write(&node3);
+ CHECK_EQ(write_count(), 4);
+
+ CHECK_EQ(output()->at(0).bytecode(), Bytecode::kLdaSmi);
+ CHECK_EQ(output()->at(0).operand(0), 3);
+
+ CHECK_EQ(output()->at(1).bytecode(), Bytecode::kStar);
+ CHECK_EQ(output()->at(1).operand(0), temp0.ToOperand());
+
+ CHECK_EQ(output()->at(2).bytecode(), Bytecode::kMov);
+ CHECK_EQ(output()->at(2).operand(0), parameter.ToOperand());
+ CHECK_EQ(output()->at(2).operand(1), temp1.ToOperand());
+
+ CHECK_EQ(output()->at(3).bytecode(), Bytecode::kCallJSRuntime);
+ CHECK_EQ(output()->at(3).operand(0), 0);
+ CHECK_EQ(output()->at(3).operand(1), temp0.ToOperand());
+ CHECK_EQ(output()->at(3).operand(2), 2);
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/interpreter/bytecodes-unittest.cc b/test/unittests/interpreter/bytecodes-unittest.cc
index eebacb2..74a2bb3 100644
--- a/test/unittests/interpreter/bytecodes-unittest.cc
+++ b/test/unittests/interpreter/bytecodes-unittest.cc
@@ -254,35 +254,6 @@
}
}
-TEST(Bytecodes, OperandScales) {
- CHECK_EQ(Bytecodes::OperandSizesToScale(OperandSize::kByte),
- OperandScale::kSingle);
- CHECK_EQ(Bytecodes::OperandSizesToScale(OperandSize::kShort),
- OperandScale::kDouble);
- CHECK_EQ(Bytecodes::OperandSizesToScale(OperandSize::kQuad),
- OperandScale::kQuadruple);
- CHECK_EQ(
- Bytecodes::OperandSizesToScale(OperandSize::kShort, OperandSize::kShort,
- OperandSize::kShort, OperandSize::kShort),
- OperandScale::kDouble);
- CHECK_EQ(
- Bytecodes::OperandSizesToScale(OperandSize::kQuad, OperandSize::kShort,
- OperandSize::kShort, OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(
- Bytecodes::OperandSizesToScale(OperandSize::kShort, OperandSize::kQuad,
- OperandSize::kShort, OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(
- Bytecodes::OperandSizesToScale(OperandSize::kShort, OperandSize::kShort,
- OperandSize::kQuad, OperandSize::kShort),
- OperandScale::kQuadruple);
- CHECK_EQ(
- Bytecodes::OperandSizesToScale(OperandSize::kShort, OperandSize::kShort,
- OperandSize::kShort, OperandSize::kQuad),
- OperandScale::kQuadruple);
-}
-
TEST(Bytecodes, SizesForSignedOperands) {
CHECK(Bytecodes::SizeForSignedOperand(0) == OperandSize::kByte);
CHECK(Bytecodes::SizeForSignedOperand(kMaxInt8) == OperandSize::kByte);
@@ -366,7 +337,6 @@
names.insert(Bytecodes::AccumulatorUseToString(AccumulatorUse::kReadWrite));
CHECK_EQ(names.size(), 4);
}
-
} // namespace interpreter
} // namespace internal
} // namespace v8
diff --git a/test/unittests/interpreter/constant-array-builder-unittest.cc b/test/unittests/interpreter/constant-array-builder-unittest.cc
index 7122437..c48ac58 100644
--- a/test/unittests/interpreter/constant-array-builder-unittest.cc
+++ b/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -89,7 +89,7 @@
}
for (size_t i = 0; i < reserved; i++) {
size_t index = k8BitCapacity - reserved + i;
- CHECK(builder.At(index)->IsTheHole());
+ CHECK(builder.At(index)->IsTheHole(isolate()));
}
// Now make reservations, and commit them with unique entries.
diff --git a/test/unittests/interpreter/interpreter-assembler-unittest.cc b/test/unittests/interpreter/interpreter-assembler-unittest.cc
index cd21f09..1bc80c0 100644
--- a/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -428,6 +428,10 @@
EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
m.IsUnsignedOperand(offset, operand_size));
break;
+ case interpreter::OperandType::kIntrinsicId:
+ EXPECT_THAT(m.BytecodeOperandIntrinsicId(i),
+ m.IsUnsignedOperand(offset, operand_size));
+ break;
case interpreter::OperandType::kNone:
UNREACHABLE();
break;
@@ -692,16 +696,14 @@
m.IsLoad(MachineType::AnyTagged(), IsLoadParentFramePointer(),
IsIntPtrConstant(Register::function_closure().ToOperand()
<< kPointerSizeLog2));
- Matcher<Node*> load_shared_function_info_matcher =
- m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
- IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
- kHeapObjectTag));
+ Matcher<Node*> load_literals_matcher = m.IsLoad(
+ MachineType::AnyTagged(), load_function_matcher,
+ IsIntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag));
- EXPECT_THAT(
- feedback_vector,
- m.IsLoad(MachineType::AnyTagged(), load_shared_function_info_matcher,
- IsIntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
- kHeapObjectTag)));
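+ // The feedback vector is now reached through the function's literals array
+ // rather than through its SharedFunctionInfo.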
+ EXPECT_THAT(feedback_vector,
+ m.IsLoad(MachineType::AnyTagged(), load_literals_matcher,
+ IsIntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
+ kHeapObjectTag)));
}
}
diff --git a/test/unittests/interpreter/source-position-table-unittest.cc b/test/unittests/interpreter/source-position-table-unittest.cc
index 230e57d..87d58fd 100644
--- a/test/unittests/interpreter/source-position-table-unittest.cc
+++ b/test/unittests/interpreter/source-position-table-unittest.cc
@@ -56,23 +56,26 @@
TEST_F(SourcePositionTableTest, EncodeAscending) {
SourcePositionTableBuilder builder(isolate(), zone());
- int accumulator = 0;
+ int code_offset = 0;
+ int source_position = 0;
for (int i = 0; i < arraysize(offsets); i++) {
- accumulator += offsets[i];
+ code_offset += offsets[i];
+ source_position += offsets[i];
if (i % 2) {
- builder.AddPosition(accumulator, accumulator, true);
+ builder.AddPosition(code_offset, source_position, true);
} else {
- builder.AddPosition(accumulator, accumulator, false);
+ builder.AddPosition(code_offset, source_position, false);
}
}
- // Also test negative offsets:
+ // Also test negative offsets for source positions:
for (int i = 0; i < arraysize(offsets); i++) {
- accumulator -= offsets[i];
+ code_offset += offsets[i];
+ source_position -= offsets[i];
if (i % 2) {
- builder.AddPosition(accumulator, accumulator, true);
+ builder.AddPosition(code_offset, source_position, true);
} else {
- builder.AddPosition(accumulator, accumulator, false);
+ builder.AddPosition(code_offset, source_position, false);
}
}
diff --git a/test/unittests/register-configuration-unittest.cc b/test/unittests/register-configuration-unittest.cc
new file mode 100644
index 0000000..31333ee
--- /dev/null
+++ b/test/unittests/register-configuration-unittest.cc
@@ -0,0 +1,130 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/register-configuration.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+const MachineRepresentation kFloat32 = MachineRepresentation::kFloat32;
+const MachineRepresentation kFloat64 = MachineRepresentation::kFloat64;
+
+class RegisterConfigurationUnitTest : public ::testing::Test {
+ public:
+ RegisterConfigurationUnitTest() {}
+ virtual ~RegisterConfigurationUnitTest() {}
+
+ private:
+};
+
+TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
+ const int kNumGeneralRegs = 3;
+ const int kNumDoubleRegs = 4;
+ const int kNumAllocatableGeneralRegs = 2;
+ const int kNumAllocatableDoubleRegs = 2;
+ int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
+ int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
+
+ RegisterConfiguration test(
+ kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, general_codes, double_codes,
+ RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr);
+
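+ // With OVERLAP aliasing, float registers share codes with double registers,
+ // so the allocatable float count matches the double count checked below.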
+ EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
+ EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_general_registers(),
+ kNumAllocatableGeneralRegs);
+ EXPECT_EQ(test.num_allocatable_double_registers(), kNumAllocatableDoubleRegs);
+ EXPECT_EQ(test.num_allocatable_float_registers(), kNumAllocatableDoubleRegs);
+
+ EXPECT_EQ(test.allocatable_general_codes_mask(),
+ (1 << general_codes[0]) | (1 << general_codes[1]));
+ EXPECT_EQ(test.GetAllocatableGeneralCode(0), general_codes[0]);
+ EXPECT_EQ(test.GetAllocatableGeneralCode(1), general_codes[1]);
+ EXPECT_EQ(test.allocatable_double_codes_mask(),
+ (1 << double_codes[0]) | (1 << double_codes[1]));
+ EXPECT_EQ(test.GetAllocatableDoubleCode(0), double_codes[0]);
+ EXPECT_EQ(test.GetAllocatableDoubleCode(1), double_codes[1]);
+}
+
+TEST_F(RegisterConfigurationUnitTest, Aliasing) {
+ const int kNumGeneralRegs = 3;
+ const int kNumDoubleRegs = 4;
+ const int kNumAllocatableGeneralRegs = 2;
+ const int kNumAllocatableDoubleRegs = 3;
+ int general_codes[] = {1, 2};
+ int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
+
+ RegisterConfiguration test(
+ kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
+ kNumAllocatableDoubleRegs, general_codes, double_codes,
+ RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr);
+
+ // There are 3 allocatable double regs, but only 2 can alias float regs.
+ EXPECT_EQ(test.num_allocatable_float_registers(), 4);
+
+ // Test that float registers combine in pairs to form double registers.
+ EXPECT_EQ(test.GetAllocatableFloatCode(0), double_codes[0] * 2);
+ EXPECT_EQ(test.GetAllocatableFloatCode(1), double_codes[0] * 2 + 1);
+ EXPECT_EQ(test.GetAllocatableFloatCode(2), double_codes[1] * 2);
+ EXPECT_EQ(test.GetAllocatableFloatCode(3), double_codes[1] * 2 + 1);
+
+ // Registers alias themselves.
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat64, 0));
+ // Registers don't alias other registers of the same size.
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat32, 0));
+ EXPECT_FALSE(test.AreAliases(kFloat64, 1, kFloat64, 0));
+ // Float registers combine in pairs and alias double registers.
+ EXPECT_TRUE(test.AreAliases(kFloat32, 0, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat32, 1, kFloat64, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 0));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
+
+ EXPECT_FALSE(test.AreAliases(kFloat32, 0, kFloat64, 1));
+ EXPECT_FALSE(test.AreAliases(kFloat32, 1, kFloat64, 1));
+
+ EXPECT_TRUE(test.AreAliases(kFloat64, 0, kFloat32, 1));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 2));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 1, kFloat32, 3));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 4));
+ EXPECT_TRUE(test.AreAliases(kFloat64, 2, kFloat32, 5));
+
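+ // As exercised below, GetAliases() reports how many registers of the
+ // requested representation alias the given register and writes the first
+ // such code to alias_base_index.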
+ int alias_base_index = -1;
+ EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat32, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 0, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat32, 1, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 2, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat32, 3, kFloat64, &alias_base_index), 1);
+ EXPECT_EQ(alias_base_index, 1);
+ EXPECT_EQ(test.GetAliases(kFloat64, 0, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, 1, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 2);
+
+ // Non-allocatable codes still alias.
+ EXPECT_EQ(test.GetAliases(kFloat64, 2, kFloat32, &alias_base_index), 2);
+ EXPECT_EQ(alias_base_index, 4);
+ // High numbered double registers don't alias nonexistent single registers.
+ EXPECT_EQ(
+ test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2,
+ kFloat32, &alias_base_index),
+ 0);
+ EXPECT_EQ(
+ test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters / 2 + 1,
+ kFloat32, &alias_base_index),
+ 0);
+ EXPECT_EQ(test.GetAliases(kFloat64, RegisterConfiguration::kMaxFPRegisters,
+ kFloat32, &alias_base_index),
+ 0);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/unittests.gyp b/test/unittests/unittests.gyp
index e16ebeb..d5ba353 100644
--- a/test/unittests/unittests.gyp
+++ b/test/unittests/unittests.gyp
@@ -2,6 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
+# Please keep this file in sync with BUILD.gn.
+
{
'variables': {
'v8_code': 1,
@@ -29,6 +31,7 @@
'base/division-by-constant-unittest.cc',
'base/flags-unittest.cc',
'base/functional-unittest.cc',
+ 'base/ieee754-unittest.cc',
'base/logging-unittest.cc',
'base/iterator-unittest.cc',
'base/platform/condition-variable-unittest.cc',
@@ -41,7 +44,7 @@
'cancelable-tasks-unittest.cc',
'char-predicates-unittest.cc',
'compiler/branch-elimination-unittest.cc',
- 'compiler/coalesced-live-ranges-unittest.cc',
+ 'compiler/checkpoint-elimination-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',
@@ -97,9 +100,11 @@
'interpreter/bytecode-array-builder-unittest.cc',
'interpreter/bytecode-array-iterator-unittest.cc',
'interpreter/bytecode-array-writer-unittest.cc',
+ 'interpreter/bytecode-dead-code-optimizer-unittest.cc',
'interpreter/bytecode-peephole-optimizer-unittest.cc',
- 'interpreter/bytecode-register-allocator-unittest.cc',
'interpreter/bytecode-pipeline-unittest.cc',
+ 'interpreter/bytecode-register-allocator-unittest.cc',
+ 'interpreter/bytecode-register-optimizer-unittest.cc',
'interpreter/constant-array-builder-unittest.cc',
'interpreter/interpreter-assembler-unittest.cc',
'interpreter/interpreter-assembler-unittest.h',
@@ -115,10 +120,13 @@
'heap/scavenge-job-unittest.cc',
'heap/slot-set-unittest.cc',
'locked-queue-unittest.cc',
+ 'register-configuration-unittest.cc',
'run-all-unittests.cc',
'test-utils.h',
'test-utils.cc',
+ 'wasm/asm-types-unittest.cc',
'wasm/ast-decoder-unittest.cc',
+ 'wasm/control-transfer-unittest.cc',
'wasm/decoder-unittest.cc',
'wasm/encoder-unittest.cc',
'wasm/leb-helper-unittest.cc',
diff --git a/test/unittests/unittests.status b/test/unittests/unittests.status
index 84fd087..6d67439 100644
--- a/test/unittests/unittests.status
+++ b/test/unittests/unittests.status
@@ -12,4 +12,9 @@
'AstDecoderTest.AllLoadMemCombinations': [SKIP],
'AstDecoderTest.AllStoreMemCombinations': [SKIP],
}], # 'byteorder == big'
+['arch == x87', {
+ 'Ieee754.Expm1': [SKIP],
+ 'Ieee754.Cos': [SKIP],
+ 'Ieee754.Tan': [SKIP],
+}], # 'arch == x87'
]
diff --git a/test/unittests/wasm/asm-types-unittest.cc b/test/unittests/wasm/asm-types-unittest.cc
new file mode 100644
index 0000000..9b29362
--- /dev/null
+++ b/test/unittests/wasm/asm-types-unittest.cc
@@ -0,0 +1,729 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/wasm/asm-types.h"
+
+#include <unordered_map>
+#include <unordered_set>
+
+#include "src/base/macros.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+namespace {
+
+using ::testing::StrEq;
+
+class AsmTypeTest : public TestWithZone {
+ public:
+ using Type = AsmType;
+
+ AsmTypeTest()
+ : parents_({
+ {Type::Uint8Array(), {Type::Heap()}},
+ {Type::Int8Array(), {Type::Heap()}},
+ {Type::Uint16Array(), {Type::Heap()}},
+ {Type::Int16Array(), {Type::Heap()}},
+ {Type::Uint32Array(), {Type::Heap()}},
+ {Type::Int32Array(), {Type::Heap()}},
+ {Type::Float32Array(), {Type::Heap()}},
+ {Type::Float64Array(), {Type::Heap()}},
+ {Type::FloatishDoubleQ(), {Type::Floatish(), Type::DoubleQ()}},
+ {Type::FloatQDoubleQ(),
+ {Type::FloatQ(), Type::Floatish(), Type::DoubleQ()}},
+ {Type::Float(), {Type::FloatQ(), Type::Floatish()}},
+ {Type::FloatQ(), {Type::Floatish()}},
+ {Type::FixNum(),
+ {Type::Signed(), Type::Extern(), Type::Unsigned(), Type::Int(),
+ Type::Intish()}},
+ {Type::Unsigned(), {Type::Int(), Type::Intish()}},
+ {Type::Signed(), {Type::Extern(), Type::Int(), Type::Intish()}},
+ {Type::Int(), {Type::Intish()}},
+ {Type::Double(), {Type::DoubleQ(), Type::Extern()}},
+ }) {}
+
+ protected:
+ std::unordered_set<Type*> ParentsOf(Type* derived) const {
+ const auto parents_iter = parents_.find(derived);
+ if (parents_iter == parents_.end()) {
+ return std::unordered_set<Type*>();
+ }
+ return parents_iter->second;
+ }
+
+ class FunctionTypeBuilder {
+ public:
+ FunctionTypeBuilder(FunctionTypeBuilder&& b)
+ : function_type_(b.function_type_) {
+ b.function_type_ = nullptr;
+ }
+
+ FunctionTypeBuilder& operator=(FunctionTypeBuilder&& b) {
+ if (this != &b) {
+ function_type_ = b.function_type_;
+ b.function_type_ = nullptr;
+ }
+ return *this;
+ }
+
+ FunctionTypeBuilder(Zone* zone, Type* return_type)
+ : function_type_(Type::Function(zone, return_type)) {}
+
+ private:
+ static void AddAllArguments(AsmFunctionType*) {}
+
+ template <typename Arg, typename... Others>
+ static void AddAllArguments(AsmFunctionType* function_type, Arg* arg,
+ Others... others) {
+ CHECK(function_type != nullptr);
+ function_type->AddArgument((*arg)());
+ AddAllArguments(function_type, others...);
+ }
+
+ public:
+ template <typename... Args>
+ Type* operator()(Args... args) {
+ Type* ret = function_type_;
+ function_type_ = nullptr;
+ AddAllArguments(ret->AsFunctionType(), args...);
+ return ret;
+ }
+
+ private:
+ Type* function_type_;
+ };
+
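+ // Convenience wrapper so tests can spell a function type fluently, e.g.
+ // Function(Type::Int)(Type::Double, Type::Float) for "(double, float) -> int".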
+ FunctionTypeBuilder Function(Type* (*return_type)()) {
+ return FunctionTypeBuilder(zone(), (*return_type)());
+ }
+
+ template <typename... Overloads>
+ Type* Overload(Overloads... overloads) {
+ auto* ret = Type::OverloadedFunction(zone());
+ AddAllOverloads(ret->AsOverloadedFunctionType(), overloads...);
+ return ret;
+ }
+
+ private:
+ static void AddAllOverloads(AsmOverloadedFunctionType*) {}
+
+ template <typename Overload, typename... Others>
+ static void AddAllOverloads(AsmOverloadedFunctionType* function,
+ Overload* overload, Others... others) {
+ CHECK(function != nullptr);
+ function->AddOverload(overload);
+ AddAllOverloads(function, others...);
+ }
+
+ const std::unordered_map<Type*, std::unordered_set<Type*>> parents_;
+};
+
+// AsmValueTypeParents exposes the bitmasks for the parents of each value type
+// in asm's type system. It inherits from AsmValueType so that the kAsm<Foo>
+// members are available when expanding the FOR_EACH_ASM_VALUE_TYPE_LIST macro.
+class AsmValueTypeParents : private AsmValueType {
+ public:
+ enum : uint32_t {
+#define V(CamelName, string_name, number, parent_types) \
+ CamelName = parent_types,
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+ };
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(AsmValueTypeParents);
+};
+
+TEST_F(AsmTypeTest, ValidateBits) {
+ // Generic validation tests for the bits in the type system's type
+ // definitions.
+
+ std::unordered_set<Type*> seen_types;
+ std::unordered_set<uint32_t> seen_numbers;
+ uint32_t total_types = 0;
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ ++total_types; \
+ if (AsmValueTypeParents::CamelName != 0) { \
+ EXPECT_NE(0, ParentsOf(AsmType::CamelName()).size()) << #CamelName; \
+ } \
+ seen_types.insert(Type::CamelName()); \
+ seen_numbers.insert(number); \
+ /* Every ASM type must have a valid number. */ \
+ EXPECT_NE(0, number) << Type::CamelName()->Name(); \
+ /* Inheritance cycles - unlikely, but we're paranoid and check for it */ \
+ /* anyway. */ \
+ EXPECT_EQ(0, (1 << (number)) & AsmValueTypeParents::CamelName); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+
+ // At least one type was expanded.
+ EXPECT_GT(total_types, 0u);
+
+ // Each value type is unique.
+ EXPECT_EQ(total_types, seen_types.size());
+
+ // Each number is unique.
+ EXPECT_EQ(total_types, seen_numbers.size());
+}
+
+TEST_F(AsmTypeTest, SaneParentsMap) {
+  // This test ensures our parents map contains all the parent types that are
+  // specified in each type's declaration; it does not detect bogus (extra)
+  // inheritance entries.
+
+  // Handy-dandy lambda for counting set bits. Code borrowed from Stack
+  // Overflow.
+ auto NumberOfSetBits = [](uintptr_t parent_mask) -> uint32_t {
+ uint32_t parent_mask32 = static_cast<uint32_t>(parent_mask);
+ CHECK_EQ(parent_mask, parent_mask32);
+ parent_mask32 = parent_mask32 - ((parent_mask32 >> 1) & 0x55555555);
+ parent_mask32 =
+ (parent_mask32 & 0x33333333) + ((parent_mask32 >> 2) & 0x33333333);
+ return (((parent_mask32 + (parent_mask32 >> 4)) & 0x0F0F0F0F) *
+ 0x01010101) >>
+ 24;
+ };
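+
+  // Worked example of the bit-counting trick above (illustration only, not
+  // part of the test): for parent_mask32 = 0b1011 the subtraction step yields
+  // 0b0110 (pair counts 2 and 1 for the low and high bit pairs), the
+  // masked-add step yields 0b0011 (i.e. 3 set bits), and the 0x01010101
+  // multiply plus the final >> 24 gather that total into the top byte, so the
+  // lambda returns 3.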
+
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ const uintptr_t parents = \
+ reinterpret_cast<uintptr_t>(Type::CamelName()) & ~(1 << (number)); \
+ EXPECT_EQ(NumberOfSetBits(parents), \
+ 1 + ParentsOf(Type::CamelName()).size()) \
+ << Type::CamelName()->Name() << ", parents " \
+ << reinterpret_cast<void*>(parents) << ", type " \
+ << static_cast<void*>(Type::CamelName()); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+}
+
+TEST_F(AsmTypeTest, Names) {
+#define V(CamelName, string_name, number, parent_types) \
+ do { \
+ EXPECT_THAT(Type::CamelName()->Name(), StrEq(string_name)); \
+ } while (0);
+ FOR_EACH_ASM_VALUE_TYPE_LIST(V)
+#undef V
+
+ EXPECT_THAT(Function(Type::Int)(Type::Double, Type::Float)->Name(),
+ StrEq("(double, float) -> int"));
+
+ EXPECT_THAT(Overload(Function(Type::Int)(Type::Double, Type::Float),
+ Function(Type::Int)(Type::Int))
+ ->Name(),
+ StrEq("(double, float) -> int /\\ (int) -> int"));
+
+ EXPECT_THAT(Type::FroundType(zone())->Name(), StrEq("fround"));
+
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Signed(), Type::Int())->Name(),
+ StrEq("(int, int...) -> signed"));
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Float(), Type::Floatish())->Name(),
+ StrEq("(floatish, floatish...) -> float"));
+ EXPECT_THAT(Type::MinMaxType(zone(), Type::Double(), Type::DoubleQ())->Name(),
+ StrEq("(double?, double?...) -> double"));
+
+ EXPECT_THAT(Type::FFIType(zone())->Name(), StrEq("Function"));
+
+ auto* ft =
+ Type::FunctionTableType(zone(), 15, Function(Type::Double)(Type::Int));
+ EXPECT_THAT(ft->Name(), StrEq("(int) -> double[15]"));
+}
+
+TEST_F(AsmTypeTest, IsExactly) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ for (size_t jj = 0; jj < arraysize(test_types); ++jj) {
+ EXPECT_EQ(ii == jj, test_types[ii]->IsExactly(test_types[jj]))
+ << test_types[ii]->Name()
+ << ((ii == jj) ? " is not exactly " : " is exactly ")
+ << test_types[jj]->Name();
+ }
+ }
+}
+
+TEST_F(AsmTypeTest, IsA) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ for (size_t jj = 0; jj < arraysize(test_types); ++jj) {
+ const bool Expected =
+ (ii == jj) || ParentsOf(test_types[ii]).count(test_types[jj]) != 0;
+ EXPECT_EQ(Expected, test_types[ii]->IsA(test_types[jj]))
+ << test_types[ii]->Name() << (Expected ? " is not a " : " is a ")
+ << test_types[jj]->Name();
+ }
+ }
+}
+
+TEST_F(AsmTypeTest, ValidateCall) {
+ auto* min_max_int = Type::MinMaxType(zone(), Type::Signed(), Type::Int());
+ auto* i2s = Function(Type::Signed)(Type::Int);
+ auto* ii2s = Function(Type::Signed)(Type::Int, Type::Int);
+ auto* iii2s = Function(Type::Signed)(Type::Int, Type::Int, Type::Int);
+ auto* iiii2s =
+ Function(Type::Signed)(Type::Int, Type::Int, Type::Int, Type::Int);
+
+ EXPECT_EQ(Type::Signed(), min_max_int->AsCallableType()->ValidateCall(
+ min_max_int->AsFunctionType()->ReturnType(),
+ min_max_int->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Signed(), min_max_int->AsCallableType()->ValidateCall(
+ ii2s->AsFunctionType()->ReturnType(),
+ ii2s->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Signed(), min_max_int->AsCallableType()->ValidateCall(
+ iii2s->AsFunctionType()->ReturnType(),
+ iii2s->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Signed(), min_max_int->AsCallableType()->ValidateCall(
+ iiii2s->AsFunctionType()->ReturnType(),
+ iiii2s->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::None(), min_max_int->AsCallableType()->ValidateCall(
+ i2s->AsFunctionType()->ReturnType(),
+ i2s->AsFunctionType()->Arguments()));
+
+ auto* min_max_double =
+ Type::MinMaxType(zone(), Type::Double(), Type::Double());
+ auto* d2d = Function(Type::Double)(Type::Double);
+ auto* dd2d = Function(Type::Double)(Type::Double, Type::Double);
+ auto* ddd2d =
+ Function(Type::Double)(Type::Double, Type::Double, Type::Double);
+ auto* dddd2d = Function(Type::Double)(Type::Double, Type::Double,
+ Type::Double, Type::Double);
+ EXPECT_EQ(Type::Double(), min_max_double->AsCallableType()->ValidateCall(
+ min_max_double->AsFunctionType()->ReturnType(),
+ min_max_double->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), min_max_double->AsCallableType()->ValidateCall(
+ dd2d->AsFunctionType()->ReturnType(),
+ dd2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), min_max_double->AsCallableType()->ValidateCall(
+ ddd2d->AsFunctionType()->ReturnType(),
+ ddd2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), min_max_double->AsCallableType()->ValidateCall(
+ dddd2d->AsFunctionType()->ReturnType(),
+ dddd2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::None(), min_max_double->AsCallableType()->ValidateCall(
+ d2d->AsFunctionType()->ReturnType(),
+ d2d->AsFunctionType()->Arguments()));
+
+ auto* min_max = Overload(min_max_int, min_max_double);
+ EXPECT_EQ(Type::None(), min_max->AsCallableType()->ValidateCall(
+ i2s->AsFunctionType()->ReturnType(),
+ i2s->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::None(), min_max->AsCallableType()->ValidateCall(
+ d2d->AsFunctionType()->ReturnType(),
+ d2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Signed(), min_max->AsCallableType()->ValidateCall(
+ min_max_int->AsFunctionType()->ReturnType(),
+ min_max_int->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Signed(), min_max->AsCallableType()->ValidateCall(
+ ii2s->AsFunctionType()->ReturnType(),
+ ii2s->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Signed(), min_max->AsCallableType()->ValidateCall(
+ iii2s->AsFunctionType()->ReturnType(),
+ iii2s->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Signed(), min_max->AsCallableType()->ValidateCall(
+ iiii2s->AsFunctionType()->ReturnType(),
+ iiii2s->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), min_max->AsCallableType()->ValidateCall(
+ min_max_double->AsFunctionType()->ReturnType(),
+ min_max_double->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), min_max->AsCallableType()->ValidateCall(
+ dd2d->AsFunctionType()->ReturnType(),
+ dd2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), min_max->AsCallableType()->ValidateCall(
+ ddd2d->AsFunctionType()->ReturnType(),
+ ddd2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), min_max->AsCallableType()->ValidateCall(
+ dddd2d->AsFunctionType()->ReturnType(),
+ dddd2d->AsFunctionType()->Arguments()));
+
+ auto* fround = Type::FroundType(zone());
+
+ ZoneVector<AsmType*> arg(zone());
+ arg.push_back(Type::Floatish());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::FloatQ());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Float());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::DoubleQ());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Double());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Signed());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::Unsigned());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+ arg.clear();
+ arg.push_back(Type::FixNum());
+ EXPECT_EQ(Type::Float(),
+ fround->AsCallableType()->ValidateCall(Type::Float(), arg));
+
+ auto* idf2v = Function(Type::Void)(Type::Int, Type::Double, Type::Float);
+ auto* i2d = Function(Type::Double)(Type::Int);
+ auto* i2f = Function(Type::Float)(Type::Int);
+ auto* fi2d = Function(Type::Double)(Type::Float, Type::Int);
+ auto* idif2i =
+ Function(Type::Int)(Type::Int, Type::Double, Type::Int, Type::Float);
+ auto* overload = Overload(idf2v, i2f, /*i2d missing, */ fi2d, idif2i);
+ EXPECT_EQ(Type::Void(), overload->AsCallableType()->ValidateCall(
+ idf2v->AsFunctionType()->ReturnType(),
+ idf2v->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Float(), overload->AsCallableType()->ValidateCall(
+ i2f->AsFunctionType()->ReturnType(),
+ i2f->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Double(), overload->AsCallableType()->ValidateCall(
+ fi2d->AsFunctionType()->ReturnType(),
+ fi2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::Int(), overload->AsCallableType()->ValidateCall(
+ idif2i->AsFunctionType()->ReturnType(),
+ idif2i->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::None(), overload->AsCallableType()->ValidateCall(
+ i2d->AsFunctionType()->ReturnType(),
+ i2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::None(), i2f->AsCallableType()->ValidateCall(
+ i2d->AsFunctionType()->ReturnType(),
+ i2d->AsFunctionType()->Arguments()));
+
+ auto* ffi = Type::FFIType(zone());
+ AsmType* (*kReturnTypes[])() = {
+ Type::Void, Type::Double, Type::Signed,
+ };
+ AsmType* (*kParameterTypes[])() = {
+ Type::Double, Type::Signed, Type::FixNum,
+ };
+ for (size_t ii = 0; ii < arraysize(kReturnTypes); ++ii) {
+ for (size_t jj = 0; jj < arraysize(kParameterTypes); ++jj) {
+ auto* f = Function(kReturnTypes[ii])(kParameterTypes[jj]);
+ EXPECT_EQ(kReturnTypes[ii](), ffi->AsCallableType()->ValidateCall(
+ f->AsFunctionType()->ReturnType(),
+ f->AsFunctionType()->Arguments()))
+ << kReturnTypes[ii]()->Name();
+
+      // Calling with a non-parameter type should fail.
+ f = Function(kReturnTypes[ii])(kParameterTypes[jj], Type::Int);
+ EXPECT_EQ(Type::None(), ffi->AsCallableType()->ValidateCall(
+ f->AsFunctionType()->ReturnType(),
+ f->AsFunctionType()->Arguments()))
+ << kReturnTypes[ii]()->Name();
+ }
+ }
+
+ auto* ft0 = Type::FunctionTableType(zone(), 10, fi2d);
+ EXPECT_EQ(Type::Double(), ft0->AsCallableType()->ValidateCall(
+ fi2d->AsFunctionType()->ReturnType(),
+ fi2d->AsFunctionType()->Arguments()));
+ EXPECT_EQ(Type::None(), ft0->AsCallableType()->ValidateCall(
+ i2d->AsFunctionType()->ReturnType(),
+ i2d->AsFunctionType()->Arguments()));
+}
+
+TEST_F(AsmTypeTest, ToReturnType) {
+ std::unordered_map<AsmType*, AsmType*> kToReturnType = {
+ {Type::Signed(), Type::Signed()}, {Type::FixNum(), Type::Signed()},
+ {Type::Double(), Type::Double()}, {Type::Float(), Type::Float()},
+ {Type::Void(), Type::Void()},
+ };
+
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ auto* return_type = Type::None();
+ auto to_return_type_iter = kToReturnType.find(test_types[ii]);
+ if (to_return_type_iter != kToReturnType.end()) {
+ return_type = to_return_type_iter->second;
+ }
+ EXPECT_EQ(return_type, test_types[ii]->ToReturnType())
+ << return_type->Name() << " != " << test_types[ii]->ToReturnType();
+ }
+}
+
+TEST_F(AsmTypeTest, IsReturnType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> return_types{
+ Type::Double(), Type::Signed(), Type::Float(), Type::Void(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsReturnType = return_types.count(test_types[ii]);
+ EXPECT_EQ(IsReturnType, test_types[ii]->IsReturnType())
+ << test_types[ii]->Name()
+ << (IsReturnType ? " is not a return type" : " is a return type");
+ }
+}
+
+TEST_F(AsmTypeTest, ToParameterType) {
+ std::unordered_map<AsmType*, AsmType*> kToParameterType = {
+ {Type::Int(), Type::Int()}, {Type::Signed(), Type::Int()},
+ {Type::Unsigned(), Type::Int()}, {Type::FixNum(), Type::Int()},
+ {Type::Double(), Type::Double()}, {Type::Float(), Type::Float()},
+ };
+
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ auto* parameter_type = Type::None();
+ auto to_parameter_type_iter = kToParameterType.find(test_types[ii]);
+ if (to_parameter_type_iter != kToParameterType.end()) {
+ parameter_type = to_parameter_type_iter->second;
+ }
+ EXPECT_EQ(parameter_type, test_types[ii]->ToParameterType())
+ << parameter_type->Name()
+ << " != " << test_types[ii]->ToParameterType();
+ }
+}
+
+TEST_F(AsmTypeTest, IsParameterType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> parameter_types{
+ Type::Double(), Type::Int(), Type::Float(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsParameterType = parameter_types.count(test_types[ii]);
+ EXPECT_EQ(IsParameterType, test_types[ii]->IsParameterType())
+ << test_types[ii]->Name()
+ << (IsParameterType ? " is not a parameter type"
+ : " is a parameter type");
+ }
+}
+
+TEST_F(AsmTypeTest, IsComparableType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ std::unordered_set<Type*> comparable_types{
+ Type::Double(), Type::Signed(), Type::Unsigned(), Type::Float(),
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ const bool IsComparableType = comparable_types.count(test_types[ii]);
+ EXPECT_EQ(IsComparableType, test_types[ii]->IsComparableType())
+ << test_types[ii]->Name()
+ << (IsComparableType ? " is not a comparable type"
+ : " is a comparable type");
+ }
+}
+
+TEST_F(AsmTypeTest, ElementSizeInBytes) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto ElementSizeInBytesForType = [](Type* type) -> int32_t {
+ if (type == Type::Int8Array() || type == Type::Uint8Array()) {
+ return 1;
+ }
+ if (type == Type::Int16Array() || type == Type::Uint16Array()) {
+ return 2;
+ }
+ if (type == Type::Int32Array() || type == Type::Uint32Array() ||
+ type == Type::Float32Array()) {
+ return 4;
+ }
+ if (type == Type::Float64Array()) {
+ return 8;
+ }
+ return -1;
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(ElementSizeInBytesForType(test_types[ii]),
+ test_types[ii]->ElementSizeInBytes());
+ }
+}
+
+TEST_F(AsmTypeTest, LoadType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto LoadTypeForType = [](Type* type) -> Type* {
+ if (type == Type::Int8Array() || type == Type::Uint8Array() ||
+ type == Type::Int16Array() || type == Type::Uint16Array() ||
+ type == Type::Int32Array() || type == Type::Uint32Array()) {
+ return Type::Intish();
+ }
+
+ if (type == Type::Float32Array()) {
+ return Type::FloatQ();
+ }
+
+ if (type == Type::Float64Array()) {
+ return Type::DoubleQ();
+ }
+
+ return Type::None();
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(LoadTypeForType(test_types[ii]), test_types[ii]->LoadType());
+ }
+}
+
+TEST_F(AsmTypeTest, StoreType) {
+ Type* test_types[] = {
+#define CREATE(CamelName, string_name, number, parent_types) Type::CamelName(),
+ FOR_EACH_ASM_VALUE_TYPE_LIST(CREATE)
+#undef CREATE
+ Function(Type::Int)(Type::Double),
+ Function(Type::Int)(Type::DoubleQ),
+ Overload(Function(Type::Int)(Type::Double)),
+ Function(Type::Int)(Type::Int, Type::Int),
+ Type::MinMaxType(zone(), Type::Signed(), Type::Int()),
+ Function(Type::Int)(Type::Float), Type::FroundType(zone()),
+ Type::FFIType(zone()),
+ Type::FunctionTableType(zone(), 10, Function(Type::Void)()),
+ };
+
+ auto StoreTypeForType = [](Type* type) -> Type* {
+ if (type == Type::Int8Array() || type == Type::Uint8Array() ||
+ type == Type::Int16Array() || type == Type::Uint16Array() ||
+ type == Type::Int32Array() || type == Type::Uint32Array()) {
+ return Type::Intish();
+ }
+
+ if (type == Type::Float32Array()) {
+ return Type::FloatishDoubleQ();
+ }
+
+ if (type == Type::Float64Array()) {
+ return Type::FloatQDoubleQ();
+ }
+
+ return Type::None();
+ };
+
+ for (size_t ii = 0; ii < arraysize(test_types); ++ii) {
+ EXPECT_EQ(StoreTypeForType(test_types[ii]), test_types[ii]->StoreType())
+ << test_types[ii]->Name();
+ }
+}
+
+} // namespace
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/wasm/ast-decoder-unittest.cc b/test/unittests/wasm/ast-decoder-unittest.cc
index fe10115..8be0e41 100644
--- a/test/unittests/wasm/ast-decoder-unittest.cc
+++ b/test/unittests/wasm/ast-decoder-unittest.cc
@@ -82,7 +82,7 @@
// verification failures.
void Verify(ErrorCode expected, FunctionSig* sig, const byte* start,
const byte* end) {
- local_decls.Prepend(&start, &end);
+ local_decls.Prepend(zone(), &start, &end);
// Verify the code.
TreeResult result =
VerifyWasmCode(zone()->allocator(), module, sig, start, end);
@@ -105,8 +105,6 @@
}
FATAL(str.str().c_str());
}
-
- delete[] start; // local_decls.Prepend() allocated a new buffer.
}
void TestBinop(WasmOpcode opcode, FunctionSig* success) {
@@ -1158,7 +1156,6 @@
TestModuleEnv() {
instance = nullptr;
module = &mod;
- linker = nullptr;
}
byte AddGlobal(MachineType mem_type) {
mod.globals.push_back({0, 0, mem_type, 0, false});
@@ -1171,14 +1168,13 @@
return static_cast<byte>(mod.signatures.size() - 1);
}
byte AddFunction(FunctionSig* sig) {
- mod.functions.push_back({sig, // sig
- 0, // func_index
- 0, // sig_index
- 0, // name_offset
- 0, // name_length
- 0, // code_start_offset
- 0, // code_end_offset
- false}); // exported
+ mod.functions.push_back({sig, // sig
+ 0, // func_index
+ 0, // sig_index
+ 0, // name_offset
+ 0, // name_length
+ 0, // code_start_offset
+ 0}); // code_end_offset
CHECK(mod.functions.size() <= 127);
return static_cast<byte>(mod.functions.size() - 1);
}
@@ -2427,7 +2423,7 @@
local_decls.AddLocals(5, kAstF32);
local_decls.AddLocals(1337, kAstI32);
local_decls.AddLocals(212, kAstI64);
- local_decls.Prepend(&data, &end);
+ local_decls.Prepend(zone(), &data, &end);
AstLocalDecls decls(zone());
bool result = DecodeLocalDecls(decls, data, end);
@@ -2439,7 +2435,6 @@
pos = ExpectRun(map, pos, kAstF32, 5);
pos = ExpectRun(map, pos, kAstI32, 1337);
pos = ExpectRun(map, pos, kAstI64, 212);
- delete[] data;
}
} // namespace wasm
diff --git a/test/unittests/wasm/control-transfer-unittest.cc b/test/unittests/wasm/control-transfer-unittest.cc
new file mode 100644
index 0000000..2b67f12
--- /dev/null
+++ b/test/unittests/wasm/control-transfer-unittest.cc
@@ -0,0 +1,402 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+#include "src/v8.h"
+
+#include "src/wasm/wasm-interpreter.h"
+#include "src/wasm/wasm-macro-gen.h"
+
+using testing::MakeMatcher;
+using testing::Matcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+#define B1(a) kExprBlock, a, kExprEnd
+#define B2(a, b) kExprBlock, a, b, kExprEnd
+#define B3(a, b, c) kExprBlock, a, b, c, kExprEnd
+
+struct ExpectedTarget {
+ pc_t pc;
+ ControlTransfer expected;
+};
+
+// For nicer error messages.
+class ControlTransferMatcher : public MatcherInterface<const ControlTransfer&> {
+ public:
+ explicit ControlTransferMatcher(pc_t pc, const ControlTransfer& expected)
+ : pc_(pc), expected_(expected) {}
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "@" << pc_ << " {pcdiff = " << expected_.pcdiff
+ << ", spdiff = " << expected_.spdiff
+ << ", action = " << expected_.action << "}";
+ }
+
+ bool MatchAndExplain(const ControlTransfer& input,
+ MatchResultListener* listener) const override {
+ if (input.pcdiff != expected_.pcdiff || input.spdiff != expected_.spdiff ||
+ input.action != expected_.action) {
+ *listener << "@" << pc_ << " {pcdiff = " << input.pcdiff
+ << ", spdiff = " << input.spdiff
+ << ", action = " << input.action << "}";
+ return false;
+ }
+ return true;
+ }
+
+ private:
+ pc_t pc_;
+ const ControlTransfer& expected_;
+};
+
+class ControlTransferTest : public TestWithZone {
+ public:
+ void CheckControlTransfers(const byte* start, const byte* end,
+ ExpectedTarget* expected_targets,
+ size_t num_targets) {
+ ControlTransferMap map =
+ WasmInterpreter::ComputeControlTransfersForTesting(zone(), start, end);
+ // Check all control targets in the map.
+ for (size_t i = 0; i < num_targets; i++) {
+ pc_t pc = expected_targets[i].pc;
+ auto it = map.find(pc);
+ if (it == map.end()) {
+ printf("expected control target @ +%zu\n", pc);
+ EXPECT_TRUE(false);
+ } else {
+ ControlTransfer& expected = expected_targets[i].expected;
+ ControlTransfer& target = it->second;
+ EXPECT_THAT(target,
+ MakeMatcher(new ControlTransferMatcher(pc, expected)));
+ }
+ }
+
+ // Check there are no other control targets.
+ for (pc_t pc = 0; start + pc < end; pc++) {
+ bool found = false;
+ for (size_t i = 0; i < num_targets; i++) {
+ if (expected_targets[i].pc == pc) {
+ found = true;
+ break;
+ }
+ }
+ if (found) continue;
+ if (map.find(pc) != map.end()) {
+ printf("expected no control @ +%zu\n", pc);
+ EXPECT_TRUE(false);
+ }
+ }
+ }
+};
+
+// Macro for simplifying tests below.
+#define EXPECT_TARGETS(...) \
+ do { \
+ ExpectedTarget pairs[] = {__VA_ARGS__}; \
+ CheckControlTransfers(code, code + sizeof(code), pairs, arraysize(pairs)); \
+ } while (false)
+
+TEST_F(ControlTransferTest, SimpleIf) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprEnd // @3
+ };
+ EXPECT_TARGETS({2, {2, 0, ControlTransfer::kPushVoid}}, // --
+ {3, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, SimpleIf1) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleIf2) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprNop, // @4
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleIfElse) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprElse, // @3
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({2, {2, 0, ControlTransfer::kNoAction}}, // --
+ {3, {2, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, SimpleIfElse1) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprElse, // @4
+ kExprNop, // @5
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
+ {4, {3, 1, ControlTransfer::kPopAndRepush}}, // --
+ {6, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, IfBr) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({2, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {3, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, IfBrElse) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprElse, // @6
+ kExprEnd // @7
+ };
+ EXPECT_TARGETS({2, {5, 0, ControlTransfer::kNoAction}}, // --
+ {3, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {2, 1, ControlTransfer::kPopAndRepush}}, // --
+ {7, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, IfElseBr) {
+ byte code[] = {
+ kExprI32Const, // @0
+ 0, // +1
+ kExprIf, // @2
+ kExprNop, // @3
+ kExprElse, // @4
+ kExprBr, // @5
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @8
+ };
+ EXPECT_TARGETS({2, {3, 0, ControlTransfer::kNoAction}}, // --
+ {4, {5, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {8, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BlockEmpty) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprEnd // @1
+ };
+ EXPECT_TARGETS({1, {1, 0, ControlTransfer::kPushVoid}});
+}
+
+TEST_F(ControlTransferTest, Br0) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}},
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br1) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br2) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprNop, // @1
+ kExprNop, // @2
+ kExprBr, // @3
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({3, {4, 2, ControlTransfer::kPopAndRepush}}, // --
+ {6, {1, 3, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br0b) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprNop, // @4
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({1, {5, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, Br0c) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprNop, // @4
+ kExprNop, // @5
+ kExprEnd // @6
+ };
+ EXPECT_TARGETS({1, {6, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {1, 3, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoop1) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {-1, 0, ControlTransfer::kNoAction}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoop2) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 0, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {-2, 1, ControlTransfer::kNoAction}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoopExit1) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprBr, // @1
+ ARITY_0, // +1
+ 1, // +1
+ kExprEnd // @4
+ };
+ EXPECT_TARGETS({1, {4, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, SimpleLoopExit2) {
+ byte code[] = {
+ kExprLoop, // @0
+ kExprNop, // @1
+ kExprBr, // @2
+ ARITY_0, // +1
+ 1, // +1
+ kExprEnd // @5
+ };
+ EXPECT_TARGETS({2, {4, 1, ControlTransfer::kPopAndRepush}}, // --
+ {5, {1, 2, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable0) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprI8Const, // @1
+ 0, // +1
+ kExprBrTable, // @3
+ ARITY_0, // +1
+ 0, // +1
+ U32_LE(0), // +4
+ kExprEnd // @10
+ };
+ EXPECT_TARGETS({3, {8, 0, ControlTransfer::kPushVoid}}, // --
+ {10, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable1) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprI8Const, // @1
+ 0, // +1
+ kExprBrTable, // @3
+ ARITY_0, // +1
+ 1, // +1
+ U32_LE(0), // +4
+ U32_LE(0), // +4
+ kExprEnd // @14
+ };
+ EXPECT_TARGETS({3, {12, 0, ControlTransfer::kPushVoid}}, // --
+ {4, {11, 0, ControlTransfer::kPushVoid}}, // --
+ {14, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+TEST_F(ControlTransferTest, BrTable2) {
+ byte code[] = {
+ kExprBlock, // @0
+ kExprBlock, // @1
+ kExprI8Const, // @2
+ 0, // +1
+ kExprBrTable, // @4
+ ARITY_0, // +1
+ 2, // +1
+ U32_LE(0), // +4
+ U32_LE(0), // +4
+ U32_LE(1), // +4
+ kExprEnd, // @19
+      kExprEnd       // @20
+ };
+ EXPECT_TARGETS({4, {16, 0, ControlTransfer::kPushVoid}}, // --
+ {5, {15, 0, ControlTransfer::kPushVoid}}, // --
+ {6, {15, 0, ControlTransfer::kPushVoid}}, // --
+ {19, {1, 1, ControlTransfer::kPopAndRepush}}, // --
+ {20, {1, 1, ControlTransfer::kPopAndRepush}});
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/wasm/decoder-unittest.cc b/test/unittests/wasm/decoder-unittest.cc
index 11d68f1..e298f0b 100644
--- a/test/unittests/wasm/decoder-unittest.cc
+++ b/test/unittests/wasm/decoder-unittest.cc
@@ -4,6 +4,7 @@
#include "test/unittests/test-utils.h"
+#include "src/objects-inl.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-macro-gen.h"
@@ -22,7 +23,7 @@
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_u32v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -32,7 +33,7 @@
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i32v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -42,7 +43,7 @@
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_u64v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -52,7 +53,7 @@
do { \
const byte data[] = {__VA_ARGS__}; \
decoder.Reset(data, data + sizeof(data)); \
- int length; \
+ unsigned length; \
EXPECT_EQ(expected, \
decoder.checked_read_i64v(decoder.start(), 0, &length)); \
EXPECT_EQ(expected_length, length); \
@@ -365,7 +366,7 @@
TEST_F(DecoderTest, ReadU32v_off_end1) {
static const byte data[] = {U32V_1(11)};
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(0, length);
@@ -375,7 +376,7 @@
TEST_F(DecoderTest, ReadU32v_off_end2) {
static const byte data[] = {U32V_2(1111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -386,7 +387,7 @@
TEST_F(DecoderTest, ReadU32v_off_end3) {
static const byte data[] = {U32V_3(111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -397,7 +398,7 @@
TEST_F(DecoderTest, ReadU32v_off_end4) {
static const byte data[] = {U32V_4(11111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -408,7 +409,7 @@
TEST_F(DecoderTest, ReadU32v_off_end5) {
static const byte data[] = {U32V_5(111111111)};
for (size_t i = 0; i < sizeof(data); i++) {
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + i);
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(i, length);
@@ -420,7 +421,7 @@
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 16; i++) {
data[4] = static_cast<byte>(i << 4);
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u32v(decoder.start(), 0, &length);
EXPECT_EQ(5, length);
@@ -430,7 +431,7 @@
TEST_F(DecoderTest, ReadI32v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
@@ -440,7 +441,7 @@
TEST_F(DecoderTest, ReadI32v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i32v(decoder.start(), 0, &length);
@@ -477,7 +478,7 @@
// foreach buffer size 0...5
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
uint32_t result = decoder.checked_read_u32v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -533,7 +534,7 @@
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int length;
+ unsigned length;
uint64_t result = decoder.checked_read_u64v(data, 0, &length);
if (limit <= index) {
EXPECT_FALSE(decoder.ok());
@@ -574,7 +575,7 @@
// foreach buffer size 0...10
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
uint64_t result = decoder.checked_read_u64v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -616,7 +617,7 @@
// foreach buffer size 0...10
for (int limit = 0; limit <= kMaxSize; limit++) {
decoder.Reset(data, data + limit);
- int rlen;
+ unsigned rlen;
int64_t result = decoder.checked_read_i64v(data, 0, &rlen);
if (limit < length) {
EXPECT_FALSE(decoder.ok());
@@ -634,7 +635,7 @@
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00};
for (int i = 1; i < 128; i++) {
data[9] = static_cast<byte>(i << 1);
- int length = 0;
+ unsigned length = 0;
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_u64v(decoder.start(), 0, &length);
EXPECT_EQ(10, length);
@@ -644,7 +645,7 @@
TEST_F(DecoderTest, ReadI64v_extra_bits_negative) {
// OK for negative signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
@@ -654,7 +655,7 @@
TEST_F(DecoderTest, ReadI64v_extra_bits_positive) {
// Not OK for positive signed values to have extra ones.
- int length = 0;
+ unsigned length = 0;
byte data[] = {0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x77};
decoder.Reset(data, data + sizeof(data));
decoder.checked_read_i64v(decoder.start(), 0, &length);
diff --git a/test/unittests/wasm/leb-helper-unittest.cc b/test/unittests/wasm/leb-helper-unittest.cc
index ed9f0a3..b975933 100644
--- a/test/unittests/wasm/leb-helper-unittest.cc
+++ b/test/unittests/wasm/leb-helper-unittest.cc
@@ -4,6 +4,7 @@
#include "test/unittests/test-utils.h"
+#include "src/objects-inl.h"
#include "src/wasm/decoder.h"
#include "src/wasm/leb-helper.h"
@@ -96,7 +97,7 @@
EXPECT_EQ(LEBHelper::sizeof_##name(val), \
static_cast<size_t>(ptr - buffer)); \
Decoder decoder(buffer, buffer + kSize); \
- int length = 0; \
+ unsigned length = 0; \
ctype result = decoder.checked_read_##name(buffer, 0, &length); \
EXPECT_EQ(val, result); \
EXPECT_EQ(LEBHelper::sizeof_##name(val), static_cast<size_t>(length)); \
diff --git a/test/unittests/wasm/loop-assignment-analysis-unittest.cc b/test/unittests/wasm/loop-assignment-analysis-unittest.cc
index 7d97c50..c134de8 100644
--- a/test/unittests/wasm/loop-assignment-analysis-unittest.cc
+++ b/test/unittests/wasm/loop-assignment-analysis-unittest.cc
@@ -180,6 +180,14 @@
}
}
+TEST_F(WasmLoopAssignmentAnalyzerTest, Malformed) {
+ byte code[] = {kExprLoop, kExprF32Neg, kExprBrTable, 0x0e, 'h', 'e',
+ 'l', 'l', 'o', ',', ' ', 'w',
+ 'o', 'r', 'l', 'd', '!'};
+ BitVector* assigned = Analyze(code, code + arraysize(code));
+ CHECK_NULL(assigned);
+}
+
} // namespace wasm
} // namespace internal
} // namespace v8
diff --git a/test/unittests/wasm/module-decoder-unittest.cc b/test/unittests/wasm/module-decoder-unittest.cc
index 7090c1f..789112a 100644
--- a/test/unittests/wasm/module-decoder-unittest.cc
+++ b/test/unittests/wasm/module-decoder-unittest.cc
@@ -4,6 +4,8 @@
#include "test/unittests/test-utils.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-macro-gen.h"
#include "src/wasm/wasm-opcodes.h"
@@ -39,6 +41,38 @@
#define EMPTY_FUNCTION_BODIES_SECTION SECTION(FUNCTION_BODIES, 1), 0
#define EMPTY_NAMES_SECTION SECTION(NAMES, 1), 0
+#define X1(...) __VA_ARGS__
+#define X2(...) __VA_ARGS__, __VA_ARGS__
+#define X3(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
+#define X4(...) __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__
+
+#define ONE_EMPTY_FUNCTION WASM_SECTION_FUNCTION_SIGNATURES, 1 + 1 * 1, 1, X1(0)
+
+#define TWO_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 2 * 1, 2, X2(0)
+
+#define THREE_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 3 * 1, 3, X3(0)
+
+#define FOUR_EMPTY_FUNCTIONS \
+ WASM_SECTION_FUNCTION_SIGNATURES, 1 + 4 * 1, 4, X4(0)
+
+#define ONE_EMPTY_BODY \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 1 * (1 + SIZEOF_EMPTY_BODY), 1, \
+ X1(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define TWO_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 2 * (1 + SIZEOF_EMPTY_BODY), 2, \
+ X2(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define THREE_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 3 * (1 + SIZEOF_EMPTY_BODY), 3, \
+ X3(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
+#define FOUR_EMPTY_BODIES \
+ WASM_SECTION_FUNCTION_BODIES, 1 + 4 * (1 + SIZEOF_EMPTY_BODY), 4, \
+ X4(SIZEOF_EMPTY_BODY, EMPTY_BODY)
+
#define SIGNATURES_SECTION_VOID_VOID \
SECTION(SIGNATURES, 1 + SIZEOF_SIG_ENTRY_v_v), 1, SIG_ENTRY_v_v
@@ -156,7 +190,7 @@
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(1, global->name_length);
EXPECT_EQ(MachineType::Int32(), global->type);
@@ -169,6 +203,21 @@
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
+TEST_F(WasmModuleVerifyTest, Global_invalid_type) {
+ static const byte data[] = {
+ SECTION(GLOBALS, 5), // --
+ 1,
+ NAME_LENGTH(1),
+ 'g', // name
+ 64, // invalid memory type
+ 0, // exported
+ };
+
+ ModuleResult result = DecodeModuleNoHeader(data, data + sizeof(data));
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+}
+
TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
static const byte data[] = {
SECTION(GLOBALS, 1), // --
@@ -262,8 +311,8 @@
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* g0 = &result.val->globals[0];
- WasmGlobal* g1 = &result.val->globals[1];
+ const WasmGlobal* g0 = &result.val->globals[0];
+ const WasmGlobal* g1 = &result.val->globals[1];
EXPECT_EQ(0, g0->name_length);
EXPECT_EQ(MachineType::Float32(), g0->type);
@@ -272,7 +321,7 @@
EXPECT_EQ(0, g1->name_length);
EXPECT_EQ(MachineType::Float64(), g1->type);
- EXPECT_EQ(0, g1->offset);
+ EXPECT_EQ(8, g1->offset);
EXPECT_TRUE(g1->exported);
if (result.val) delete result.val;
@@ -321,163 +370,6 @@
EXPECT_OFF_END_FAILURE(data, 1, sizeof(data));
}
-TEST_F(WasmModuleVerifyTest, FunctionWithoutSig) {
- static const byte data[] = {
- SECTION(OLD_FUNCTIONS, 25), 1,
- // func#0 ------------------------------------------------------
- SIG_INDEX(0), // signature index
- NO_NAME, // name length
- U32_LE(0), // code start offset
- U32_LE(0), // code end offset
- U16_LE(899), // local int32 count
- U16_LE(799), // local int64 count
- U16_LE(699), // local float32 count
- U16_LE(599), // local float64 count
- 0, // exported
- 0 // external
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_FALSE(result.ok());
- if (result.val) delete result.val;
-}
-
-TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
- const int kCodeStartOffset = 41;
- const int kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- // signatures
- SIGNATURES_SECTION_VOID_VOID,
- // func#0 ------------------------------------------------------
- SECTION(OLD_FUNCTIONS, 10), 1, kDeclFunctionExport | kDeclFunctionName,
- SIG_INDEX(0), // signature index
- NAME_LENGTH(2), 'h', 'i', // name
- 1, 0, // size
- kExprNop,
- };
-
- {
- // Should decode to exactly one function.
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_OK(result);
- EXPECT_EQ(0, result.val->globals.size());
- EXPECT_EQ(1, result.val->signatures.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(0, result.val->data_segments.size());
- EXPECT_EQ(0, result.val->function_table.size());
-
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(37, function->name_offset);
- EXPECT_EQ(2, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_TRUE(function->exported);
-
- if (result.val) delete result.val;
- }
-
- EXPECT_OFF_END_FAILURE(data, 16, sizeof(data));
-}
-
-TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
- static const byte kCodeStartOffset = 38;
- static const byte kCodeEndOffset = kCodeStartOffset + 1;
-
- static const byte data[] = {
- SIGNATURES_SECTION_VOID_VOID, // --
- SECTION(OLD_FUNCTIONS, 7), 1,
- // func#0 ------------------------------------------------------
- 0, // no name, no locals
- 0, 0, // signature index
- 1, 0, // body size
- kExprNop // body
- };
-
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_OK(result);
- EXPECT_EQ(1, result.val->functions.size());
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(0, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_FALSE(function->exported);
-
- if (result.val) delete result.val;
-}
-
-TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
- static const byte kNameOffset = 49;
- static const byte kCodeStartOffset = 53;
- static const byte kCodeEndOffset = kCodeStartOffset + 3;
- static const byte kDataSegmentSourceOffset = kCodeEndOffset + 22;
-
- static const byte data[] = {
- // global#0 --------------------------------------------------
- SECTION(GLOBALS, 4), 1,
- 0, // name length
- kMemU8, // memory type
- 0, // exported
- // sig#0 -----------------------------------------------------
- SIGNATURES_SECTION_VOID_VOID,
- // func#0 ----------------------------------------------------
- SECTION(OLD_FUNCTIONS, 12), 1,
- kDeclFunctionName, // --
- SIG_INDEX(0), // signature index
- 2, 'h', 'i', // name
- 3, 0, // body size
- kExprNop, // func#0 body
- kExprNop, // func#0 body
- kExprNop, // func#0 body
- // memory section --------------------------------------------
- SECTION(MEMORY, 3), 28, 28, 1,
- // segment#0 -------------------------------------------------
- SECTION(DATA_SEGMENTS, 10), 1,
- U32V_3(0x8b3ae), // dest addr
- U32V_1(5), // source size
- 0, 1, 2, 3, 4, // data bytes
- // rest ------------------------------------------------------
- SECTION(END, 0),
- };
-
- {
- ModuleResult result = DecodeModule(data, data + arraysize(data));
- EXPECT_OK(result);
- EXPECT_EQ(1, result.val->globals.size());
- EXPECT_EQ(1, result.val->functions.size());
- EXPECT_EQ(1, result.val->data_segments.size());
-
- WasmGlobal* global = &result.val->globals.back();
-
- EXPECT_EQ(0, global->name_length);
- EXPECT_EQ(MachineType::Uint8(), global->type);
- EXPECT_EQ(0, global->offset);
- EXPECT_FALSE(global->exported);
-
- WasmFunction* function = &result.val->functions.back();
-
- EXPECT_EQ(kNameOffset, function->name_offset);
- EXPECT_EQ(2, function->name_length);
- EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
- EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
-
- EXPECT_FALSE(function->exported);
-
- WasmDataSegment* segment = &result.val->data_segments.back();
-
- EXPECT_EQ(0x8b3ae, segment->dest_addr);
- EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
- EXPECT_EQ(5, segment->source_size);
- EXPECT_TRUE(segment->init);
-
- if (result.val) delete result.val;
- }
-}
-
TEST_F(WasmModuleVerifyTest, OneDataSegment) {
const byte kDataSegmentSourceOffset = 30;
const byte data[] = {
@@ -502,7 +394,7 @@
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(1, result.val->data_segments.size());
- WasmDataSegment* segment = &result.val->data_segments.back();
+ const WasmDataSegment* segment = &result.val->data_segments.back();
EXPECT_EQ(0x9bbaa, segment->dest_addr);
EXPECT_EQ(kDataSegmentSourceOffset, segment->source_offset);
@@ -553,8 +445,8 @@
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(2, result.val->data_segments.size());
- WasmDataSegment* s0 = &result.val->data_segments[0];
- WasmDataSegment* s1 = &result.val->data_segments[1];
+ const WasmDataSegment* s0 = &result.val->data_segments[0];
+ const WasmDataSegment* s1 = &result.val->data_segments[1];
EXPECT_EQ(0x7ffee, s0->dest_addr);
EXPECT_EQ(kDataSegment0SourceOffset, s0->source_offset);
@@ -605,9 +497,8 @@
static const byte data[] = {
// sig#0 -------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
- // func#0 ------------------------------------------------------
- SECTION(OLD_FUNCTIONS, 1 + SIZEOF_EMPTY_FUNCTION), 1, // --
- EMPTY_FUNCTION(0),
+ // funcs ------------------------------------------------------
+ ONE_EMPTY_FUNCTION,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 2), 1, U32V_1(0)};
@@ -629,12 +520,8 @@
2, // --
SIG_ENTRY_v_v, // void -> void
SIG_ENTRY_v_x(kLocalI32), // void -> i32
- // func#0 ------------------------------------------------------
- SECTION(OLD_FUNCTIONS, 1 + 4 * SIZEOF_EMPTY_FUNCTION), 4, // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(1), // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(1), // --
+ // funcs ------------------------------------------------------
+ FOUR_EMPTY_FUNCTIONS,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 9), 8,
U32V_1(0), // --
@@ -645,7 +532,7 @@
U32V_1(1), // --
U32V_1(2), // --
U32V_1(3), // --
- };
+ FOUR_EMPTY_BODIES};
ModuleResult result = DecodeModule(data, data + arraysize(data));
EXPECT_OK(result);
@@ -676,8 +563,7 @@
// sig#0 -------------------------------------------------------
SIGNATURES_SECTION_VOID_VOID,
// functions ---------------------------------------------------
- SECTION(OLD_FUNCTIONS, 1 + SIZEOF_EMPTY_FUNCTION), 1, // --
- EMPTY_FUNCTION(0),
+ ONE_EMPTY_FUNCTION,
// indirect table ----------------------------------------------
SECTION(FUNCTION_TABLE, 3), 1, 1, 0,
};
@@ -839,7 +725,6 @@
EXPECT_EQ(SIZEOF_SIG_ENTRY_v_v, function->code_start_offset);
EXPECT_EQ(arraysize(data), function->code_end_offset);
// TODO(titzer): verify encoding of local declarations
- EXPECT_FALSE(function->exported);
}
if (result.val) delete result.val;
@@ -983,7 +868,7 @@
EXPECT_EQ(0, result.val->functions.size());
EXPECT_EQ(0, result.val->data_segments.size());
- WasmGlobal* global = &result.val->globals.back();
+ const WasmGlobal* global = &result.val->globals.back();
EXPECT_EQ(0, global->name_length);
EXPECT_EQ(MachineType::Int32(), global->type);
@@ -1071,22 +956,24 @@
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty1) {
- static const byte data[] = {
- // signatures
- SIGNATURES_SECTION_VOID_VOID,
- SECTION(OLD_FUNCTIONS, 1 + SIZEOF_EMPTY_FUNCTION),
- 1,
- EMPTY_FUNCTION(0),
- SECTION(EXPORT_TABLE, 1),
- 0 // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID, ONE_EMPTY_FUNCTION,
+ SECTION(EXPORT_TABLE, 1),
+ 0, // --
+ ONE_EMPTY_BODY};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(0, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTable_empty2) {
static const byte data[] = {
- SECTION(SIGNATURES, 1), 0, SECTION(OLD_FUNCTIONS, 1), 0,
- SECTION(EXPORT_TABLE, 1), 0 // --
+ SECTION(SIGNATURES, 1), 0, SECTION(EXPORT_TABLE, 1), 0 // --
};
// TODO(titzer): current behavior treats empty functions section as missing.
EXPECT_FAILURE(data);
@@ -1105,85 +992,88 @@
}
TEST_F(WasmModuleVerifyTest, ExportTableOne) {
- static const byte data[] = {
- // signatures
- SIGNATURES_SECTION_VOID_VOID,
- SECTION(OLD_FUNCTIONS, 1 + SIZEOF_EMPTY_FUNCTION),
- 1, // functions
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 3),
- 1, // exports
- FUNC_INDEX(0), // --
- NO_NAME // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
+ SECTION(EXPORT_TABLE, 3),
+ 1, // exports
+ FUNC_INDEX(0), // --
+ NO_NAME, // --
+ ONE_EMPTY_BODY};
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(1, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableTwo) {
- static const byte data[] = {
- // signatures
- SIGNATURES_SECTION_VOID_VOID,
- SECTION(OLD_FUNCTIONS, 1 + SIZEOF_EMPTY_FUNCTION),
- 1, // functions
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 12),
- 2, // exports
- FUNC_INDEX(0), // --
- NAME_LENGTH(4),
- 'n',
- 'a',
- 'm',
- 'e', // --
- FUNC_INDEX(0), // --
- NAME_LENGTH(3),
- 'n',
- 'o',
- 'm' // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ ONE_EMPTY_FUNCTION,
+ SECTION(EXPORT_TABLE, 12),
+ 2, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(4),
+ 'n',
+ 'a',
+ 'm',
+ 'e', // --
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(3),
+ 'n',
+ 'o',
+ 'm', // --
+ ONE_EMPTY_BODY};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(1, result.val->functions.size());
+ EXPECT_EQ(2, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThree) {
- static const byte data[] = {
- // signatures
- SIGNATURES_SECTION_VOID_VOID,
- SECTION(OLD_FUNCTIONS, 1 + 3 * SIZEOF_EMPTY_FUNCTION),
- 3, // functions
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 10),
- 3, // exports
- FUNC_INDEX(0), // --
- NAME_LENGTH(1),
- 'a', // --
- FUNC_INDEX(1), // --
- NAME_LENGTH(1),
- 'b', // --
- FUNC_INDEX(2), // --
- NAME_LENGTH(1),
- 'c' // --
- };
- EXPECT_VERIFIES(data);
+ static const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ THREE_EMPTY_FUNCTIONS,
+ SECTION(EXPORT_TABLE, 10),
+ 3, // exports
+ FUNC_INDEX(0), // --
+ NAME_LENGTH(1),
+ 'a', // --
+ FUNC_INDEX(1), // --
+ NAME_LENGTH(1),
+ 'b', // --
+ FUNC_INDEX(2), // --
+ NAME_LENGTH(1),
+ 'c', // --
+ THREE_EMPTY_BODIES};
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_OK(result);
+
+ EXPECT_EQ(3, result.val->functions.size());
+ EXPECT_EQ(3, result.val->export_table.size());
+
+ if (result.val) delete result.val;
}
TEST_F(WasmModuleVerifyTest, ExportTableThreeOne) {
for (int i = 0; i < 6; i++) {
- const byte data[] = {
- // signatures
- SIGNATURES_SECTION_VOID_VOID,
- SECTION(OLD_FUNCTIONS, 1 + 3 * SIZEOF_EMPTY_FUNCTION),
- 3, // functions
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- EMPTY_FUNCTION(0), // --
- SECTION(EXPORT_TABLE, 5),
- 1, // exports
- FUNC_INDEX(i), // --
- NAME_LENGTH(2),
- 'e',
- 'x', // --
- };
+ const byte data[] = {// signatures
+ SIGNATURES_SECTION_VOID_VOID,
+ THREE_EMPTY_FUNCTIONS,
+ SECTION(EXPORT_TABLE, 5),
+ 1, // exports
+ FUNC_INDEX(i), // --
+ NAME_LENGTH(2),
+ 'e',
+ 'x', // --
+ THREE_EMPTY_BODIES};
if (i < 3) {
EXPECT_VERIFIES(data);
@@ -1197,9 +1087,7 @@
static const byte data[] = {
// signatures
SIGNATURES_SECTION_VOID_VOID,
- SECTION(OLD_FUNCTIONS, 1 + SIZEOF_EMPTY_FUNCTION),
- 1, // functions
- EMPTY_FUNCTION(0), // --
+ ONE_EMPTY_FUNCTION,
SECTION(EXPORT_TABLE, 1 + 6),
1, // exports
FUNC_INDEX(0), // --