Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/test/unittests/atomic-utils-unittest.cc b/test/unittests/atomic-utils-unittest.cc
new file mode 100644
index 0000000..ad33853
--- /dev/null
+++ b/test/unittests/atomic-utils-unittest.cc
@@ -0,0 +1,217 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+
+#include "src/atomic-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(AtomicNumber, Constructor) {
+ // Test some common types.
+ AtomicNumber<int> zero_int;
+ AtomicNumber<size_t> zero_size_t;
+ AtomicNumber<intptr_t> zero_intptr_t;
+ EXPECT_EQ(0, zero_int.Value());
+ EXPECT_EQ(0U, zero_size_t.Value());
+ EXPECT_EQ(0, zero_intptr_t.Value());
+}
+
+
+TEST(AtomicNumber, Value) {
+ AtomicNumber<int> a(1);
+ EXPECT_EQ(1, a.Value());
+ AtomicNumber<int> b(-1);
+ EXPECT_EQ(-1, b.Value());
+ AtomicNumber<size_t> c(1);
+ EXPECT_EQ(1U, c.Value());
+ AtomicNumber<size_t> d(static_cast<size_t>(-1));
+ EXPECT_EQ(std::numeric_limits<size_t>::max(), d.Value());
+}
+
+
+TEST(AtomicNumber, SetValue) {
+ AtomicNumber<int> a(1);
+ a.SetValue(-1);
+ EXPECT_EQ(-1, a.Value());
+}
+
+
+TEST(AtomicNumber, Increment) {
+ AtomicNumber<int> a(std::numeric_limits<int>::max());
+ a.Increment(1);
+ EXPECT_EQ(std::numeric_limits<int>::min(), a.Value());
+ // Check that potential signed-ness of the underlying storage has no impact
+ // on unsigned types.
+ AtomicNumber<size_t> b(std::numeric_limits<intptr_t>::max());
+ b.Increment(1);
+ EXPECT_EQ(static_cast<size_t>(std::numeric_limits<intptr_t>::max()) + 1,
+ b.Value());
+ // Should work as decrement as well.
+ AtomicNumber<size_t> c(1);
+ c.Increment(-1);
+ EXPECT_EQ(0U, c.Value());
+ c.Increment(-1);
+ EXPECT_EQ(std::numeric_limits<size_t>::max(), c.Value());
+}
+
+
+namespace {
+
+enum TestFlag {
+ kA,
+ kB,
+ kC,
+};
+
+} // namespace
+
+
+TEST(AtomicValue, Initial) {
+ AtomicValue<TestFlag> a(kA);
+ EXPECT_EQ(TestFlag::kA, a.Value());
+}
+
+
+TEST(AtomicValue, TrySetValue) {
+ AtomicValue<TestFlag> a(kA);
+ EXPECT_FALSE(a.TrySetValue(kB, kC));
+ EXPECT_TRUE(a.TrySetValue(kA, kC));
+ EXPECT_EQ(TestFlag::kC, a.Value());
+}
+
+
+TEST(AtomicValue, SetValue) {
+ AtomicValue<TestFlag> a(kB);
+ a.SetValue(kC);
+ EXPECT_EQ(TestFlag::kC, a.Value());
+}
+
+
+TEST(AtomicValue, WithVoidStar) {
+ AtomicValue<void*> a(nullptr);
+ AtomicValue<void*> dummy(nullptr);
+ EXPECT_EQ(nullptr, a.Value());
+ a.SetValue(&a);
+ EXPECT_EQ(&a, a.Value());
+ EXPECT_FALSE(a.TrySetValue(nullptr, &dummy));
+ EXPECT_TRUE(a.TrySetValue(&a, &dummy));
+ EXPECT_EQ(&dummy, a.Value());
+}
+
+
+namespace {
+
+enum TestSetValue { kAA, kBB, kCC, kLastValue = kCC };
+
+} // namespace
+
+
+TEST(AtomicEnumSet, Constructor) {
+ AtomicEnumSet<TestSetValue> a;
+ EXPECT_TRUE(a.IsEmpty());
+ EXPECT_FALSE(a.Contains(kAA));
+}
+
+
+TEST(AtomicEnumSet, AddSingle) {
+ AtomicEnumSet<TestSetValue> a;
+ a.Add(kAA);
+ EXPECT_FALSE(a.IsEmpty());
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_FALSE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+}
+
+
+TEST(AtomicEnumSet, AddOtherSet) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ EXPECT_FALSE(a.IsEmpty());
+ EXPECT_TRUE(b.IsEmpty());
+ b.Add(a);
+ EXPECT_FALSE(b.IsEmpty());
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(b.Contains(kAA));
+}
+
+
+TEST(AtomicEnumSet, RemoveSingle) {
+ AtomicEnumSet<TestSetValue> a;
+ a.Add(kAA);
+ a.Add(kBB);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+ a.Remove(kAA);
+ EXPECT_FALSE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+}
+
+
+TEST(AtomicEnumSet, RemoveOtherSet) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ a.Add(kBB);
+ b.Add(kBB);
+ a.Remove(b);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_FALSE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+}
+
+
+TEST(AtomicEnumSet, RemoveEmptySet) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ a.Add(kBB);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+ EXPECT_TRUE(b.IsEmpty());
+ a.Remove(b);
+ EXPECT_TRUE(a.Contains(kAA));
+ EXPECT_TRUE(a.Contains(kBB));
+ EXPECT_FALSE(a.Contains(kCC));
+}
+
+
+TEST(AtomicEnumSet, Intersect) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ b.Add(kCC);
+ a.Intersect(b);
+ EXPECT_TRUE(a.IsEmpty());
+}
+
+
+TEST(AtomicEnumSet, ContainsAnyOf) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ b.Add(kCC);
+ EXPECT_FALSE(a.ContainsAnyOf(b));
+ b.Add(kAA);
+ EXPECT_TRUE(a.ContainsAnyOf(b));
+}
+
+
+TEST(AtomicEnumSet, Equality) {
+ AtomicEnumSet<TestSetValue> a;
+ AtomicEnumSet<TestSetValue> b;
+ a.Add(kAA);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+ b.Add(kAA);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/base/bits-unittest.cc b/test/unittests/base/bits-unittest.cc
index 9caba84..3d17a05 100644
--- a/test/unittests/base/bits-unittest.cc
+++ b/test/unittests/base/bits-unittest.cc
@@ -255,6 +255,25 @@
}
+TEST(Bits, UnsignedAddOverflow32) {
+ uint32_t val = 0;
+ EXPECT_FALSE(UnsignedAddOverflow32(0, 0, &val));
+ EXPECT_EQ(0u, val);
+ EXPECT_TRUE(
+ UnsignedAddOverflow32(std::numeric_limits<uint32_t>::max(), 1u, &val));
+ EXPECT_EQ(std::numeric_limits<uint32_t>::min(), val);
+ EXPECT_TRUE(UnsignedAddOverflow32(std::numeric_limits<uint32_t>::max(),
+ std::numeric_limits<uint32_t>::max(),
+ &val));
+ TRACED_FORRANGE(uint32_t, i, 1, 50) {
+ TRACED_FORRANGE(uint32_t, j, 1, i) {
+ EXPECT_FALSE(UnsignedAddOverflow32(i, j, &val));
+ EXPECT_EQ(i + j, val);
+ }
+ }
+}
+
+
TEST(Bits, UnsignedDiv32) {
TRACED_FORRANGE(uint32_t, i, 0, 50) {
EXPECT_EQ(0u, UnsignedDiv32(i, 0));
diff --git a/test/unittests/base/cpu-unittest.cc b/test/unittests/base/cpu-unittest.cc
index 5c58f86..c12e339 100644
--- a/test/unittests/base/cpu-unittest.cc
+++ b/test/unittests/base/cpu-unittest.cc
@@ -18,6 +18,8 @@
EXPECT_TRUE(!cpu.has_ssse3() || cpu.has_sse3());
EXPECT_TRUE(!cpu.has_sse41() || cpu.has_sse3());
EXPECT_TRUE(!cpu.has_sse42() || cpu.has_sse41());
+ EXPECT_TRUE(!cpu.has_avx() || cpu.has_sse2());
+ EXPECT_TRUE(!cpu.has_fma3() || cpu.has_avx());
// arm features
EXPECT_TRUE(!cpu.has_vfp3_d32() || cpu.has_vfp3());
diff --git a/test/unittests/base/logging-unittest.cc b/test/unittests/base/logging-unittest.cc
new file mode 100644
index 0000000..918feb1
--- /dev/null
+++ b/test/unittests/base/logging-unittest.cc
@@ -0,0 +1,19 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+TEST(LoggingTest, CheckEQImpl) {
+ EXPECT_EQ(nullptr, CheckEQImpl(0.0, 0.0, ""));
+ EXPECT_EQ(nullptr, CheckEQImpl(0.0, -0.0, ""));
+ EXPECT_EQ(nullptr, CheckEQImpl(-0.0, 0.0, ""));
+ EXPECT_EQ(nullptr, CheckEQImpl(-0.0, -0.0, ""));
+}
+
+} // namespace base
+} // namespace v8
diff --git a/test/unittests/base/platform/condition-variable-unittest.cc b/test/unittests/base/platform/condition-variable-unittest.cc
index fe0ad2a..43fd335 100644
--- a/test/unittests/base/platform/condition-variable-unittest.cc
+++ b/test/unittests/base/platform/condition-variable-unittest.cc
@@ -29,15 +29,14 @@
namespace {
-class ThreadWithMutexAndConditionVariable FINAL : public Thread {
+class ThreadWithMutexAndConditionVariable final : public Thread {
public:
ThreadWithMutexAndConditionVariable()
: Thread(Options("ThreadWithMutexAndConditionVariable")),
running_(false),
finished_(false) {}
- virtual ~ThreadWithMutexAndConditionVariable() {}
- virtual void Run() OVERRIDE {
+ void Run() override {
LockGuard<Mutex> lock_guard(&mutex_);
running_ = true;
cv_.NotifyOne();
@@ -108,7 +107,7 @@
namespace {
-class ThreadWithSharedMutexAndConditionVariable FINAL : public Thread {
+class ThreadWithSharedMutexAndConditionVariable final : public Thread {
public:
ThreadWithSharedMutexAndConditionVariable()
: Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
@@ -116,9 +115,8 @@
finished_(false),
cv_(NULL),
mutex_(NULL) {}
- virtual ~ThreadWithSharedMutexAndConditionVariable() {}
- virtual void Run() OVERRIDE {
+ void Run() override {
LockGuard<Mutex> lock_guard(mutex_);
running_ = true;
cv_->NotifyAll();
@@ -218,7 +216,7 @@
namespace {
-class LoopIncrementThread FINAL : public Thread {
+class LoopIncrementThread final : public Thread {
public:
LoopIncrementThread(int rem, int* counter, int limit, int thread_count,
ConditionVariable* cv, Mutex* mutex)
@@ -233,7 +231,7 @@
EXPECT_EQ(0, limit % thread_count);
}
- virtual void Run() OVERRIDE {
+ void Run() override {
int last_count = -1;
while (true) {
LockGuard<Mutex> lock_guard(mutex_);
diff --git a/test/unittests/base/platform/platform-unittest.cc b/test/unittests/base/platform/platform-unittest.cc
index b17a9b9..0f0fb37 100644
--- a/test/unittests/base/platform/platform-unittest.cc
+++ b/test/unittests/base/platform/platform-unittest.cc
@@ -36,24 +36,6 @@
namespace {
-class SelfJoinThread FINAL : public Thread {
- public:
- SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
- void Run() FINAL { Join(); }
-};
-
-} // namespace
-
-
-TEST(Thread, DISABLE_ON_ANDROID(SelfJoin)) {
- SelfJoinThread thread;
- thread.Start();
- thread.Join();
-}
-
-
-namespace {
-
class ThreadLocalStorageTest : public Thread, public ::testing::Test {
public:
ThreadLocalStorageTest() : Thread(Options("ThreadLocalStorageTest")) {
@@ -67,7 +49,7 @@
}
}
- void Run() FINAL {
+ void Run() final {
for (size_t i = 0; i < arraysize(keys_); i++) {
CHECK(!Thread::HasThreadLocal(keys_[i]));
}
diff --git a/test/unittests/base/platform/semaphore-unittest.cc b/test/unittests/base/platform/semaphore-unittest.cc
index c68435f..bd4a00f 100644
--- a/test/unittests/base/platform/semaphore-unittest.cc
+++ b/test/unittests/base/platform/semaphore-unittest.cc
@@ -20,16 +20,15 @@
static const size_t kDataSize = kBufferSize * kAlphabetSize * 10;
-class ProducerThread FINAL : public Thread {
+class ProducerThread final : public Thread {
public:
ProducerThread(char* buffer, Semaphore* free_space, Semaphore* used_space)
: Thread(Options("ProducerThread")),
buffer_(buffer),
free_space_(free_space),
used_space_(used_space) {}
- virtual ~ProducerThread() {}
- virtual void Run() OVERRIDE {
+ void Run() override {
for (size_t n = 0; n < kDataSize; ++n) {
free_space_->Wait();
buffer_[n % kBufferSize] = kAlphabet[n % kAlphabetSize];
@@ -44,7 +43,7 @@
};
-class ConsumerThread FINAL : public Thread {
+class ConsumerThread final : public Thread {
public:
ConsumerThread(const char* buffer, Semaphore* free_space,
Semaphore* used_space)
@@ -52,9 +51,8 @@
buffer_(buffer),
free_space_(free_space),
used_space_(used_space) {}
- virtual ~ConsumerThread() {}
- virtual void Run() OVERRIDE {
+ void Run() override {
for (size_t n = 0; n < kDataSize; ++n) {
used_space_->Wait();
EXPECT_EQ(kAlphabet[n % kAlphabetSize], buffer_[n % kBufferSize]);
@@ -69,13 +67,12 @@
};
-class WaitAndSignalThread FINAL : public Thread {
+class WaitAndSignalThread final : public Thread {
public:
explicit WaitAndSignalThread(Semaphore* semaphore)
: Thread(Options("WaitAndSignalThread")), semaphore_(semaphore) {}
- virtual ~WaitAndSignalThread() {}
- virtual void Run() OVERRIDE {
+ void Run() override {
for (int n = 0; n < 100; ++n) {
semaphore_->Wait();
ASSERT_FALSE(semaphore_->WaitFor(TimeDelta::FromMicroseconds(1)));
diff --git a/test/unittests/cancelable-tasks-unittest.cc b/test/unittests/cancelable-tasks-unittest.cc
new file mode 100644
index 0000000..37690aa
--- /dev/null
+++ b/test/unittests/cancelable-tasks-unittest.cc
@@ -0,0 +1,218 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/atomicops.h"
+#include "src/base/platform/platform.h"
+#include "src/cancelable-task.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class TestTask : public Task, public Cancelable {
+ public:
+ enum Mode { kDoNothing, kWaitTillCanceledAgain, kCheckNotRun };
+
+ TestTask(CancelableTaskManager* parent, base::AtomicWord* result,
+ Mode mode = kDoNothing)
+ : Cancelable(parent), result_(result), mode_(mode) {}
+
+ // Task overrides.
+ void Run() final {
+ if (TryRun()) {
+ RunInternal();
+ }
+ }
+
+ private:
+ void RunInternal() {
+ base::Release_Store(result_, id());
+
+ switch (mode_) {
+ case kWaitTillCanceledAgain:
+ // Simple busy wait until the main thread tried to cancel.
+ while (CancelAttempts() == 0) {
+ }
+ break;
+ case kCheckNotRun:
+ // Check that we never execute {RunInternal}.
+ EXPECT_TRUE(false);
+ break;
+ default:
+ break;
+ }
+ }
+
+ base::AtomicWord* result_;
+ Mode mode_;
+};
+
+
+class SequentialRunner {
+ public:
+ explicit SequentialRunner(TestTask* task) : task_(task) {}
+
+ void Run() {
+ task_->Run();
+ delete task_;
+ }
+
+ private:
+ TestTask* task_;
+};
+
+
+class ThreadedRunner final : public base::Thread {
+ public:
+ explicit ThreadedRunner(TestTask* task)
+ : Thread(Options("runner thread")), task_(task) {}
+
+ virtual void Run() {
+ task_->Run();
+ delete task_;
+ }
+
+ private:
+ TestTask* task_;
+};
+
+
+typedef base::AtomicWord ResultType;
+
+
+intptr_t GetValue(ResultType* result) { return base::Acquire_Load(result); }
+
+} // namespace
+
+
+TEST(CancelableTask, EmptyCancelableTaskManager) {
+ CancelableTaskManager manager;
+ manager.CancelAndWait();
+}
+
+
+TEST(CancelableTask, SequentialCancelAndWait) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ SequentialRunner runner1(
+ new TestTask(&manager, &result1, TestTask::kCheckNotRun));
+ EXPECT_EQ(GetValue(&result1), 0);
+ manager.CancelAndWait();
+ EXPECT_EQ(GetValue(&result1), 0);
+ runner1.Run(); // Run to avoid leaking the Task.
+ EXPECT_EQ(GetValue(&result1), 0);
+}
+
+
+TEST(CancelableTask, SequentialMultipleTasks) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ ResultType result2 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1);
+ TestTask* task2 = new TestTask(&manager, &result2);
+ SequentialRunner runner1(task1);
+ SequentialRunner runner2(task2);
+ EXPECT_EQ(task1->id(), 1u);
+ EXPECT_EQ(task2->id(), 2u);
+
+ EXPECT_EQ(GetValue(&result1), 0);
+ runner1.Run(); // Don't touch task1 after running it.
+ EXPECT_EQ(GetValue(&result1), 1);
+
+ EXPECT_EQ(GetValue(&result2), 0);
+ runner2.Run(); // Don't touch task2 after running it.
+ EXPECT_EQ(GetValue(&result2), 2);
+
+ manager.CancelAndWait();
+ EXPECT_FALSE(manager.TryAbort(1));
+ EXPECT_FALSE(manager.TryAbort(2));
+}
+
+
+TEST(CancelableTask, ThreadedMultipleTasksStarted) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ ResultType result2 = 0;
+ TestTask* task1 =
+ new TestTask(&manager, &result1, TestTask::kWaitTillCanceledAgain);
+ TestTask* task2 =
+ new TestTask(&manager, &result2, TestTask::kWaitTillCanceledAgain);
+ ThreadedRunner runner1(task1);
+ ThreadedRunner runner2(task2);
+ runner1.Start();
+ runner2.Start();
+ // Busy wait on result to make sure both tasks are done.
+ while ((GetValue(&result1) == 0) || (GetValue(&result2) == 0)) {
+ }
+ manager.CancelAndWait();
+ runner1.Join();
+ runner2.Join();
+ EXPECT_EQ(GetValue(&result1), 1);
+ EXPECT_EQ(GetValue(&result2), 2);
+}
+
+
+TEST(CancelableTask, ThreadedMultipleTasksNotRun) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ ResultType result2 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
+ TestTask* task2 = new TestTask(&manager, &result2, TestTask::kCheckNotRun);
+ ThreadedRunner runner1(task1);
+ ThreadedRunner runner2(task2);
+ manager.CancelAndWait();
+ // Tasks are canceled, hence the runner will bail out and not update result.
+ runner1.Start();
+ runner2.Start();
+ runner1.Join();
+ runner2.Join();
+ EXPECT_EQ(GetValue(&result1), 0);
+ EXPECT_EQ(GetValue(&result2), 0);
+}
+
+
+TEST(CancelableTask, RemoveBeforeCancelAndWait) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1, TestTask::kCheckNotRun);
+ ThreadedRunner runner1(task1);
+ uint32_t id = task1->id();
+ EXPECT_EQ(id, 1u);
+ EXPECT_TRUE(manager.TryAbort(id));
+ runner1.Start();
+ runner1.Join();
+ manager.CancelAndWait();
+ EXPECT_EQ(GetValue(&result1), 0);
+}
+
+
+TEST(CancelableTask, RemoveAfterCancelAndWait) {
+ CancelableTaskManager manager;
+ ResultType result1 = 0;
+ TestTask* task1 = new TestTask(&manager, &result1);
+ ThreadedRunner runner1(task1);
+ uint32_t id = task1->id();
+ EXPECT_EQ(id, 1u);
+ runner1.Start();
+ runner1.Join();
+ manager.CancelAndWait();
+ EXPECT_FALSE(manager.TryAbort(id));
+ EXPECT_EQ(GetValue(&result1), 1);
+}
+
+
+TEST(CancelableTask, RemoveUnmanagedId) {
+ CancelableTaskManager manager;
+ EXPECT_FALSE(manager.TryAbort(1));
+ EXPECT_FALSE(manager.TryAbort(2));
+ manager.CancelAndWait();
+ EXPECT_FALSE(manager.TryAbort(1));
+ EXPECT_FALSE(manager.TryAbort(3));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index fbdf87a..62abeda 100644
--- a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -12,7 +12,6 @@
namespace {
-typedef RawMachineAssembler::Label MLabel;
typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
@@ -31,7 +30,7 @@
}
-static const DPI kDPIs[] = {
+const DPI kDPIs[] = {
{&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmAnd, kArmTst},
{&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr, kArmOrr},
{&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmEor, kArmTeq},
@@ -39,6 +38,38 @@
{&RawMachineAssembler::Int32Sub, "Int32Sub", kArmSub, kArmRsb, kArmCmp}};
+// Floating point arithmetic instructions.
+struct FAI {
+ Constructor constructor;
+ const char* constructor_name;
+ MachineType machine_type;
+ ArchOpcode arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const FAI& fai) {
+ return os << fai.constructor_name;
+}
+
+
+const FAI kFAIs[] = {{&RawMachineAssembler::Float32Add, "Float32Add",
+ MachineType::Float32(), kArmVaddF32},
+ {&RawMachineAssembler::Float64Add, "Float64Add",
+ MachineType::Float64(), kArmVaddF64},
+ {&RawMachineAssembler::Float32Sub, "Float32Sub",
+ MachineType::Float32(), kArmVsubF32},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub",
+ MachineType::Float64(), kArmVsubF64},
+ {&RawMachineAssembler::Float32Mul, "Float32Mul",
+ MachineType::Float32(), kArmVmulF32},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul",
+ MachineType::Float64(), kArmVmulF64},
+ {&RawMachineAssembler::Float32Div, "Float32Div",
+ MachineType::Float32(), kArmVdivF32},
+ {&RawMachineAssembler::Float64Div, "Float64Div",
+ MachineType::Float64(), kArmVdivF64}};
+
+
// Data processing instructions with overflow.
struct ODPI {
Constructor constructor;
@@ -53,10 +84,10 @@
}
-static const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
- "Int32AddWithOverflow", kArmAdd, kArmAdd},
- {&RawMachineAssembler::Int32SubWithOverflow,
- "Int32SubWithOverflow", kArmSub, kArmRsb}};
+const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
+ "Int32AddWithOverflow", kArmAdd, kArmAdd},
+ {&RawMachineAssembler::Int32SubWithOverflow,
+ "Int32SubWithOverflow", kArmSub, kArmRsb}};
// Shifts.
@@ -75,50 +106,34 @@
}
-static const Shift kShifts[] = {
- {&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
- kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
- {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
- kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
- {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
- kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
- {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
- kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
+const Shift kShifts[] = {{&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
+ kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
+ kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
+ kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
+ kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
// Immediates (random subset).
-static const int32_t kImmediates[] = {
+const int32_t kImmediates[] = {
std::numeric_limits<int32_t>::min(), -2147483617, -2147483606, -2113929216,
- -2080374784, -1996488704, -1879048192, -1459617792,
- -1358954496, -1342177265, -1275068414, -1073741818,
- -1073741777, -855638016, -805306368, -402653184,
- -268435444, -16777216, 0, 35,
- 61, 105, 116, 171,
- 245, 255, 692, 1216,
- 1248, 1520, 1600, 1888,
- 3744, 4080, 5888, 8384,
- 9344, 9472, 9792, 13312,
- 15040, 15360, 20736, 22272,
- 23296, 32000, 33536, 37120,
- 45824, 47872, 56320, 59392,
- 65280, 72704, 101376, 147456,
- 161792, 164864, 167936, 173056,
- 195584, 209920, 212992, 356352,
- 655360, 704512, 716800, 851968,
- 901120, 1044480, 1523712, 2572288,
- 3211264, 3588096, 3833856, 3866624,
- 4325376, 5177344, 6488064, 7012352,
- 7471104, 14090240, 16711680, 19398656,
- 22282240, 28573696, 30408704, 30670848,
- 43253760, 54525952, 55312384, 56623104,
- 68157440, 115343360, 131072000, 187695104,
- 188743680, 195035136, 197132288, 203423744,
- 218103808, 267386880, 268435470, 285212672,
- 402653185, 415236096, 595591168, 603979776,
- 603979778, 629145600, 1073741835, 1073741855,
- 1073741861, 1073741884, 1157627904, 1476395008,
- 1476395010, 1610612741, 2030043136, 2080374785,
- 2097152000};
+ -2080374784, -1996488704, -1879048192, -1459617792, -1358954496,
+ -1342177265, -1275068414, -1073741818, -1073741777, -855638016, -805306368,
+ -402653184, -268435444, -16777216, 0, 35, 61, 105, 116, 171, 245, 255, 692,
+ 1216, 1248, 1520, 1600, 1888, 3744, 4080, 5888, 8384, 9344, 9472, 9792,
+ 13312, 15040, 15360, 20736, 22272, 23296, 32000, 33536, 37120, 45824, 47872,
+ 56320, 59392, 65280, 72704, 101376, 147456, 161792, 164864, 167936, 173056,
+ 195584, 209920, 212992, 356352, 655360, 704512, 716800, 851968, 901120,
+ 1044480, 1523712, 2572288, 3211264, 3588096, 3833856, 3866624, 4325376,
+ 5177344, 6488064, 7012352, 7471104, 14090240, 16711680, 19398656, 22282240,
+ 28573696, 30408704, 30670848, 43253760, 54525952, 55312384, 56623104,
+ 68157440, 115343360, 131072000, 187695104, 188743680, 195035136, 197132288,
+ 203423744, 218103808, 267386880, 268435470, 285212672, 402653185, 415236096,
+ 595591168, 603979776, 603979778, 629145600, 1073741835, 1073741855,
+ 1073741861, 1073741884, 1157627904, 1476395008, 1476395010, 1610612741,
+ 2030043136, 2080374785, 2097152000};
} // namespace
@@ -132,7 +147,8 @@
TEST_P(InstructionSelectorDPITest, Parameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -146,7 +162,7 @@
TEST_P(InstructionSelectorDPITest, Immediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -157,7 +173,7 @@
EXPECT_EQ(1U, s[0]->OutputCount());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -173,7 +189,8 @@
TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
@@ -185,7 +202,8 @@
EXPECT_EQ(1U, s[0]->OutputCount());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)));
@@ -203,7 +221,8 @@
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*dpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
@@ -218,7 +237,8 @@
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1)));
@@ -236,8 +256,9 @@
TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -255,8 +276,8 @@
TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
&b);
m.Bind(&a);
@@ -271,8 +292,8 @@
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
&b);
m.Bind(&a);
@@ -292,8 +313,9 @@
TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
@@ -310,8 +332,9 @@
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
m.Parameter(2)),
@@ -334,8 +357,9 @@
const DPI dpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0),
(m.*shift.constructor)(
m.Parameter(1), m.Int32Constant(imm))),
@@ -356,8 +380,9 @@
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1)),
@@ -381,8 +406,9 @@
TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
m.Int32Constant(0)),
&a, &b);
@@ -401,8 +427,9 @@
TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
const DPI dpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(
m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
m.Int32Constant(0)),
@@ -423,8 +450,8 @@
TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32Equal(
(m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Int32Constant(0)),
@@ -441,8 +468,8 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32Equal(
(m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
m.Int32Constant(0)),
@@ -464,8 +491,8 @@
TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
const DPI dpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32NotEqual(
(m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Int32Constant(0)),
@@ -482,8 +509,8 @@
EXPECT_EQ(kNotEqual, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32NotEqual(
(m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
m.Int32Constant(0)),
@@ -515,7 +542,8 @@
TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
@@ -532,7 +560,7 @@
TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
Stream s = m.Build();
@@ -546,7 +574,7 @@
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -565,7 +593,8 @@
TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(
m.Parameter(0),
@@ -580,7 +609,8 @@
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
@@ -601,7 +631,8 @@
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(m.Parameter(0),
(m.*shift.constructor)(
@@ -619,7 +650,8 @@
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
1, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
@@ -640,7 +672,8 @@
TEST_P(InstructionSelectorODPITest, ValWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
@@ -656,7 +689,7 @@
TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
Stream s = m.Build();
@@ -669,7 +702,7 @@
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -687,7 +720,8 @@
TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(
m.Parameter(0),
@@ -701,7 +735,8 @@
EXPECT_EQ(kFlags_none, s[0]->flags_mode());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
@@ -721,7 +756,8 @@
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(m.Parameter(0),
(m.*shift.constructor)(
@@ -738,7 +774,8 @@
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Projection(
0, (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
@@ -758,7 +795,8 @@
TEST_P(InstructionSelectorODPITest, BothWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -775,7 +813,7 @@
TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -789,7 +827,7 @@
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -808,7 +846,8 @@
TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(
m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
@@ -822,7 +861,8 @@
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(Shift, shift, kShifts) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* n = (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
@@ -842,7 +882,8 @@
const ODPI odpi = GetParam();
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* n = (m.*odpi.constructor)(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
@@ -860,7 +901,8 @@
}
TRACED_FOREACH(Shift, shift, kShifts) {
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* n = (m.*odpi.constructor)(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
m.Parameter(1));
@@ -881,8 +923,9 @@
TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -903,8 +946,8 @@
TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
const ODPI odpi = GetParam();
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -922,8 +965,8 @@
EXPECT_EQ(kOverflow, s[0]->flags_condition());
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -945,8 +988,9 @@
TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
m.Bind(&a);
@@ -966,8 +1010,9 @@
TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
const ODPI odpi = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
m.Bind(&a);
@@ -998,7 +1043,8 @@
TEST_P(InstructionSelectorShiftTest, Parameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1012,7 +1058,7 @@
TEST_P(InstructionSelectorShiftTest, Immediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1028,7 +1074,8 @@
TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
const Shift shift = GetParam();
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32Equal(m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
@@ -1042,7 +1089,8 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
m.Parameter(0)));
@@ -1061,7 +1109,8 @@
TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
m.Parameter(0)));
@@ -1076,7 +1125,8 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(
m.Parameter(0),
(m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
@@ -1095,7 +1145,8 @@
TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(
m.Word32Equal(m.Int32Constant(0),
(m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
@@ -1113,7 +1164,8 @@
TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(
m.Int32Constant(0),
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
@@ -1132,7 +1184,8 @@
TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1146,7 +1199,7 @@
TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Not(
(m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
Stream s = m.Build();
@@ -1162,7 +1215,8 @@
TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
const Shift shift = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
m.Parameter(1), m.Parameter(2)))));
Stream s = m.Build();
@@ -1177,7 +1231,8 @@
TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
const Shift shift = GetParam();
TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0),
m.Word32Not((m.*shift.constructor)(
m.Parameter(1), m.Int32Constant(imm)))));
@@ -1217,50 +1272,50 @@
}
-static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8,
+const MemoryAccess kMemoryAccesses[] = {
+ {MachineType::Int8(),
kArmLdrsb,
kArmStrb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kArmLdrb,
kArmStrb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
-127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kArmLdrsh,
kArmStrh,
&InstructionSelectorTest::Stream::IsInteger,
{-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
-98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
- {kMachUint16,
+ {MachineType::Uint16(),
kArmLdrh,
kArmStrh,
&InstructionSelectorTest::Stream::IsInteger,
{-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
-32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
- {kMachInt32,
+ {MachineType::Int32(),
kArmLdr,
kArmStr,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
-80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
- {kMachFloat32,
+ {MachineType::Float32(),
kArmVldrF32,
kArmVstrF32,
&InstructionSelectorTest::Stream::IsDouble,
{-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
-84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
- {kMachFloat64,
+ {MachineType::Float64(),
kArmVldrF64,
kArmVstrF64,
&InstructionSelectorTest::Stream::IsDouble,
@@ -1277,7 +1332,8 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1292,7 +1348,7 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1309,8 +1365,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1324,9 +1382,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
- m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1350,7 +1409,7 @@
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1361,7 +1420,7 @@
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1382,6 +1441,7 @@
const char* constructor_name;
FlagsCondition flags_condition;
FlagsCondition negated_flags_condition;
+ FlagsCondition commuted_flags_condition;
};
@@ -1391,15 +1451,17 @@
const Comparison kComparisons[] = {
- {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kEqual, kNotEqual,
+ kEqual},
{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kSignedLessThan,
- kSignedGreaterThanOrEqual},
+ kSignedGreaterThanOrEqual, kSignedGreaterThan},
{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kSignedLessThanOrEqual, kSignedGreaterThan},
+ kSignedLessThanOrEqual, kSignedGreaterThan, kSignedGreaterThanOrEqual},
{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kUnsignedLessThan,
- kUnsignedGreaterThanOrEqual},
+ kUnsignedGreaterThanOrEqual, kUnsignedGreaterThan},
{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kUnsignedLessThanOrEqual, kUnsignedGreaterThan}};
+ kUnsignedLessThanOrEqual, kUnsignedGreaterThan,
+ kUnsignedGreaterThanOrEqual}};
} // namespace
@@ -1410,7 +1472,8 @@
TEST_P(InstructionSelectorComparisonTest, Parameters) {
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = (m.*cmp.constructor)(p0, p1);
@@ -1432,7 +1495,8 @@
TEST_P(InstructionSelectorComparisonTest, Word32EqualWithZero) {
{
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r =
@@ -1452,7 +1516,8 @@
}
{
const Comparison& cmp = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r =
@@ -1479,11 +1544,345 @@
// -----------------------------------------------------------------------------
-// Miscellaneous.
+// Floating point comparisons.
+
+
+namespace {
+
+const Comparison kF32Comparisons[] = {
+ {&RawMachineAssembler::Float32Equal, "Float32Equal", kEqual, kNotEqual,
+ kEqual},
+ {&RawMachineAssembler::Float32LessThan, "Float32LessThan",
+ kFloatLessThan, kFloatGreaterThanOrEqualOrUnordered, kFloatGreaterThan},
+ {&RawMachineAssembler::Float32LessThanOrEqual, "Float32LessThanOrEqual",
+ kFloatLessThanOrEqual, kFloatGreaterThanOrUnordered,
+ kFloatGreaterThanOrEqual}};
+
+} // namespace
+
+typedef InstructionSelectorTestWithParam<Comparison>
+ InstructionSelectorF32ComparisonTest;
+
+
+TEST_P(InstructionSelectorF32ComparisonTest, WithParameters) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32(),
+ MachineType::Float32());
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorF32ComparisonTest, NegatedWithParameters) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32(),
+ MachineType::Float32());
+ m.Return(
+ m.WordBinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.negated_flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnRight) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Float32Constant(0.0)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorF32ComparisonTest, WithImmediateZeroOnLeft) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return((m.*cmp.constructor)(m.Float32Constant(0.0f), m.Parameter(0)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_flags_condition, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorF32ComparisonTest,
+ ::testing::ValuesIn(kF32Comparisons));
+
+
+namespace {
+
+const Comparison kF64Comparisons[] = {
+ {&RawMachineAssembler::Float64Equal, "Float64Equal", kEqual, kNotEqual,
+ kEqual},
+ {&RawMachineAssembler::Float64LessThan, "Float64LessThan",
+ kFloatLessThan, kFloatGreaterThanOrEqualOrUnordered, kFloatGreaterThan},
+ {&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kFloatLessThanOrEqual, kFloatGreaterThanOrUnordered,
+ kFloatGreaterThanOrEqual}};
+
+} // namespace
+
+typedef InstructionSelectorTestWithParam<Comparison>
+ InstructionSelectorF64ComparisonTest;
+
+
+TEST_P(InstructionSelectorF64ComparisonTest, WithParameters) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float64());
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorF64ComparisonTest, NegatedWithParameters) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64(),
+ MachineType::Float64());
+ m.Return(
+ m.WordBinaryNot((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.negated_flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnRight) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
+ m.Return((m.*cmp.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.flags_condition, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorF64ComparisonTest, WithImmediateZeroOnLeft) {
+ const Comparison& cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
+ m.Return((m.*cmp.constructor)(m.Float64Constant(0.0), m.Parameter(0)));
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVcmpF64, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_flags_condition, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ InstructionSelectorF64ComparisonTest,
+ ::testing::ValuesIn(kF64Comparisons));
+
+
+// -----------------------------------------------------------------------------
+// Floating point arithmetic.
+
+
+typedef InstructionSelectorTestWithParam<FAI> InstructionSelectorFAITest;
+
+
+TEST_P(InstructionSelectorFAITest, Parameters) {
+ const FAI& fai = GetParam();
+ StreamBuilder m(this, fai.machine_type, fai.machine_type, fai.machine_type);
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const r = (m.*fai.constructor)(p0, p1);
+ m.Return(r);
+ Stream const s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(fai.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->OutputAt(0)));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFAITest,
+ ::testing::ValuesIn(kFAIs));
+
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVabsF32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVabsF64, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVmlaF32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVmlaF32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVmlaF64, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVmlaF64, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(
+ UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float32SubWithMinusZero) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVnegF32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* const p0 = m.Parameter(0);
Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
m.Return(n);
@@ -1497,9 +1896,90 @@
}
+TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float32Sub(p0, m.Float32Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVmlsF32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const p2 = m.Parameter(2);
+ Node* const n = m.Float64Sub(p0, m.Float64Mul(p1, p2));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVmlsF64, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Sqrt) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sqrt(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVsqrtF32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Sqrt) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sqrt(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmVsqrtF64, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1516,7 +1996,8 @@
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1537,7 +2018,8 @@
TEST_F(InstructionSelectorTest, Int32AddWithInt32MulHigh) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1554,7 +2036,8 @@
EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -1575,7 +2058,8 @@
TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xff)), p1);
@@ -1591,7 +2075,8 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xff)));
@@ -1607,7 +2092,8 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xffff)), p1);
@@ -1623,7 +2109,8 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xffff)));
@@ -1643,7 +2130,8 @@
TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -1661,7 +2149,8 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -1679,7 +2168,8 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -1697,7 +2187,8 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const r = m.Int32Add(
@@ -1718,7 +2209,8 @@
TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
Stream s = m.Build();
@@ -1732,7 +2224,8 @@
TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
m.Return(
m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
Stream s = m.Build(MLS);
@@ -1744,7 +2237,8 @@
TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(4U, s.size());
@@ -1764,7 +2258,8 @@
TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(1U, s.size());
@@ -1773,7 +2268,8 @@
TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(6U, s.size());
@@ -1803,7 +2299,8 @@
TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(3U, s.size());
@@ -1824,7 +2321,8 @@
TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(MLS, SUDIV);
ASSERT_EQ(2U, s.size());
@@ -1841,7 +2339,8 @@
TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1854,7 +2353,7 @@
TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
// x * (2^k + 1) -> x + (x >> k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1867,7 +2366,7 @@
}
// x * (2^k - 1) -> -x + (x >> k)
TRACED_FORRANGE(int32_t, k, 3, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1880,7 +2379,7 @@
}
// (2^k + 1) * x -> x + (x >> k)
TRACED_FORRANGE(int32_t, k, 1, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1893,7 +2392,7 @@
}
// x * (2^k - 1) -> -x + (x >> k)
TRACED_FORRANGE(int32_t, k, 3, 30) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1908,7 +2407,8 @@
TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -1925,7 +2425,8 @@
TEST_F(InstructionSelectorTest, Uint32MulHighWithParameters) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Uint32MulHigh(p0, p1);
@@ -1942,7 +2443,8 @@
TEST_F(InstructionSelectorTest, Uint32DivWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(4U, s.size());
@@ -1962,7 +2464,8 @@
TEST_F(InstructionSelectorTest, Uint32DivWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Div(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(1U, s.size());
@@ -1971,7 +2474,8 @@
TEST_F(InstructionSelectorTest, Uint32ModWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(6U, s.size());
@@ -2001,7 +2505,8 @@
TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIV) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(SUDIV);
ASSERT_EQ(3U, s.size());
@@ -2022,7 +2527,8 @@
TEST_F(InstructionSelectorTest, Uint32ModWithParametersForSUDIVAndMLS) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Uint32Mod(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build(MLS, SUDIV);
ASSERT_EQ(2U, s.size());
@@ -2040,7 +2546,7 @@
TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
TRACED_FORRANGE(int32_t, width, 1, 32) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0),
m.Int32Constant(0xffffffffu >> (32 - width))));
Stream s = m.Build(ARMv7);
@@ -2051,7 +2557,7 @@
EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
}
TRACED_FORRANGE(int32_t, width, 1, 32) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
m.Parameter(0)));
Stream s = m.Build(ARMv7);
@@ -2067,7 +2573,7 @@
TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(
m.Parameter(0),
m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
@@ -2084,7 +2590,7 @@
}
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 9, (32 - lsb) - 1) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(
m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
m.Parameter(0)));
@@ -2104,7 +2610,7 @@
TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32And(p0, m.Int32Constant(0xffff));
m.Return(r);
@@ -2118,7 +2624,7 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r = m.Word32And(m.Int32Constant(0xffff), p0);
m.Return(r);
@@ -2136,7 +2642,7 @@
TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
@@ -2151,7 +2657,7 @@
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const r =
m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
@@ -2175,7 +2681,7 @@
if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
uint32_t jnk = rng()->NextInt(max);
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
m.Int32Constant(lsb)));
Stream s = m.Build(ARMv7);
@@ -2192,7 +2698,7 @@
if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
uint32_t jnk = rng()->NextInt(max);
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
m.Int32Constant(lsb)));
Stream s = m.Build(ARMv7);
@@ -2208,7 +2714,8 @@
TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2218,7 +2725,8 @@
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2231,7 +2739,8 @@
TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2247,7 +2756,7 @@
TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2261,7 +2770,7 @@
}
TRACED_FOREACH(int32_t, imm, kImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2278,7 +2787,7 @@
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2291,7 +2800,7 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2307,7 +2816,7 @@
TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Not(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2321,7 +2830,7 @@
TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
m.Int32Constant(0xffffffffu >> (32 - width))));
Stream s = m.Build(ARMv7);
@@ -2334,7 +2843,7 @@
}
TRACED_FORRANGE(int32_t, lsb, 0, 31) {
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
Stream s = m.Build(ARMv7);
@@ -2347,6 +2856,21 @@
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArmClz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index cd3ce09..73532aa 100644
--- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -10,8 +10,6 @@
namespace {
-typedef RawMachineAssembler::Label MLabel;
-
template <typename T>
struct MachInst {
T constructor;
@@ -45,12 +43,12 @@
// machine type.
Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
int64_t value) {
- switch (type) {
- case kMachInt32:
- return m.Int32Constant(value);
+ switch (type.representation()) {
+ case MachineRepresentation::kWord32:
+ return m.Int32Constant(static_cast<int32_t>(value));
break;
- case kMachInt64:
+ case MachineRepresentation::kWord64:
return m.Int64Constant(value);
break;
@@ -62,19 +60,25 @@
// ARM64 logical instructions.
-static const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
- {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
- {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or, kMachInt64},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32, kMachInt32},
- {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eor, kMachInt64}};
+const MachInst2 kLogicalInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64And32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64And,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eor32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eor,
+ MachineType::Int64()}};
// ARM64 logical immediates: contiguous set bits, rotated about a power of two
// sized block. The block is then duplicated across the word. Below is a random
// subset of the 32-bit immediates.
-static const uint32_t kLogical32Immediates[] = {
+const uint32_t kLogical32Immediates[] = {
0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
@@ -95,7 +99,7 @@
// Random subset of 64-bit logical immediates.
-static const uint64_t kLogical64Immediates[] = {
+const uint64_t kLogical64Immediates[] = {
0x0000000000000001, 0x0000000000000002, 0x0000000000000003,
0x0000000000000070, 0x0000000000000080, 0x0000000000000100,
0x00000000000001c0, 0x0000000000000300, 0x0000000000000600,
@@ -131,20 +135,24 @@
}
-static const AddSub kAddSubInstructions[] = {
- {{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
+const AddSub kAddSubInstructions[] = {
+ {{&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32,
+ MachineType::Int32()},
kArm64Sub32},
- {{&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
+ {{&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add,
+ MachineType::Int64()},
kArm64Sub},
- {{&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32, kMachInt32},
+ {{&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32,
+ MachineType::Int32()},
kArm64Add32},
- {{&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub, kMachInt64},
+ {{&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub,
+ MachineType::Int64()},
kArm64Add}};
// ARM64 Add/Sub immediates: 12-bit immediate optionally shifted by 12.
// Below is a combination of a random subset and some edge values.
-static const int32_t kAddSubImmediates[] = {
+const int32_t kAddSubImmediates[] = {
0, 1, 69, 493, 599, 701, 719,
768, 818, 842, 945, 1246, 1286, 1429,
1669, 2171, 2179, 2182, 2254, 2334, 2338,
@@ -160,66 +168,85 @@
// ARM64 flag setting data processing instructions.
-static const MachInst2 kDPFlagSetInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
- {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
- {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kArm64Tst, kMachInt64}};
+const MachInst2 kDPFlagSetInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64Tst,
+ MachineType::Int64()}};
// ARM64 arithmetic with overflow instructions.
-static const MachInst2 kOvfAddSubInstructions[] = {
+const MachInst2 kOvfAddSubInstructions[] = {
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
- kArm64Add32, kMachInt32},
+ kArm64Add32, MachineType::Int32()},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
- kArm64Sub32, kMachInt32}};
+ kArm64Sub32, MachineType::Int32()}};
// ARM64 shift instructions.
-static const Shift kShiftInstructions[] = {
- {{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32, kMachInt32},
+const Shift kShiftInstructions[] = {
+ {{&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Lsl32,
+ MachineType::Int32()},
kMode_Operand2_R_LSL_I},
- {{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl, kMachInt64},
+ {{&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Lsl,
+ MachineType::Int64()},
kMode_Operand2_R_LSL_I},
- {{&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32, kMachInt32},
+ {{&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Lsr32,
+ MachineType::Int32()},
kMode_Operand2_R_LSR_I},
- {{&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr, kMachInt64},
+ {{&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Lsr,
+ MachineType::Int64()},
kMode_Operand2_R_LSR_I},
- {{&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32, kMachInt32},
+ {{&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Asr32,
+ MachineType::Int32()},
kMode_Operand2_R_ASR_I},
- {{&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr, kMachInt64},
+ {{&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Asr,
+ MachineType::Int64()},
kMode_Operand2_R_ASR_I},
- {{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+ {{&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32,
+ MachineType::Int32()},
kMode_Operand2_R_ROR_I},
- {{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64},
+ {{&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror,
+ MachineType::Int64()},
kMode_Operand2_R_ROR_I}};
// ARM64 Mul/Div instructions.
-static const MachInst2 kMulDivInstructions[] = {
- {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
- {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
- {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
- {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv, kMachInt64},
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kArm64Udiv32, kMachInt32},
- {&RawMachineAssembler::Uint64Div, "Uint64Div", kArm64Udiv, kMachInt64}};
+const MachInst2 kMulDivInstructions[] = {
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kArm64Udiv32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint64Div, "Uint64Div", kArm64Udiv,
+ MachineType::Int64()}};
// ARM64 FP arithmetic instructions.
-static const MachInst2 kFPArithInstructions[] = {
+const MachInst2 kFPArithInstructions[] = {
{&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
- kMachFloat64},
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
- kMachFloat64},
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Mul, "Float64Mul", kArm64Float64Mul,
- kMachFloat64},
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Div, "Float64Div", kArm64Float64Div,
- kMachFloat64}};
+ MachineType::Float64()}};
struct FPCmp {
MachInst2 mi;
FlagsCondition cond;
+ FlagsCondition commuted_cond;
};
@@ -229,16 +256,31 @@
// ARM64 FP comparison instructions.
-static const FPCmp kFPCmpInstructions[] = {
+const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
- kMachFloat64},
- kUnorderedEqual},
+ MachineType::Float64()},
+ kEqual,
+ kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
- kArm64Float64Cmp, kMachFloat64},
- kUnorderedLessThan},
+ kArm64Float64Cmp, MachineType::Float64()},
+ kFloatLessThan,
+ kFloatGreaterThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
- kArm64Float64Cmp, kMachFloat64},
- kUnorderedLessThanOrEqual}};
+ kArm64Float64Cmp, MachineType::Float64()},
+ kFloatLessThanOrEqual,
+ kFloatGreaterThanOrEqual},
+ {{&RawMachineAssembler::Float32Equal, "Float32Equal", kArm64Float32Cmp,
+ MachineType::Float32()},
+ kEqual,
+ kEqual},
+ {{&RawMachineAssembler::Float32LessThan, "Float32LessThan",
+ kArm64Float32Cmp, MachineType::Float32()},
+ kFloatLessThan,
+ kFloatGreaterThan},
+ {{&RawMachineAssembler::Float32LessThanOrEqual, "Float32LessThanOrEqual",
+ kArm64Float32Cmp, MachineType::Float32()},
+ kFloatLessThanOrEqual,
+ kFloatGreaterThanOrEqual}};
struct Conversion {
@@ -254,34 +296,35 @@
// ARM64 type conversion instructions.
-static const Conversion kConversionInstructions[] = {
+const Conversion kConversionInstructions[] = {
{{&RawMachineAssembler::ChangeFloat32ToFloat64, "ChangeFloat32ToFloat64",
- kArm64Float32ToFloat64, kMachFloat64},
- kMachFloat32},
+ kArm64Float32ToFloat64, MachineType::Float64()},
+ MachineType::Float32()},
{{&RawMachineAssembler::TruncateFloat64ToFloat32,
- "TruncateFloat64ToFloat32", kArm64Float64ToFloat32, kMachFloat32},
- kMachFloat64},
+ "TruncateFloat64ToFloat32", kArm64Float64ToFloat32,
+ MachineType::Float32()},
+ MachineType::Float64()},
{{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
- kArm64Sxtw, kMachInt64},
- kMachInt32},
+ kArm64Sxtw, MachineType::Int64()},
+ MachineType::Int32()},
{{&RawMachineAssembler::ChangeUint32ToUint64, "ChangeUint32ToUint64",
- kArm64Mov32, kMachUint64},
- kMachUint32},
+ kArm64Mov32, MachineType::Uint64()},
+ MachineType::Uint32()},
{{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
- kArm64Mov32, kMachInt32},
- kMachInt64},
+ kArm64Mov32, MachineType::Int32()},
+ MachineType::Int64()},
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
- kArm64Int32ToFloat64, kMachFloat64},
- kMachInt32},
+ kArm64Int32ToFloat64, MachineType::Float64()},
+ MachineType::Int32()},
{{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
- kArm64Uint32ToFloat64, kMachFloat64},
- kMachUint32},
+ kArm64Uint32ToFloat64, MachineType::Float64()},
+ MachineType::Uint32()},
{{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
- kArm64Float64ToInt32, kMachInt32},
- kMachFloat64},
+ kArm64Float64ToInt32, MachineType::Int32()},
+ MachineType::Float64()},
{{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
- kArm64Float64ToUint32, kMachUint32},
- kMachFloat64}};
+ kArm64Float64ToUint32, MachineType::Uint32()},
+ MachineType::Float64()}};
} // namespace
@@ -311,7 +354,7 @@
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
// TODO(all): Add support for testing 64-bit immediates.
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
// Immediate on the right.
TRACED_FOREACH(int32_t, imm, kLogical32Immediates) {
StreamBuilder m(this, type, type);
@@ -348,7 +391,7 @@
// Only test 64-bit shifted operands with 64-bit instructions.
if (shift.mi.machine_type != type) continue;
- TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(
m.Parameter(0),
@@ -363,7 +406,7 @@
EXPECT_EQ(1U, s[0]->OutputCount());
}
- TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.constructor)(
(m.*shift.mi.constructor)(m.Parameter(1),
@@ -454,7 +497,7 @@
continue;
}
- TRACED_FORRANGE(int, imm, 0, ((type == kMachInt32) ? 31 : 63)) {
+ TRACED_FORRANGE(int, imm, 0, ((type == MachineType::Int32()) ? 31 : 63)) {
StreamBuilder m(this, type, type, type);
m.Return((m.*dpi.mi.constructor)(
m.Parameter(0),
@@ -472,6 +515,70 @@
}
+TEST_P(InstructionSelectorAddSubTest, UnsignedExtendByte) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(
+ m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xff))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, UnsignedExtendHalfword) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(
+ m.Parameter(0), m.Word32And(m.Parameter(1), m.Int32Constant(0xffff))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, SignedExtendByte) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(
+ m.Parameter(0),
+ m.Word32Sar(m.Word32Shl(m.Parameter(1), m.Int32Constant(24)),
+ m.Int32Constant(24))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, SignedExtendHalfword) {
+ const AddSub dpi = GetParam();
+ const MachineType type = dpi.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.mi.constructor)(
+ m.Parameter(0),
+ m.Word32Sar(m.Word32Shl(m.Parameter(1), m.Int32Constant(16)),
+ m.Int32Constant(16))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+}
+
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
::testing::ValuesIn(kAddSubInstructions));
@@ -480,7 +587,7 @@
{
// 32-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -494,7 +601,7 @@
{
// 64-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Int64Add(m.Int64Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -509,38 +616,101 @@
TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
- // Subtraction with zero on the left maps to Neg.
{
// 32-bit subtract.
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Neg32, s[0]->arch_opcode());
- EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Sub32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
// 64-bit subtract.
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return(m.Int64Sub(m.Int64Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Neg, s[0]->arch_opcode());
- EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Sub, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(0)));
EXPECT_EQ(1U, s[0]->OutputCount());
}
}
+TEST_F(InstructionSelectorTest, SubZeroOnLeftWithShift) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ {
+ // Test 32-bit operations. Ignore ROR shifts, as subtract does not
+ // support them.
+ if ((shift.mi.machine_type != MachineType::Int32()) ||
+ (shift.mi.arch_opcode == kArm64Ror32) ||
+ (shift.mi.arch_opcode == kArm64Ror))
+ continue;
+
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(m.Int32Sub(
+ m.Int32Constant(0),
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Sub32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+ {
+ // Test 64-bit operations. Ignore ROR shifts, as subtract does not
+ // support them.
+ if ((shift.mi.machine_type != MachineType::Int64()) ||
+ (shift.mi.arch_opcode == kArm64Ror32) ||
+ (shift.mi.arch_opcode == kArm64Ror))
+ continue;
+
+ TRACED_FORRANGE(int, imm, -32, 127) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ m.Return(m.Int64Sub(
+ m.Int64Constant(0),
+ (m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm))));
+ Stream s = m.Build();
+
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Sub, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(0)->IsImmediate());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(0)));
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+ }
+}
+
+
TEST_F(InstructionSelectorTest, AddNegImmediateOnLeft) {
{
// 32-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(-imm), m.Parameter(0)));
Stream s = m.Build();
@@ -556,7 +726,7 @@
// 64-bit add.
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
if (imm == 0) continue;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Int64Add(m.Int64Constant(-imm), m.Parameter(0)));
Stream s = m.Build();
@@ -575,11 +745,14 @@
// 32-bit add.
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Only test relevant shifted operands.
- if (shift.mi.machine_type != kMachInt32) continue;
+ if (shift.mi.machine_type != MachineType::Int32()) continue;
if (shift.mi.arch_opcode == kArm64Ror32) continue;
- TRACED_FORRANGE(int, imm, 0, 31) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return((m.Int32Add)(
(m.*shift.mi.constructor)(m.Parameter(1), m.Int32Constant(imm)),
m.Parameter(0)));
@@ -596,11 +769,14 @@
// 64-bit add.
TRACED_FOREACH(Shift, shift, kShiftInstructions) {
// Only test relevant shifted operands.
- if (shift.mi.machine_type != kMachInt64) continue;
+ if (shift.mi.machine_type != MachineType::Int64()) continue;
if (shift.mi.arch_opcode == kArm64Ror) continue;
- TRACED_FORRANGE(int, imm, 0, 63) {
- StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int, imm, -64, 127) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
m.Return((m.Int64Add)(
(m.*shift.mi.constructor)(m.Parameter(1), m.Int64Constant(imm)),
m.Parameter(0)));
@@ -616,6 +792,126 @@
}
+TEST_F(InstructionSelectorTest, AddUnsignedExtendByteOnLeft) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
+ m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddUnsignedExtendHalfwordOnLeft) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(m.Int32Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
+ m.Return(m.Int64Add(m.Word32And(m.Parameter(0), m.Int32Constant(0xffff)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddSignedExtendByteOnLeft) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(24)),
+ m.Int32Constant(24)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, AddSignedExtendHalfwordOnLeft) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(16)),
+ m.Int32Constant(16)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Add(m.Word32Sar(m.Word32Shl(m.Parameter(0), m.Int32Constant(16)),
+ m.Int32Constant(16)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Data processing controlled branches.
@@ -628,7 +924,7 @@
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
StreamBuilder m(this, type, type, type);
- MLabel a, b;
+ RawMachineLabel a, b;
m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -652,8 +948,8 @@
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation32(imm) == 1) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -675,8 +971,8 @@
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation64(imm) == 1) continue;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -695,8 +991,8 @@
TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -713,8 +1009,8 @@
TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -734,8 +1030,8 @@
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation32(imm) == 1) continue;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -758,8 +1054,8 @@
// Skip the cases where the instruction selector would use tbz/tbnz.
if (base::bits::CountPopulation64(imm) == 1) continue;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(imm), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -779,8 +1075,8 @@
TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -799,8 +1095,8 @@
TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(mask)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -817,8 +1113,8 @@
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(
m.Word32BinaryNot(m.Word32And(m.Parameter(0), m.Int32Constant(mask))),
&a, &b);
@@ -840,8 +1136,8 @@
TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(m.Word32And(m.Int32Constant(mask), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -858,8 +1154,8 @@
TRACED_FORRANGE(int, bit, 0, 31) {
uint32_t mask = 1 << bit;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
m.Branch(
m.Word32BinaryNot(m.Word32And(m.Int32Constant(mask), m.Parameter(0))),
&a, &b);
@@ -881,8 +1177,8 @@
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
TRACED_FORRANGE(int, bit, 0, 63) {
uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Parameter(0), m.Int64Constant(mask)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -896,34 +1192,14 @@
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
}
-
- TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
- m.Branch(
- m.Word64BinaryNot(m.Word64And(m.Parameter(0), m.Int64Constant(mask))),
- &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
- }
}
TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
TRACED_FORRANGE(int, bit, 0, 63) {
uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ RawMachineLabel a, b;
m.Branch(m.Word64And(m.Int64Constant(mask), m.Parameter(0)), &a, &b);
m.Bind(&a);
m.Return(m.Int32Constant(1));
@@ -937,33 +1213,13 @@
EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
}
-
- TRACED_FORRANGE(int, bit, 0, 63) {
- uint64_t mask = 1L << bit;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- MLabel a, b;
- m.Branch(
- m.Word64BinaryNot(m.Word64And(m.Int64Constant(mask), m.Parameter(0))),
- &a, &b);
- m.Bind(&a);
- m.Return(m.Int32Constant(1));
- m.Bind(&b);
- m.Return(m.Int32Constant(0));
- Stream s = m.Build();
- ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
- EXPECT_EQ(kEqual, s[0]->flags_condition());
- EXPECT_EQ(4U, s[0]->InputCount());
- EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
- EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
- }
}
TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* p0 = m.Parameter(0);
m.Branch(p0, &a, &b);
m.Bind(&a);
@@ -979,8 +1235,8 @@
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* p0 = m.Parameter(0);
m.Branch(m.Word32BinaryNot(p0), &a, &b);
m.Bind(&a);
@@ -1112,7 +1368,7 @@
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
StreamBuilder m(this, type, type, type);
- MLabel a, b;
+ RawMachineLabel a, b;
Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -1134,7 +1390,7 @@
const MachineType type = dpi.machine_type;
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
StreamBuilder m(this, type, type);
- MLabel a, b;
+ RawMachineLabel a, b;
Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -1159,7 +1415,7 @@
TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
1, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -1177,7 +1433,7 @@
TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Projection(
0, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
Stream s = m.Build();
@@ -1194,7 +1450,7 @@
TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
Stream s = m.Build();
@@ -1212,8 +1468,8 @@
TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
- MLabel a, b;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ RawMachineLabel a, b;
Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
m.Branch(m.Projection(1, n), &a, &b);
m.Bind(&a);
@@ -1256,7 +1512,8 @@
TEST_P(InstructionSelectorShiftTest, Immediate) {
const Shift shift = GetParam();
const MachineType type = shift.mi.machine_type;
- TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
StreamBuilder m(this, type, type);
m.Return((m.*shift.mi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
@@ -1276,7 +1533,7 @@
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachInt32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1294,7 +1551,7 @@
TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachUint32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1311,7 +1568,7 @@
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
m.Return(t);
@@ -1328,7 +1585,7 @@
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(x)));
m.Return(t);
@@ -1393,10 +1650,10 @@
static const MulDPInst kMulDPInstructions[] = {
{"Int32Mul", &RawMachineAssembler::Int32Mul, &RawMachineAssembler::Int32Add,
&RawMachineAssembler::Int32Sub, kArm64Madd32, kArm64Msub32, kArm64Mneg32,
- kMachInt32},
+ MachineType::Int32()},
{"Int64Mul", &RawMachineAssembler::Int64Mul, &RawMachineAssembler::Int64Add,
&RawMachineAssembler::Int64Sub, kArm64Madd, kArm64Msub, kArm64Mneg,
- kMachInt64}};
+ MachineType::Int64()}};
typedef InstructionSelectorTestWithParam<MulDPInst>
@@ -1478,6 +1735,270 @@
::testing::ValuesIn(kMulDPInstructions));
+TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
+ // x * (2^k + 1) -> x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // (2^k + 1) * x -> x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // x * (2^k + 1) + c -> x + (x << k) + c
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Add(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add32, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // (2^k + 1) * x + c -> x + (x << k) + c
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Add(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add32, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c + x * (2^k + 1) -> c + x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Add(m.Parameter(0),
+ m.Int32Mul(m.Parameter(1), m.Int32Constant((1 << k) + 1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add32, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c + (2^k + 1) * x -> c + x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Add(m.Parameter(0),
+ m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add32, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c - x * (2^k + 1) -> c - x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Sub(m.Parameter(0),
+ m.Int32Mul(m.Parameter(1), m.Int32Constant((1 << k) + 1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Sub32, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c - (2^k + 1) * x -> c - x + (x << k)
+ TRACED_FORRANGE(int32_t, k, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ m.Return(
+ m.Int32Sub(m.Parameter(0),
+ m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Sub32, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int64MulWithImmediate) {
+ // x * (2^k + 1) -> x + (x << k)
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // (2^k + 1) * x -> x + (x << k)
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // x * (2^k + 1) + c -> x + (x << k) + c
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Add(m.Int64Mul(m.Parameter(0), m.Int64Constant((1L << k) + 1)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // (2^k + 1) * x + c -> x + (x << k) + c
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Add(m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(0)),
+ m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c + x * (2^k + 1) -> c + x + (x << k)
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Add(m.Parameter(0),
+ m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c + (2^k + 1) * x -> c + x + (x << k)
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Add(m.Parameter(0),
+ m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Add, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c - x * (2^k + 1) -> c - x + (x << k)
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Sub(m.Parameter(0),
+ m.Int64Mul(m.Parameter(1), m.Int64Constant((1L << k) + 1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Sub, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // c - (2^k + 1) * x -> c - x + (x << k)
+ TRACED_FORRANGE(int64_t, k, 1, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64(),
+ MachineType::Int64());
+ m.Return(
+ m.Int64Sub(m.Parameter(0),
+ m.Int64Mul(m.Int64Constant((1L << k) + 1), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64Sub, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(k, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Floating point instructions.
@@ -1506,7 +2027,8 @@
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1518,6 +2040,44 @@
}
+TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnRight) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type);
+ if (cmp.mi.machine_type == MachineType::Float64()) {
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float64Constant(0.0)));
+ } else {
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Float32Constant(0.0f)));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorFPCmpTest, WithImmediateZeroOnLeft) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type);
+ if (cmp.mi.machine_type == MachineType::Float64()) {
+ m.Return((m.*cmp.mi.constructor)(m.Float64Constant(0.0), m.Parameter(0)));
+ } else {
+ m.Return((m.*cmp.mi.constructor)(m.Float32Constant(0.0f), m.Parameter(0)));
+ }
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.commuted_cond, s[0]->flags_condition());
+}
+
+
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
::testing::ValuesIn(kFPCmpInstructions));
@@ -1568,52 +2128,52 @@
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kArm64Ldrsb,
kArm64Strb,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001, 2121,
2442, 4093, 4094, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kArm64Ldrb,
kArm64Strb,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001, 2121,
2442, 4093, 4094, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kArm64Ldrsh,
kArm64Strh,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098, 4100,
4242, 6786, 8188, 8190}},
- {kMachUint16,
+ {MachineType::Uint16(),
kArm64Ldrh,
kArm64Strh,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098, 4100,
4242, 6786, 8188, 8190}},
- {kMachInt32,
+ {MachineType::Int32(),
kArm64LdrW,
kArm64StrW,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
3276, 3280, 16376, 16380}},
- {kMachUint32,
+ {MachineType::Uint32(),
kArm64LdrW,
kArm64StrW,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
3276, 3280, 16376, 16380}},
- {kMachInt64,
+ {MachineType::Int64(),
kArm64Ldr,
kArm64Str,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
16384, 16392, 32752, 32760}},
- {kMachUint64,
+ {MachineType::Uint64(),
kArm64Ldr,
kArm64Str,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
16384, 16392, 32752, 32760}},
- {kMachFloat32,
+ {MachineType::Float32(),
kArm64LdrS,
kArm64StrS,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192, 8196,
3276, 3280, 16376, 16380}},
- {kMachFloat64,
+ {MachineType::Float64(),
kArm64LdrD,
kArm64StrD,
{-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192, 8200,
@@ -1626,7 +2186,8 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1640,7 +2201,7 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1656,8 +2217,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1671,9 +2234,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
- m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1696,8 +2260,10 @@
// Comparison instructions.
static const MachInst2 kComparisonInstructions[] = {
- {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32, kMachInt32},
- {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp, kMachInt64},
+ {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp,
+ MachineType::Int64()},
};
@@ -1762,7 +2328,7 @@
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1774,7 +2340,7 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1790,7 +2356,7 @@
TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1802,7 +2368,7 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1816,17 +2382,319 @@
}
+TEST_F(InstructionSelectorTest, Word32EqualWithWord32Shift) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Skip non 32-bit shifts or ror operations.
+ if (shift.mi.machine_type != MachineType::Int32() ||
+ shift.mi.arch_opcode == kArm64Ror32) {
+ continue;
+ }
+
+ TRACED_FORRANGE(int32_t, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = (m.*shift.mi.constructor)(p1, m.Int32Constant(imm));
+ m.Return(m.Word32Equal(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ TRACED_FORRANGE(int32_t, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = (m.*shift.mi.constructor)(p1, m.Int32Constant(imm));
+ m.Return(m.Word32Equal(r, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendByte) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = m.Word32And(p1, m.Int32Constant(0xff));
+ m.Return(m.Word32Equal(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = m.Word32And(p1, m.Int32Constant(0xff));
+ m.Return(m.Word32Equal(r, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithUnsignedExtendHalfword) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = m.Word32And(p1, m.Int32Constant(0xffff));
+ m.Return(m.Word32Equal(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = m.Word32And(p1, m.Int32Constant(0xffff));
+ m.Return(m.Word32Equal(r, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_UXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendByte) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r =
+ m.Word32Sar(m.Word32Shl(p1, m.Int32Constant(24)), m.Int32Constant(24));
+ m.Return(m.Word32Equal(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r =
+ m.Word32Sar(m.Word32Shl(p1, m.Int32Constant(24)), m.Int32Constant(24));
+ m.Return(m.Word32Equal(r, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTB, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithSignedExtendHalfword) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r =
+ m.Word32Sar(m.Word32Shl(p1, m.Int32Constant(16)), m.Int32Constant(16));
+ m.Return(m.Word32Equal(p0, r));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r =
+ m.Word32Sar(m.Word32Shl(p1, m.Int32Constant(16)), m.Int32Constant(16));
+ m.Return(m.Word32Equal(r, p0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_SXTH, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualZeroWithWord32Equal) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Word32Equal(p0, p1), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Word32Equal(p0, p1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+ }
+}
+
+namespace {
+
+struct IntegerCmp {
+ MachInst2 mi;
+ FlagsCondition cond;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const IntegerCmp& cmp) {
+ return os << cmp.mi;
+}
+
+
+// ARM64 32-bit integer comparison instructions.
+const IntegerCmp kIntegerCmpInstructions[] = {
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32,
+ MachineType::Int32()},
+ kEqual},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kArm64Cmp32,
+ MachineType::Int32()},
+ kSignedLessThan},
+ {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Int32()},
+ kSignedLessThanOrEqual},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kArm64Cmp32,
+ MachineType::Uint32()},
+ kUnsignedLessThan},
+ {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kArm64Cmp32, MachineType::Uint32()},
+ kUnsignedLessThanOrEqual}};
+
+} // namespace
+
+
+TEST_F(InstructionSelectorTest, Word32CompareNegateWithWord32Shift) {
+ TRACED_FOREACH(IntegerCmp, cmp, kIntegerCmpInstructions) {
+ TRACED_FOREACH(Shift, shift, kShiftInstructions) {
+ // Test 32-bit operations. Ignore ROR shifts, as compare-negate does not
+ // support them.
+ if (shift.mi.machine_type != MachineType::Int32() ||
+ shift.mi.arch_opcode == kArm64Ror32) {
+ continue;
+ }
+
+ TRACED_FORRANGE(int32_t, imm, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* r = (m.*shift.mi.constructor)(p1, m.Int32Constant(imm));
+ m.Return(
+ (m.*cmp.mi.constructor)(p0, m.Int32Sub(m.Int32Constant(0), r)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(shift.mode, s[0]->addressing_mode());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+ }
+ }
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Miscellaneous
static const MachInst2 kLogicalWithNotRHSs[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kArm64Bic32, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kArm64Bic, kMachInt64},
- {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Orn32, kMachInt32},
- {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Orn, kMachInt64},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eon32, kMachInt32},
- {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eon, kMachInt64}};
+ {&RawMachineAssembler::Word32And, "Word32And", kArm64Bic32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kArm64Bic,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Orn32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Orn,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Eon32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Eon,
+ MachineType::Int64()}};
typedef InstructionSelectorTestWithParam<MachInst2>
@@ -1839,11 +2707,11 @@
// Test cases where RHS is Xor(x, -1).
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return((m.*inst.constructor)(
m.Parameter(0), m.Word32Xor(m.Parameter(1), m.Int32Constant(-1))));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return((m.*inst.constructor)(
m.Parameter(0), m.Word64Xor(m.Parameter(1), m.Int64Constant(-1))));
}
@@ -1855,11 +2723,11 @@
}
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return((m.*inst.constructor)(
m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)), m.Parameter(1)));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return((m.*inst.constructor)(
m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)), m.Parameter(1)));
}
@@ -1872,11 +2740,11 @@
// Test cases where RHS is Not(x).
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return(
(m.*inst.constructor)(m.Parameter(0), m.Word32Not(m.Parameter(1))));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return(
(m.*inst.constructor)(m.Parameter(0), m.Word64Not(m.Parameter(1))));
}
@@ -1888,11 +2756,11 @@
}
{
StreamBuilder m(this, type, type, type);
- if (type == kMachInt32) {
+ if (type == MachineType::Int32()) {
m.Return(
(m.*inst.constructor)(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
} else {
- ASSERT_EQ(kMachInt64, type);
+ ASSERT_EQ(MachineType::Int64(), type);
m.Return(
(m.*inst.constructor)(m.Word64Not(m.Parameter(0)), m.Parameter(1)));
}
@@ -1911,7 +2779,7 @@
TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Not(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1922,7 +2790,7 @@
TEST_F(InstructionSelectorTest, Word64NotWithParameter) {
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Not(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1934,7 +2802,7 @@
TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1943,7 +2811,7 @@
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1956,7 +2824,7 @@
TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1965,7 +2833,7 @@
EXPECT_EQ(1U, s[0]->OutputCount());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -1977,14 +2845,17 @@
TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
- TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
- jnk >>= 32 - lsb;
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
- m.Int32Constant(lsb)));
+ m.Int32Constant(shift)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
@@ -1993,14 +2864,15 @@
EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
}
}
- TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
uint32_t jnk = rng()->NextInt();
- jnk >>= 32 - lsb;
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
- m.Int32Constant(lsb)));
+ m.Int32Constant(shift)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
@@ -2013,15 +2885,18 @@
TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
- TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3f;
TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
uint64_t jnk = rng()->NextInt64();
- jnk >>= 64 - lsb;
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
- m.Int64Constant(lsb)));
+ m.Int64Constant(shift)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
@@ -2030,15 +2905,16 @@
EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
}
}
- TRACED_FORRANGE(int32_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3f;
TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
uint64_t jnk = rng()->NextInt64();
- jnk >>= 64 - lsb;
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
uint64_t msk =
((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
- m.Int64Constant(lsb)));
+ m.Int64Constant(shift)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
@@ -2051,11 +2927,14 @@
TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
- TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
m.Int32Constant(msk)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2066,12 +2945,14 @@
EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
}
}
- TRACED_FORRANGE(int32_t, lsb, 1, 31) {
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
TRACED_FORRANGE(int32_t, width, 1, 31) {
uint32_t msk = (1 << width) - 1;
- StreamBuilder m(this, kMachInt32, kMachInt32);
- m.Return(m.Word32And(m.Int32Constant(msk),
- m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
@@ -2085,11 +2966,14 @@
TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
- TRACED_FORRANGE(int64_t, lsb, 1, 63) {
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int64_t, shift, -64, 127) {
+ int64_t lsb = shift & 0x3f;
TRACED_FORRANGE(int64_t, width, 1, 63) {
uint64_t msk = (V8_UINT64_C(1) << width) - 1;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb)),
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(shift)),
m.Int64Constant(msk)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -2100,12 +2984,14 @@
EXPECT_EQ(actual_width, s.ToInt64(s[0]->InputAt(2)));
}
}
- TRACED_FORRANGE(int64_t, lsb, 1, 63) {
+ TRACED_FORRANGE(int64_t, shift, -64, 127) {
+ int64_t lsb = shift & 0x3f;
TRACED_FORRANGE(int64_t, width, 1, 63) {
uint64_t msk = (V8_UINT64_C(1) << width) - 1;
- StreamBuilder m(this, kMachInt64, kMachInt64);
- m.Return(m.Word64And(m.Int64Constant(msk),
- m.Word64Shr(m.Parameter(0), m.Int64Constant(lsb))));
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(
+ m.Word64And(m.Int64Constant(msk),
+ m.Word64Shr(m.Parameter(0), m.Int64Constant(shift))));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(kArm64Ubfx, s[0]->arch_opcode());
@@ -2119,7 +3005,8 @@
TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -2140,37 +3027,323 @@
}
+TEST_F(InstructionSelectorTest, Int32MulHighWithSar) {
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Word32Sar(m.Int32MulHigh(p0, p1), m.Int32Constant(shift));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Smull, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArm64Asr, s[1]->arch_opcode());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ((shift & 0x1f) + 32, s.ToInt64(s[1]->InputAt(1)));
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[1]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulHighWithAdd) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const a = m.Int32Add(m.Int32MulHigh(p0, p1), p0);
+ // Test only one shift constant here, as we're only interested in it being a
+ // 32-bit operation; the shift amount is irrelevant.
+ Node* const n = m.Word32Sar(a, m.Int32Constant(1));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(3U, s.size());
+ EXPECT_EQ(kArm64Smull, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArm64Add, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_Operand2_R_ASR_I, s[1]->addressing_mode());
+ ASSERT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(1)));
+ EXPECT_EQ(32, s.ToInt64(s[1]->InputAt(2)));
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(kArm64Asr32, s[2]->arch_opcode());
+ ASSERT_EQ(2U, s[2]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(0)));
+ EXPECT_EQ(1, s.ToInt64(s[2]->InputAt(1)));
+ ASSERT_EQ(1U, s[2]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[2]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Uint32MulHighWithShr) {
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n =
+ m.Word32Shr(m.Uint32MulHigh(p0, p1), m.Int32Constant(shift));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kArm64Umull, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kArm64Lsr, s[1]->arch_opcode());
+ ASSERT_EQ(2U, s[1]->InputCount());
+ EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+ EXPECT_EQ((shift & 0x1f) + 32, s.ToInt64(s[1]->InputAt(1)));
+ ASSERT_EQ(1U, s[1]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[1]->Output()));
+ }
+}
+
+
TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
- {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const r =
- m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+ Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(shift)),
+ m.Int32Constant(shift));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Sxtb32, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Sbfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
- {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
- Node* const r =
- m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+ Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(shift + 32)),
+ m.Int32Constant(shift + 64));
m.Return(r);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64Sxth32, s[0]->arch_opcode());
- ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(kArm64Sbfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
}
}
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32Shl) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32Shr(m.Word32Shl(p0, m.Int32Constant(shift)),
+ m.Int32Constant(shift));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r = m.Word32Shr(m.Word32Shl(p0, m.Int32Constant(shift + 32)),
+ m.Int32Constant(shift + 64));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfx32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
+ TRACED_FORRANGE(int32_t, shift, 1, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ubfiz32, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ TRACED_FORRANGE(int32_t, shift, 0, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Lsl32, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Clz32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Max) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float32Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float32Max is `(b < a) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Min) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float32Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float32Min is `(a < b) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float32Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float64Max is `(b < a) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float64Min is `(a < b) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Float64Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/branch-elimination-unittest.cc b/test/unittests/compiler/branch-elimination-unittest.cc
new file mode 100644
index 0000000..fcd702c
--- /dev/null
+++ b/test/unittests/compiler/branch-elimination-unittest.cc
@@ -0,0 +1,209 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/branch-elimination.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node-properties.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BranchEliminationTest : public TypedGraphTest {
+ public:
+ BranchEliminationTest()
+ : machine_(zone(), MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kNoFlags) {}
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ void Reduce() {
+ JSOperatorBuilder javascript(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
+ machine());
+ GraphReducer graph_reducer(zone(), graph(), jsgraph.Dead());
+ BranchElimination branch_condition_elimination(&graph_reducer, &jsgraph,
+ zone());
+ graph_reducer.AddReducer(&branch_condition_elimination);
+ graph_reducer.ReduceGraph();
+ }
+
+ private:
+ MachineOperatorBuilder machine_;
+};
+
+
+TEST_F(BranchEliminationTest, NestedBranchSameTrue) {
+ // { return x ? (x ? 1 : 2) : 3; }
+ // should be reduced to
+ // { return x ? 1 : 3; }
+ Node* condition = Parameter(0);
+ Node* outer_branch =
+ graph()->NewNode(common()->Branch(), condition, graph()->start());
+
+ Node* outer_if_true = graph()->NewNode(common()->IfTrue(), outer_branch);
+ Node* inner_branch =
+ graph()->NewNode(common()->Branch(), condition, outer_if_true);
+ Node* inner_if_true = graph()->NewNode(common()->IfTrue(), inner_branch);
+ Node* inner_if_false = graph()->NewNode(common()->IfFalse(), inner_branch);
+ Node* inner_merge =
+ graph()->NewNode(common()->Merge(2), inner_if_true, inner_if_false);
+ Node* inner_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(1), Int32Constant(2), inner_merge);
+
+ Node* outer_if_false = graph()->NewNode(common()->IfFalse(), outer_branch);
+ Node* outer_merge =
+ graph()->NewNode(common()->Merge(2), inner_merge, outer_if_false);
+ Node* outer_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ inner_phi, Int32Constant(3), outer_merge);
+
+ Node* ret = graph()->NewNode(common()->Return(), outer_phi, graph()->start(),
+ outer_merge);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduce();
+
+ // Outer branch should not be rewritten, the inner branch should be discarded.
+ EXPECT_THAT(outer_branch, IsBranch(condition, graph()->start()));
+ EXPECT_THAT(inner_phi,
+ IsPhi(MachineRepresentation::kWord32, IsInt32Constant(1),
+ IsInt32Constant(2), IsMerge(outer_if_true, IsDead())));
+}
+
+
+TEST_F(BranchEliminationTest, NestedBranchSameFalse) {
+ // { return x ? 1 : (x ? 2 : 3); }
+ // should be reduced to
+ // { return x ? 1 : 3; }
+ Node* condition = Parameter(0);
+ Node* outer_branch =
+ graph()->NewNode(common()->Branch(), condition, graph()->start());
+
+ Node* outer_if_true = graph()->NewNode(common()->IfTrue(), outer_branch);
+
+ Node* outer_if_false = graph()->NewNode(common()->IfFalse(), outer_branch);
+ Node* inner_branch =
+ graph()->NewNode(common()->Branch(), condition, outer_if_false);
+ Node* inner_if_true = graph()->NewNode(common()->IfTrue(), inner_branch);
+ Node* inner_if_false = graph()->NewNode(common()->IfFalse(), inner_branch);
+ Node* inner_merge =
+ graph()->NewNode(common()->Merge(2), inner_if_true, inner_if_false);
+ Node* inner_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(2), Int32Constant(3), inner_merge);
+
+ Node* outer_merge =
+ graph()->NewNode(common()->Merge(2), outer_if_true, inner_merge);
+ Node* outer_phi =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(1), inner_phi, outer_merge);
+
+ Node* ret = graph()->NewNode(common()->Return(), outer_phi, graph()->start(),
+ outer_merge);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduce();
+
+ // Outer branch should not be rewritten, the inner branch should be discarded.
+ EXPECT_THAT(outer_branch, IsBranch(condition, graph()->start()));
+ EXPECT_THAT(inner_phi,
+ IsPhi(MachineRepresentation::kWord32, IsInt32Constant(2),
+ IsInt32Constant(3), IsMerge(IsDead(), outer_if_false)));
+}
+
+
+TEST_F(BranchEliminationTest, BranchAfterDiamond) {
+ // { var y = x ? 1 : 2; return y + (x ? 3 : 4); }
+ // should not be reduced.
+ Node* condition = Parameter(0);
+
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(), condition, graph()->start());
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* merge1 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
+ Node* phi1 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(1), Int32Constant(2), merge1);
+
+ Node* branch2 = graph()->NewNode(common()->Branch(), condition, merge1);
+ Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+ Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* merge2 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
+ Node* phi2 =
+ graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
+ Int32Constant(3), Int32Constant(4), merge1);
+
+
+ Node* add = graph()->NewNode(machine()->Int32Add(), phi1, phi2);
+ Node* ret =
+ graph()->NewNode(common()->Return(), add, graph()->start(), merge2);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduce();
+
+ // Neither branch should be rewritten: after the first diamond merges, the
+ // condition's value is no longer known, so the second branch must remain.
+ EXPECT_THAT(branch1, IsBranch(condition, graph()->start()));
+ EXPECT_THAT(branch2, IsBranch(condition, merge1));
+}
+
+
+TEST_F(BranchEliminationTest, BranchInsideLoopSame) {
+ // if (x) while (x) { return 2; } else { return 1; }
+ // should be rewritten to
+ // if (x) while (true) { return 2; } else { return 1; }
+
+ Node* condition = Parameter(0);
+
+ Node* outer_branch =
+ graph()->NewNode(common()->Branch(), condition, graph()->start());
+ Node* outer_if_true = graph()->NewNode(common()->IfTrue(), outer_branch);
+
+
+ Node* loop = graph()->NewNode(common()->Loop(1), outer_if_true);
+ Node* effect =
+ graph()->NewNode(common()->EffectPhi(1), graph()->start(), loop);
+
+ Node* inner_branch = graph()->NewNode(common()->Branch(), condition, loop);
+
+ Node* inner_if_true = graph()->NewNode(common()->IfTrue(), inner_branch);
+ Node* ret1 = graph()->NewNode(common()->Return(), Int32Constant(2), effect,
+ inner_if_true);
+
+ Node* inner_if_false = graph()->NewNode(common()->IfFalse(), inner_branch);
+ loop->AppendInput(zone(), inner_if_false);
+ NodeProperties::ChangeOp(loop, common()->Loop(2));
+ effect->InsertInput(zone(), 1, effect);
+ NodeProperties::ChangeOp(effect, common()->EffectPhi(2));
+
+ Node* outer_if_false = graph()->NewNode(common()->IfFalse(), outer_branch);
+ Node* outer_merge =
+ graph()->NewNode(common()->Merge(2), loop, outer_if_false);
+ Node* outer_ephi = graph()->NewNode(common()->EffectPhi(2), effect,
+ graph()->start(), outer_merge);
+
+ Node* ret2 = graph()->NewNode(common()->Return(), Int32Constant(1),
+ outer_ephi, outer_merge);
+
+ Node* terminate = graph()->NewNode(common()->Terminate(), effect, loop);
+ graph()->SetEnd(graph()->NewNode(common()->End(3), ret1, ret2, terminate));
+
+ Reduce();
+
+ // Outer branch should not be rewritten, the inner branch should be discarded.
+ EXPECT_THAT(outer_branch, IsBranch(condition, graph()->start()));
+ EXPECT_THAT(ret1, IsReturn(IsInt32Constant(2), effect, loop));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/change-lowering-unittest.cc b/test/unittests/compiler/change-lowering-unittest.cc
index 060b1c1..fd0766c 100644
--- a/test/unittests/compiler/change-lowering-unittest.cc
+++ b/test/unittests/compiler/change-lowering-unittest.cc
@@ -6,7 +6,7 @@
#include "src/compiler/change-lowering.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/linkage.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -23,53 +23,26 @@
namespace internal {
namespace compiler {
-class ChangeLoweringTest : public GraphTest {
+class ChangeLoweringTest : public TypedGraphTest {
public:
ChangeLoweringTest() : simplified_(zone()) {}
- ~ChangeLoweringTest() OVERRIDE {}
- virtual MachineType WordRepresentation() const = 0;
+ virtual MachineRepresentation WordRepresentation() const = 0;
protected:
- int HeapNumberValueOffset() const {
- STATIC_ASSERT(HeapNumber::kValueOffset % kApiPointerSize == 0);
- return (HeapNumber::kValueOffset / kApiPointerSize) * PointerSize() -
- kHeapObjectTag;
+ bool Is32() const {
+ return WordRepresentation() == MachineRepresentation::kWord32;
}
- bool Is32() const { return WordRepresentation() == kRepWord32; }
- int PointerSize() const {
- switch (WordRepresentation()) {
- case kRepWord32:
- return 4;
- case kRepWord64:
- return 8;
- default:
- break;
- }
- UNREACHABLE();
- return 0;
- }
- int SmiMaxValue() const { return -(SmiMinValue() + 1); }
- int SmiMinValue() const {
- return static_cast<int>(0xffffffffu << (SmiValueSize() - 1));
- }
- int SmiShiftAmount() const { return kSmiTagSize + SmiShiftSize(); }
- int SmiShiftSize() const {
- return Is32() ? SmiTagging<4>::SmiShiftSize()
- : SmiTagging<8>::SmiShiftSize();
- }
- int SmiValueSize() const {
- return Is32() ? SmiTagging<4>::SmiValueSize()
- : SmiTagging<8>::SmiValueSize();
+ bool Is64() const {
+ return WordRepresentation() == MachineRepresentation::kWord64;
}
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone(), WordRepresentation());
JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(graph(), common(), &javascript, &machine);
- CompilationInfo info(isolate(), zone());
- Linkage linkage(zone(), &info);
- ChangeLowering reducer(&jsgraph, &linkage);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
+ &machine);
+ ChangeLowering reducer(&jsgraph);
return reducer.Reduce(node);
}
@@ -77,20 +50,37 @@
Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
- return IsCall(_, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
- AllocateHeapNumberStub(isolate()).GetCode())),
- IsNumberConstant(BitEq(0.0)), effect_matcher,
- control_matcher);
+ return IsCall(
+ _, IsHeapConstant(AllocateHeapNumberStub(isolate()).GetCode()),
+ IsNumberConstant(BitEq(0.0)), effect_matcher, control_matcher);
+ }
+ Matcher<Node*> IsChangeInt32ToSmi(const Matcher<Node*>& value_matcher) {
+ return Is64() ? IsWord64Shl(IsChangeInt32ToInt64(value_matcher),
+ IsSmiShiftBitsConstant())
+ : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
+ }
+ Matcher<Node*> IsChangeSmiToInt32(const Matcher<Node*>& value_matcher) {
+ return Is64() ? IsTruncateInt64ToInt32(
+ IsWord64Sar(value_matcher, IsSmiShiftBitsConstant()))
+ : IsWord32Sar(value_matcher, IsSmiShiftBitsConstant());
+ }
+ Matcher<Node*> IsChangeUint32ToSmi(const Matcher<Node*>& value_matcher) {
+ return Is64() ? IsWord64Shl(IsChangeUint32ToUint64(value_matcher),
+ IsSmiShiftBitsConstant())
+ : IsWord32Shl(value_matcher, IsSmiShiftBitsConstant());
}
Matcher<Node*> IsLoadHeapNumber(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher) {
- return IsLoad(kMachFloat64, value_matcher,
- IsIntPtrConstant(HeapNumberValueOffset()), graph()->start(),
- control_matcher);
+ return IsLoad(MachineType::Float64(), value_matcher,
+ IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
+ graph()->start(), control_matcher);
}
Matcher<Node*> IsIntPtrConstant(int value) {
return Is32() ? IsInt32Constant(value) : IsInt64Constant(value);
}
+ Matcher<Node*> IsSmiShiftBitsConstant() {
+ return IsIntPtrConstant(kSmiShiftSize + kSmiTagSize);
+ }
Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher) {
return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
@@ -108,65 +98,256 @@
class ChangeLoweringCommonTest
: public ChangeLoweringTest,
- public ::testing::WithParamInterface<MachineType> {
+ public ::testing::WithParamInterface<MachineRepresentation> {
public:
- ~ChangeLoweringCommonTest() OVERRIDE {}
+ ~ChangeLoweringCommonTest() override {}
- MachineType WordRepresentation() const FINAL { return GetParam(); }
+ MachineRepresentation WordRepresentation() const final { return GetParam(); }
};
TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeBitToBool(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsSelect(static_cast<MachineType>(kTypeBool | kRepTagged), val,
- IsTrueConstant(), IsFalseConstant()));
+ Node* value = Parameter(Type::Boolean());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeBitToBool(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsSelect(MachineRepresentation::kTagged, value,
+ IsTrueConstant(), IsFalseConstant()));
}
TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- EXPECT_THAT(reduction.replacement(), IsWordEqual(val, IsTrueConstant()));
+ Node* value = Parameter(Type::Number());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsWordEqual(value, IsTrueConstant()));
}
-TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeFloat64ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* finish = reduction.replacement();
- Capture<Node*> heap_number;
- EXPECT_THAT(
- finish,
- IsFinish(
- AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
- IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsIntPtrConstant(HeapNumberValueOffset()), val,
- CaptureEq(&heap_number), graph()->start())));
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeInt32ToTaggedWithSignedSmall) {
+ Node* value = Parameter(Type::SignedSmall());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeInt32ToTagged(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
}
-TARGET_TEST_P(ChangeLoweringCommonTest, StringAdd) {
- Node* node =
- graph()->NewNode(simplified()->StringAdd(), Parameter(0), Parameter(1));
- Reduction reduction = Reduce(node);
- EXPECT_FALSE(reduction.Changed());
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeUint32ToTaggedWithUnsignedSmall) {
+ Node* value = Parameter(Type::UnsignedSmall());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeUint32ToSmi(value));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedSigned) {
+ Node* value = Parameter(Type::TaggedSigned());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToInt32WithTaggedPointer) {
+ Node* value = Parameter(Type::TaggedPointer());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeFloat64ToInt32(
+ IsLoadHeapNumber(value, graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedSigned) {
+ Node* value = Parameter(Type::TaggedSigned());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeSmiToInt32(value));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeTaggedToUint32WithTaggedPointer) {
+ Node* value = Parameter(Type::TaggedPointer());
+ Reduction r =
+ Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(), value));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeFloat64ToUint32(
+ IsLoadHeapNumber(value, graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldSmi) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::TaggedSigned());
+ Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
+ p0, IsIntPtrConstant(access.offset - access.tag()), p1,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreFieldTagged) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Tagged());
+ Node* store = graph()->NewNode(simplified()->StoreField(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier),
+ p0, IsIntPtrConstant(access.offset - access.tag()), p1,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, LoadField) {
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* load = graph()->NewNode(simplified()->LoadField(access), p0,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(load);
+
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match = IsIntPtrConstant(access.offset - access.tag());
+ EXPECT_THAT(r.replacement(),
+ IsLoad(MachineType::AnyTagged(), p0,
+ IsIntPtrConstant(access.offset - access.tag()),
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementTagged) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* p2 = Parameter(Type::Tagged());
+ Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
+ IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier),
+ p0, index_match, p2, graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StoreElementUint8) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Signed32(), MachineType::Uint8()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* p2 = Parameter(Type::Signed32());
+ Node* store = graph()->NewNode(simplified()->StoreElement(access), p0, p1, p2,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(store);
+
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(),
+ IsStore(StoreRepresentation(MachineRepresentation::kWord8,
+ kNoWriteBarrier),
+ p0, index_match, p2, graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementTagged) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(load);
+
+ const int element_size_shift =
+ ElementSizeLog2Of(access.machine_type.representation());
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(IsWord32Shl(p1, IsInt32Constant(element_size_shift)),
+ IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(), IsLoad(MachineType::AnyTagged(), p0, index_match,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, LoadElementInt8) {
+ ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Type::Signed32(), MachineType::Int8()};
+ Node* p0 = Parameter(Type::TaggedPointer());
+ Node* p1 = Parameter(Type::Signed32());
+ Node* load = graph()->NewNode(simplified()->LoadElement(access), p0, p1,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(load);
+
+ ASSERT_TRUE(r.Changed());
+ Matcher<Node*> index_match =
+ IsInt32Add(p1, IsInt32Constant(access.header_size - access.tag()));
+ if (!Is32()) {
+ index_match = IsChangeUint32ToUint64(index_match);
+ }
+
+ EXPECT_THAT(r.replacement(), IsLoad(MachineType::Int8(), p0, index_match,
+ graph()->start(), graph()->start()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, Allocate) {
+ Node* p0 = Parameter(Type::Signed32());
+ Node* alloc = graph()->NewNode(simplified()->Allocate(TENURED), p0,
+ graph()->start(), graph()->start());
+ Reduction r = Reduce(alloc);
+
+ // Only check that we lowered, but do not specify the exact form since
+ // this is subject to change.
+ ASSERT_TRUE(r.Changed());
}
INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
- ::testing::Values(kRepWord32, kRepWord64));
+ ::testing::Values(MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64));
// -----------------------------------------------------------------------------
@@ -175,32 +356,34 @@
class ChangeLowering32Test : public ChangeLoweringTest {
public:
- ~ChangeLowering32Test() OVERRIDE {}
- MachineType WordRepresentation() const FINAL { return kRepWord32; }
+ ~ChangeLowering32Test() override {}
+ MachineRepresentation WordRepresentation() const final {
+ return MachineRepresentation::kWord32;
+ }
};
TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- NodeProperties::SetBounds(val, Bounds(Type::None(), Type::Signed32()));
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Integral32());
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> add, branch, heap_number, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachAnyTagged,
- IsFinish(AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_true))),
- IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsIntPtrConstant(HeapNumberValueOffset()),
- IsChangeInt32ToFloat64(val),
- CaptureEq(&heap_number), CaptureEq(&if_true))),
- IsProjection(
- 0, AllOf(CaptureEq(&add), IsInt32AddWithOverflow(val, val))),
+ r.replacement(),
+ IsPhi(MachineRepresentation::kTagged,
+ IsFinishRegion(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_true))),
+ IsStore(
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier),
+ CaptureEq(&heap_number),
+ IsIntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag),
+ IsChangeInt32ToFloat64(value), CaptureEq(&heap_number),
+ CaptureEq(&if_true))),
+ IsProjection(0, AllOf(CaptureEq(&add),
+ IsInt32AddWithOverflow(value, value))),
IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
IsIfFalse(AllOf(CaptureEq(&branch),
IsBranch(IsProjection(1, CaptureEq(&add)),
@@ -208,43 +391,28 @@
}
-TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTaggedSmall) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- NodeProperties::SetBounds(val, Bounds(Type::None(), Type::SignedSmall()));
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* change = reduction.replacement();
- Capture<Node*> add, branch, heap_number, if_true;
- EXPECT_THAT(change, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())));
-}
-
-
TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(
- kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(
- IsWord32Sar(val, IsInt32Constant(SmiShiftAmount()))),
- IsMerge(
- AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
+ r.replacement(),
+ IsPhi(MachineRepresentation::kFloat64,
+ IsLoadHeapNumber(value, CaptureEq(&if_true)),
+ IsChangeInt32ToFloat64(IsWord32Sar(
+ value, IsInt32Constant(kSmiTagSize + kSmiShiftSize))),
+ IsMerge(AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(
+ value, IsInt32Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
}
@@ -252,23 +420,22 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Signed32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachInt32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord32,
+ IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -276,23 +443,22 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Unsigned32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachUint32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord32,
+ IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsWord32Sar(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord32And(value, IsInt32Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -300,30 +466,32 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, heap_number, if_false;
EXPECT_THAT(
- phi,
+ r.replacement(),
IsPhi(
- kMachAnyTagged, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())),
- IsFinish(AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_false))),
- IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsInt32Constant(HeapNumberValueOffset()),
- IsChangeUint32ToFloat64(val),
- CaptureEq(&heap_number), CaptureEq(&if_false))),
- IsMerge(
- IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- val, IsInt32Constant(SmiMaxValue())),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+ MachineRepresentation::kTagged,
+ IsWord32Shl(value, IsInt32Constant(kSmiTagSize + kSmiShiftSize)),
+ IsFinishRegion(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+ IsStore(
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier),
+ CaptureEq(&heap_number),
+ IsInt32Constant(HeapNumber::kValueOffset - kHeapObjectTag),
+ IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
+ CaptureEq(&if_false))),
+ IsMerge(IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ value, IsInt32Constant(Smi::kMaxValue)),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
}
@@ -333,20 +501,19 @@
class ChangeLowering64Test : public ChangeLoweringTest {
public:
- ~ChangeLowering64Test() OVERRIDE {}
- MachineType WordRepresentation() const FINAL { return kRepWord64; }
+ ~ChangeLowering64Test() override {}
+ MachineRepresentation WordRepresentation() const final {
+ return MachineRepresentation::kWord64;
+ }
};
TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- EXPECT_THAT(reduction.replacement(),
- IsWord64Shl(IsChangeInt32ToInt64(val),
- IsInt64Constant(SmiShiftAmount())));
+ Node* value = Parameter(Type::Signed32());
+ Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsChangeInt32ToSmi(value));
}
@@ -354,26 +521,24 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(
- kMachFloat64, IsLoadHeapNumber(val, CaptureEq(&if_true)),
- IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
- IsWord64Sar(val, IsInt64Constant(SmiShiftAmount())))),
- IsMerge(
- AllOf(CaptureEq(&if_true),
- IsIfTrue(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
- graph()->start())))),
- IsIfFalse(CaptureEq(&branch)))));
+ r.replacement(),
+ IsPhi(MachineRepresentation::kFloat64,
+ IsLoadHeapNumber(value, CaptureEq(&if_true)),
+ IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(IsWord64Sar(
+ value, IsInt64Constant(kSmiTagSize + kSmiShiftSize)))),
+ IsMerge(AllOf(CaptureEq(&if_true),
+ IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(
+ value, IsInt64Constant(kSmiTagMask)),
+ graph()->start())))),
+ IsIfFalse(CaptureEq(&branch)))));
}
@@ -381,24 +546,23 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Signed32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachInt32,
- IsChangeFloat64ToInt32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord32,
+ IsChangeFloat64ToInt32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -406,24 +570,23 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Unsigned32());
+ Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, if_true;
EXPECT_THAT(
- phi,
- IsPhi(kMachUint32,
- IsChangeFloat64ToUint32(IsLoadHeapNumber(val, CaptureEq(&if_true))),
- IsTruncateInt64ToInt32(
- IsWord64Sar(val, IsInt64Constant(SmiShiftAmount()))),
- IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
- IsIfFalse(AllOf(
- CaptureEq(&branch),
- IsBranch(IsWord64And(val, IsInt64Constant(kSmiTagMask)),
- graph()->start()))))));
+ r.replacement(),
+ IsPhi(
+ MachineRepresentation::kWord32,
+ IsChangeFloat64ToUint32(IsLoadHeapNumber(value, CaptureEq(&if_true))),
+ IsTruncateInt64ToInt32(
+ IsWord64Sar(value, IsInt64Constant(kSmiTagSize + kSmiShiftSize))),
+ IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsWord64And(value, IsInt64Constant(kSmiTagMask)),
+ graph()->start()))))));
}
@@ -431,31 +594,33 @@
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- Node* val = Parameter(0);
- Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
- Reduction reduction = Reduce(node);
- ASSERT_TRUE(reduction.Changed());
-
- Node* phi = reduction.replacement();
+ Node* value = Parameter(Type::Number());
+ Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), value);
+ Reduction r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
Capture<Node*> branch, heap_number, if_false;
EXPECT_THAT(
- phi,
+ r.replacement(),
IsPhi(
- kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
- IsInt64Constant(SmiShiftAmount())),
- IsFinish(AllOf(CaptureEq(&heap_number),
- IsAllocateHeapNumber(_, CaptureEq(&if_false))),
- IsStore(StoreRepresentation(kMachFloat64, kNoWriteBarrier),
- CaptureEq(&heap_number),
- IsInt64Constant(HeapNumberValueOffset()),
- IsChangeUint32ToFloat64(val),
- CaptureEq(&heap_number), CaptureEq(&if_false))),
- IsMerge(
- IsIfTrue(AllOf(CaptureEq(&branch),
- IsBranch(IsUint32LessThanOrEqual(
- val, IsInt32Constant(SmiMaxValue())),
- graph()->start()))),
- AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+ MachineRepresentation::kTagged,
+ IsWord64Shl(IsChangeUint32ToUint64(value),
+ IsInt64Constant(kSmiTagSize + kSmiShiftSize)),
+ IsFinishRegion(
+ AllOf(CaptureEq(&heap_number),
+ IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+ IsStore(
+ StoreRepresentation(MachineRepresentation::kFloat64,
+ kNoWriteBarrier),
+ CaptureEq(&heap_number),
+ IsInt64Constant(HeapNumber::kValueOffset - kHeapObjectTag),
+ IsChangeUint32ToFloat64(value), CaptureEq(&heap_number),
+ CaptureEq(&if_false))),
+ IsMerge(IsIfTrue(AllOf(
+ CaptureEq(&branch),
+ IsBranch(IsUint32LessThanOrEqual(
+ value, IsInt32Constant(Smi::kMaxValue)),
+ graph()->start()))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
}
} // namespace compiler
diff --git a/test/unittests/compiler/coalesced-live-ranges-unittest.cc b/test/unittests/compiler/coalesced-live-ranges-unittest.cc
new file mode 100644
index 0000000..fe8fac4
--- /dev/null
+++ b/test/unittests/compiler/coalesced-live-ranges-unittest.cc
@@ -0,0 +1,268 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/coalesced-live-ranges.h"
+#include "test/unittests/compiler/live-range-builder.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+class CoalescedLiveRangesTest : public TestWithZone {
+ public:
+ CoalescedLiveRangesTest() : TestWithZone(), ranges_(zone()) {}
+ bool HasNoConflicts(const LiveRange* range);
+ bool ConflictsPreciselyWith(const LiveRange* range, int id);
+ bool ConflictsPreciselyWith(const LiveRange* range, int id1, int id2);
+
+ CoalescedLiveRanges& ranges() { return ranges_; }
+ const CoalescedLiveRanges& ranges() const { return ranges_; }
+ bool AllocationsAreValid() const;
+ void RemoveConflicts(LiveRange* range);
+
+ private:
+ typedef ZoneSet<int> LiveRangeIDs;
+ bool IsRangeConflictingWith(const LiveRange* range, const LiveRangeIDs& ids);
+ CoalescedLiveRanges ranges_;
+};
+
+
+bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
+ int id) {
+ LiveRangeIDs set(zone());
+ set.insert(id);
+ return IsRangeConflictingWith(range, set);
+}
+
+
+bool CoalescedLiveRangesTest::ConflictsPreciselyWith(const LiveRange* range,
+ int id1, int id2) {
+ LiveRangeIDs set(zone());
+ set.insert(id1);
+ set.insert(id2);
+ return IsRangeConflictingWith(range, set);
+}
+
+
+bool CoalescedLiveRangesTest::HasNoConflicts(const LiveRange* range) {
+ LiveRangeIDs set(zone());
+ return IsRangeConflictingWith(range, set);
+}
+
+
+void CoalescedLiveRangesTest::RemoveConflicts(LiveRange* range) {
+ auto conflicts = ranges().GetConflicts(range);
+ LiveRangeIDs seen(zone());
+ for (auto c = conflicts.Current(); c != nullptr;
+ c = conflicts.RemoveCurrentAndGetNext()) {
+ int id = c->TopLevel()->vreg();
+ EXPECT_FALSE(seen.count(id) > 0);
+ seen.insert(c->TopLevel()->vreg());
+ }
+}
+
+
+bool CoalescedLiveRangesTest::AllocationsAreValid() const {
+ return ranges().VerifyAllocationsAreValidForTesting();
+}
+
+
+bool CoalescedLiveRangesTest::IsRangeConflictingWith(const LiveRange* range,
+ const LiveRangeIDs& ids) {
+ LiveRangeIDs found_ids(zone());
+
+ auto conflicts = ranges().GetConflicts(range);
+ for (auto conflict = conflicts.Current(); conflict != nullptr;
+ conflict = conflicts.GetNext()) {
+ found_ids.insert(conflict->TopLevel()->vreg());
+ }
+ return found_ids == ids;
+}
+
+
+TEST_F(CoalescedLiveRangesTest, VisitEmptyAllocations) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ASSERT_TRUE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ ASSERT_TRUE(HasNoConflicts(range));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterAllocations) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(5, 6);
+ ranges().AllocateRange(range);
+ ASSERT_FALSE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 2);
+ ASSERT_TRUE(HasNoConflicts(query));
+ query = TestRangeBuilder(zone()).Id(3).Build(1, 5);
+ ASSERT_TRUE(HasNoConflicts(query));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, CandidateBeforeAfterManyAllocations) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(5, 7).Add(10, 12).Build();
+ ranges().AllocateRange(range);
+ ASSERT_FALSE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ LiveRange* query =
+ TestRangeBuilder(zone()).Id(2).Add(1, 2).Add(13, 15).Build();
+ ASSERT_TRUE(HasNoConflicts(query));
+ query = TestRangeBuilder(zone()).Id(3).Add(1, 5).Add(12, 15).Build();
+ ASSERT_TRUE(HasNoConflicts(query));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, SelfConflictsPreciselyWithSelf) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ranges().AllocateRange(range);
+ ASSERT_FALSE(ranges().empty());
+ ASSERT_TRUE(AllocationsAreValid());
+ ASSERT_TRUE(ConflictsPreciselyWith(range, 1));
+ range = TestRangeBuilder(zone()).Id(2).Build(8, 10);
+ ranges().AllocateRange(range);
+ ASSERT_TRUE(ConflictsPreciselyWith(range, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryStartsBeforeConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 3);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+ range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
+ ranges().AllocateRange(range);
+ query = TestRangeBuilder(zone()).Id(4).Build(6, 9);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryStartsInConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 5);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(3, 6);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+ range = TestRangeBuilder(zone()).Id(3).Build(8, 10);
+ ranges().AllocateRange(range);
+ query = TestRangeBuilder(zone()).Id(4).Build(9, 11);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 3));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryContainedInConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 3);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryContainsConflict) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(2, 3);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(1, 5);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsSameRange) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(7, 9).Add(20, 25).Build();
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(2).Build(2, 8);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryCoversManyIntervalsDifferentRanges) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(20, 25).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(7, 10);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(2, 22);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, QueryFitsInGaps) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 15).Add(20, 25).Build();
+ ranges().AllocateRange(range);
+ LiveRange* query =
+ TestRangeBuilder(zone()).Id(3).Add(5, 10).Add(16, 19).Add(27, 30).Build();
+ ASSERT_TRUE(HasNoConflicts(query));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictBefore) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Add(1, 4).Add(5, 6).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(3, 7);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictAfter) {
+ LiveRange* range = TestRangeBuilder(zone()).Id(1).Build(1, 5);
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Add(40, 50).Add(60, 70).Build();
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(45, 60);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 1));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictStraddle) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(10, 20).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteConflictManyOverlapsBefore) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(10, 20).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(40, 50);
+ ranges().AllocateRange(range);
+ LiveRange* query = TestRangeBuilder(zone()).Id(3).Build(4, 15);
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+TEST_F(CoalescedLiveRangesTest, DeleteWhenConflictRepeatsAfterNonConflict) {
+ LiveRange* range =
+ TestRangeBuilder(zone()).Id(1).Add(1, 5).Add(6, 10).Add(20, 30).Build();
+ ranges().AllocateRange(range);
+ range = TestRangeBuilder(zone()).Id(2).Build(12, 15);
+ ranges().AllocateRange(range);
+ LiveRange* query =
+ TestRangeBuilder(zone()).Id(3).Add(1, 8).Add(22, 25).Build();
+ RemoveConflicts(query);
+ query = TestRangeBuilder(zone()).Id(4).Build(0, 60);
+ ASSERT_TRUE(ConflictsPreciselyWith(query, 2));
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/common-operator-reducer-unittest.cc b/test/unittests/compiler/common-operator-reducer-unittest.cc
index c713815..1c16370 100644
--- a/test/unittests/compiler/common-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -4,8 +4,15 @@
#include "src/compiler/common-operator.h"
#include "src/compiler/common-operator-reducer.h"
-#include "src/compiler/machine-type.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/machine-type.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using testing::StrictMock;
namespace v8 {
namespace internal {
@@ -14,14 +21,31 @@
class CommonOperatorReducerTest : public GraphTest {
public:
explicit CommonOperatorReducerTest(int num_parameters = 1)
- : GraphTest(num_parameters) {}
- ~CommonOperatorReducerTest() OVERRIDE {}
+ : GraphTest(num_parameters), machine_(zone()), simplified_(zone()) {}
+ ~CommonOperatorReducerTest() override {}
protected:
- Reduction Reduce(Node* node) {
- CommonOperatorReducer reducer;
+ Reduction Reduce(
+ AdvancedReducer::Editor* editor, Node* node,
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags) {
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
+ CommonOperatorReducer reducer(editor, graph(), common(), &machine);
return reducer.Reduce(node);
}
+
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kNoFlags) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ return Reduce(&editor, node, flags);
+ }
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+ SimplifiedOperatorBuilder simplified_;
};
@@ -31,11 +55,11 @@
BranchHint::kTrue};
-const MachineType kMachineTypes[] = {
- kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
- kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged, kRepBit, kRepWord8, kRepWord16,
- kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged};
+const MachineRepresentation kMachineRepresentations[] = {
+ MachineRepresentation::kBit, MachineRepresentation::kWord8,
+ MachineRepresentation::kWord16, MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64, MachineRepresentation::kFloat32,
+ MachineRepresentation::kFloat64, MachineRepresentation::kTagged};
const Operator kOp0(0, Operator::kNoProperties, "Op0", 0, 0, 0, 1, 1, 0);
@@ -44,44 +68,233 @@
// -----------------------------------------------------------------------------
+// Branch
+
+
+TEST_F(CommonOperatorReducerTest, BranchWithInt32ZeroConstant) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch =
+ graph()->NewNode(common()->Branch(hint), Int32Constant(0), control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_true, IsDead()));
+ EXPECT_CALL(editor, Replace(if_false, control));
+ Reduction const r = Reduce(&editor, branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(CommonOperatorReducerTest, BranchWithInt32OneConstant) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch =
+ graph()->NewNode(common()->Branch(hint), Int32Constant(1), control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_true, control));
+ EXPECT_CALL(editor, Replace(if_false, IsDead()));
+ Reduction const r = Reduce(&editor, branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(CommonOperatorReducerTest, BranchWithInt64ZeroConstant) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch =
+ graph()->NewNode(common()->Branch(hint), Int64Constant(0), control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_true, IsDead()));
+ EXPECT_CALL(editor, Replace(if_false, control));
+ Reduction const r = Reduce(&editor, branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(CommonOperatorReducerTest, BranchWithInt64OneConstant) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch =
+ graph()->NewNode(common()->Branch(hint), Int64Constant(1), control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_true, control));
+ EXPECT_CALL(editor, Replace(if_false, IsDead()));
+ Reduction const r = Reduce(&editor, branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(CommonOperatorReducerTest, BranchWithFalseConstant) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch =
+ graph()->NewNode(common()->Branch(hint), FalseConstant(), control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_true, IsDead()));
+ EXPECT_CALL(editor, Replace(if_false, control));
+ Reduction const r = Reduce(&editor, branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(CommonOperatorReducerTest, BranchWithTrueConstant) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch =
+ graph()->NewNode(common()->Branch(hint), TrueConstant(), control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(if_true, control));
+ EXPECT_CALL(editor, Replace(if_false, IsDead()));
+ Reduction const r = Reduce(&editor, branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(CommonOperatorReducerTest, BranchWithBooleanNot) {
+ Node* const value = Parameter(0);
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ Node* const control = graph()->start();
+ Node* const branch = graph()->NewNode(
+ common()->Branch(hint),
+ graph()->NewNode(simplified()->BooleanNot(), value), control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Reduction const r = Reduce(branch);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(branch, r.replacement());
+ EXPECT_THAT(branch, IsBranch(value, control));
+ EXPECT_THAT(if_false, IsIfTrue(branch));
+ EXPECT_THAT(if_true, IsIfFalse(branch));
+ EXPECT_EQ(NegateBranchHint(hint), BranchHintOf(branch->op()));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Merge
+
+
+TEST_F(CommonOperatorReducerTest, MergeOfUnusedDiamond0) {
+ Node* const value = Parameter(0);
+ Node* const control = graph()->start();
+ Node* const branch = graph()->NewNode(common()->Branch(), value, control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->Merge(2), if_true, if_false));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(control, r.replacement());
+ EXPECT_THAT(branch, IsDead());
+}
+
+
+TEST_F(CommonOperatorReducerTest, MergeOfUnusedDiamond1) {
+ Node* const value = Parameter(0);
+ Node* const control = graph()->start();
+ Node* const branch = graph()->NewNode(common()->Branch(), value, control);
+ Node* const if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* const if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->Merge(2), if_false, if_true));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(control, r.replacement());
+ EXPECT_THAT(branch, IsDead());
+}
+
+
+// -----------------------------------------------------------------------------
// EffectPhi
-TEST_F(CommonOperatorReducerTest, RedundantEffectPhi) {
+TEST_F(CommonOperatorReducerTest, EffectPhiWithMerge) {
const int kMaxInputs = 64;
Node* inputs[kMaxInputs];
Node* const input = graph()->NewNode(&kOp0);
TRACED_FORRANGE(int, input_count, 2, kMaxInputs - 1) {
int const value_input_count = input_count - 1;
for (int i = 0; i < value_input_count; ++i) {
+ inputs[i] = graph()->start();
+ }
+ Node* const merge = graph()->NewNode(common()->Merge(value_input_count),
+ value_input_count, inputs);
+ for (int i = 0; i < value_input_count; ++i) {
inputs[i] = input;
}
- inputs[value_input_count] = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- common()->EffectPhi(value_input_count), input_count, inputs));
+ inputs[value_input_count] = merge;
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r =
+ Reduce(&editor, graph()->NewNode(common()->EffectPhi(value_input_count),
+ input_count, inputs));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
}
+TEST_F(CommonOperatorReducerTest, EffectPhiWithLoop) {
+ Node* const e0 = graph()->NewNode(&kOp0);
+ Node* const loop =
+ graph()->NewNode(common()->Loop(2), graph()->start(), graph()->start());
+ loop->ReplaceInput(1, loop);
+ Node* const ephi = graph()->NewNode(common()->EffectPhi(2), e0, e0, loop);
+ ephi->ReplaceInput(1, ephi);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(loop));
+ Reduction const r = Reduce(&editor, ephi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(e0, r.replacement());
+}
+
+
// -----------------------------------------------------------------------------
// Phi
-TEST_F(CommonOperatorReducerTest, RedundantPhi) {
+TEST_F(CommonOperatorReducerTest, PhiWithMerge) {
const int kMaxInputs = 64;
Node* inputs[kMaxInputs];
Node* const input = graph()->NewNode(&kOp0);
TRACED_FORRANGE(int, input_count, 2, kMaxInputs - 1) {
int const value_input_count = input_count - 1;
- TRACED_FOREACH(MachineType, type, kMachineTypes) {
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
+ for (int i = 0; i < value_input_count; ++i) {
+ inputs[i] = graph()->start();
+ }
+ Node* const merge = graph()->NewNode(common()->Merge(value_input_count),
+ value_input_count, inputs);
for (int i = 0; i < value_input_count; ++i) {
inputs[i] = input;
}
- inputs[value_input_count] = graph()->start();
- Reduction r = Reduce(graph()->NewNode(
- common()->Phi(type, value_input_count), input_count, inputs));
+ inputs[value_input_count] = merge;
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r = Reduce(
+ &editor, graph()->NewNode(common()->Phi(rep, value_input_count),
+ input_count, inputs));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
@@ -89,22 +302,324 @@
}
+TEST_F(CommonOperatorReducerTest, PhiWithLoop) {
+ Node* const p0 = Parameter(0);
+ Node* const loop =
+ graph()->NewNode(common()->Loop(2), graph()->start(), graph()->start());
+ loop->ReplaceInput(1, loop);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p0, loop);
+ phi->ReplaceInput(1, phi);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(loop));
+ Reduction const r = Reduce(&editor, phi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float32Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), c0, p0);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = p0;
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(machine()->Float32Sub(), c0, p0);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat32, 2), vtrue, vfalse, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r = Reduce(&editor, phi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float64Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), c0, p0);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* vtrue = p0;
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* vfalse = graph()->NewNode(machine()->Float64Sub(), c0, p0);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), vtrue, vfalse, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r = Reduce(&editor, phi);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat32, 2), p1, p0, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), p1, p0, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat32Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat32, 2), p0, p1, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat32Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
+}
+
+
+TEST_F(CommonOperatorReducerTest, PhiToFloat64Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* branch = graph()->NewNode(common()->Branch(), check, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kFloat64, 2), p0, p1, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(merge));
+ Reduction r = Reduce(&editor, phi, MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
+}
+
+
+// -----------------------------------------------------------------------------
+// Return
+
+
+TEST_F(CommonOperatorReducerTest, ReturnWithPhiAndEffectPhiAndMerge) {
+ Node* cond = Parameter(2);
+ Node* branch = graph()->NewNode(common()->Branch(), cond, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* etrue = graph()->start();
+ Node* vtrue = Parameter(0);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* efalse = graph()->start();
+ Node* vfalse = Parameter(1);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ vtrue, vfalse, merge);
+ Node* ret = graph()->NewNode(common()->Return(), phi, ephi, merge);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(merge, IsDead()));
+ Reduction const r = Reduce(&editor, ret);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ EXPECT_THAT(graph()->end(), IsEnd(ret, IsReturn(vtrue, etrue, if_true),
+ IsReturn(vfalse, efalse, if_false)));
+}
+
+
// -----------------------------------------------------------------------------
// Select
-TEST_F(CommonOperatorReducerTest, RedundantSelect) {
+TEST_F(CommonOperatorReducerTest, SelectWithSameThenAndElse) {
Node* const input = graph()->NewNode(&kOp0);
TRACED_FOREACH(BranchHint, hint, kBranchHints) {
- TRACED_FOREACH(MachineType, type, kMachineTypes) {
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
Reduction r = Reduce(
- graph()->NewNode(common()->Select(type, hint), input, input, input));
+ graph()->NewNode(common()->Select(rep, hint), input, input, input));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
}
}
+
+TEST_F(CommonOperatorReducerTest, SelectWithInt32ZeroConstant) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int32Constant(0), p0, p1);
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p1, r.replacement());
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectWithInt32OneConstant) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int32Constant(1), p0, p1);
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectWithInt64ZeroConstant) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int64Constant(0), p0, p1);
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p1, r.replacement());
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectWithInt64OneConstant) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ Int64Constant(1), p0, p1);
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectWithFalseConstant) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+ FalseConstant(), p0, p1);
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p1, r.replacement());
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectWithTrueConstant) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kTagged), TrueConstant(), p0, p1);
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float32Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), c0, p0);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kFloat32), check,
+ p0, graph()->NewNode(machine()->Float32Sub(), c0, p0));
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Abs) {
+ Node* p0 = Parameter(0);
+ Node* c0 = Float64Constant(0.0);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), c0, p0);
+ Node* select =
+ graph()->NewNode(common()->Select(MachineRepresentation::kFloat64), check,
+ p0, graph()->NewNode(machine()->Float64Sub(), c0, p0));
+ Reduction r = Reduce(select);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Abs(p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat32), check, p1, p0);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Max) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64), check, p1, p0);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Max);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Max(p1, p0));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat32Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float32LessThan(), p0, p1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat32), check, p0, p1);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat32Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Min(p0, p1));
+}
+
+
+TEST_F(CommonOperatorReducerTest, SelectToFloat64Min) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* check = graph()->NewNode(machine()->Float64LessThan(), p0, p1);
+ Node* select = graph()->NewNode(
+ common()->Select(MachineRepresentation::kFloat64), check, p0, p1);
+ Reduction r = Reduce(select, MachineOperatorBuilder::kFloat64Min);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Min(p0, p1));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/common-operator-unittest.cc b/test/unittests/compiler/common-operator-unittest.cc
index d0ac145..0a55a2e 100644
--- a/test/unittests/compiler/common-operator-unittest.cc
+++ b/test/unittests/compiler/common-operator-unittest.cc
@@ -28,6 +28,7 @@
int value_input_count;
int effect_input_count;
int control_input_count;
+ int value_output_count;
int effect_output_count;
int control_output_count;
};
@@ -39,19 +40,20 @@
const SharedOperator kSharedOperators[] = {
-#define SHARED(Name, properties, value_input_count, effect_input_count, \
- control_input_count, effect_output_count, control_output_count) \
- { \
- &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties, \
- value_input_count, effect_input_count, control_input_count, \
- effect_output_count, control_output_count \
+#define SHARED(Name, properties, value_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count) \
+ { \
+ &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties, \
+ value_input_count, effect_input_count, control_input_count, \
+ value_output_count, effect_output_count, control_output_count \
}
- SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
- SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
- SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
- SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
- SHARED(Throw, Operator::kFoldable, 1, 1, 1, 0, 1),
- SHARED(Return, Operator::kNoProperties, 1, 1, 1, 0, 1)
+ SHARED(Dead, Operator::kFoldable, 0, 0, 0, 1, 1, 1),
+ SHARED(IfTrue, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(IfFalse, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(IfSuccess, Operator::kKontrol, 0, 0, 1, 0, 0, 1),
+ SHARED(Throw, Operator::kKontrol, 1, 1, 1, 0, 0, 1),
+ SHARED(Terminate, Operator::kKontrol, 0, 1, 1, 0, 0, 1)
#undef SHARED
};
@@ -83,7 +85,7 @@
sop.value_input_count + sop.effect_input_count + sop.control_input_count,
OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(sop.value_output_count, op->ValueOutputCount());
EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
EXPECT_EQ(sop.control_output_count, op->ControlOutputCount());
}
@@ -118,7 +120,7 @@
class CommonOperatorTest : public TestWithZone {
public:
CommonOperatorTest() : common_(zone()) {}
- ~CommonOperatorTest() OVERRIDE {}
+ ~CommonOperatorTest() override {}
CommonOperatorBuilder* common() { return &common_; }
@@ -130,6 +132,9 @@
const int kArguments[] = {1, 5, 6, 42, 100, 10000, 65000};
+const size_t kCases[] = {3, 4, 100, 255, 1024, 65000};
+
+
const float kFloatValues[] = {-std::numeric_limits<float>::infinity(),
std::numeric_limits<float>::min(),
-1.0f,
@@ -142,29 +147,67 @@
std::numeric_limits<float>::signaling_NaN()};
-const double kDoubleValues[] = {-std::numeric_limits<double>::infinity(),
- std::numeric_limits<double>::min(),
- -1.0,
- -0.0,
- 0.0,
- 1.0,
- std::numeric_limits<double>::max(),
- std::numeric_limits<double>::infinity(),
- std::numeric_limits<double>::quiet_NaN(),
- std::numeric_limits<double>::signaling_NaN()};
+const size_t kInputCounts[] = {3, 4, 100, 255, 1024, 65000};
-const BranchHint kHints[] = {BranchHint::kNone, BranchHint::kTrue,
- BranchHint::kFalse};
+const int32_t kInt32Values[] = {
+ std::numeric_limits<int32_t>::min(), -1914954528, -1698749618, -1578693386,
+ -1577976073, -1573998034, -1529085059, -1499540537, -1299205097,
+ -1090814845, -938186388, -806828902, -750927650, -520676892, -513661538,
+ -453036354, -433622833, -282638793, -28375, -27788, -22770, -18806, -14173,
+ -11956, -11200, -10212, -8160, -3751, -2758, -1522, -121, -120, -118, -117,
+ -106, -84, -80, -74, -59, -52, -48, -39, -35, -17, -11, -10, -9, -7, -5, 0,
+ 9, 12, 17, 23, 29, 31, 33, 35, 40, 47, 55, 56, 62, 64, 67, 68, 69, 74, 79,
+ 84, 89, 90, 97, 104, 118, 124, 126, 127, 7278, 17787, 24136, 24202, 25570,
+ 26680, 30242, 32399, 420886487, 642166225, 821912648, 822577803, 851385718,
+ 1212241078, 1411419304, 1589626102, 1596437184, 1876245816, 1954730266,
+ 2008792749, 2045320228, std::numeric_limits<int32_t>::max()};
+
+
+const BranchHint kBranchHints[] = {BranchHint::kNone, BranchHint::kTrue,
+ BranchHint::kFalse};
} // namespace
+TEST_F(CommonOperatorTest, End) {
+ TRACED_FOREACH(size_t, input_count, kInputCounts) {
+ const Operator* const op = common()->End(input_count);
+ EXPECT_EQ(IrOpcode::kEnd, op->opcode());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(input_count, static_cast<uint32_t>(op->ControlInputCount()));
+ EXPECT_EQ(input_count, static_cast<uint32_t>(
+ OperatorProperties::GetTotalInputCount(op)));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ControlOutputCount());
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Return) {
+ TRACED_FOREACH(int, input_count, kArguments) {
+ const Operator* const op = common()->Return(input_count);
+ EXPECT_EQ(IrOpcode::kReturn, op->opcode());
+ EXPECT_EQ(Operator::kNoThrow, op->properties());
+ EXPECT_EQ(input_count, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2 + input_count, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ControlOutputCount());
+ }
+}
+
+
TEST_F(CommonOperatorTest, Branch) {
- TRACED_FOREACH(BranchHint, hint, kHints) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
const Operator* const op = common()->Branch(hint);
EXPECT_EQ(IrOpcode::kBranch, op->opcode());
- EXPECT_EQ(Operator::kFoldable, op->properties());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
EXPECT_EQ(hint, BranchHintOf(op));
EXPECT_EQ(1, op->ValueInputCount());
EXPECT_EQ(0, op->EffectInputCount());
@@ -177,17 +220,71 @@
}
+TEST_F(CommonOperatorTest, IfException) {
+ static const IfExceptionHint kIfExceptionHints[] = {
+ IfExceptionHint::kLocallyCaught, IfExceptionHint::kLocallyUncaught};
+ TRACED_FOREACH(IfExceptionHint, hint, kIfExceptionHints) {
+ const Operator* const op = common()->IfException(hint);
+ EXPECT_EQ(IrOpcode::kIfException, op->opcode());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(1, op->ValueOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ControlOutputCount());
+ }
+}
+
+
+TEST_F(CommonOperatorTest, Switch) {
+ TRACED_FOREACH(size_t, cases, kCases) {
+ const Operator* const op = common()->Switch(cases);
+ EXPECT_EQ(IrOpcode::kSwitch, op->opcode());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(static_cast<int>(cases), op->ControlOutputCount());
+ }
+}
+
+
+TEST_F(CommonOperatorTest, IfValue) {
+ TRACED_FOREACH(int32_t, value, kInt32Values) {
+ const Operator* const op = common()->IfValue(value);
+ EXPECT_EQ(IrOpcode::kIfValue, op->opcode());
+ EXPECT_EQ(Operator::kKontrol, op->properties());
+ EXPECT_EQ(value, OpParameter<int32_t>(op));
+ EXPECT_EQ(0, op->ValueInputCount());
+ EXPECT_EQ(0, op->EffectInputCount());
+ EXPECT_EQ(1, op->ControlInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ValueOutputCount());
+ EXPECT_EQ(0, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ControlOutputCount());
+ }
+}
+
+
TEST_F(CommonOperatorTest, Select) {
- static const MachineType kTypes[] = {
- kMachInt8, kMachUint8, kMachInt16, kMachUint16,
- kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachFloat32, kMachFloat64, kMachAnyTagged};
- TRACED_FOREACH(MachineType, type, kTypes) {
- TRACED_FOREACH(BranchHint, hint, kHints) {
- const Operator* const op = common()->Select(type, hint);
+ static const MachineRepresentation kMachineRepresentations[] = {
+ MachineRepresentation::kBit, MachineRepresentation::kWord8,
+ MachineRepresentation::kWord16, MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64, MachineRepresentation::kFloat32,
+ MachineRepresentation::kFloat64, MachineRepresentation::kTagged};
+
+
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
+ TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+ const Operator* const op = common()->Select(rep, hint);
EXPECT_EQ(IrOpcode::kSelect, op->opcode());
EXPECT_EQ(Operator::kPure, op->properties());
- EXPECT_EQ(type, SelectParametersOf(op).type());
+ EXPECT_EQ(rep, SelectParametersOf(op).representation());
EXPECT_EQ(hint, SelectParametersOf(op).hint());
EXPECT_EQ(3, op->ValueInputCount());
EXPECT_EQ(0, op->EffectInputCount());
@@ -264,28 +361,24 @@
}
-TEST_F(CommonOperatorTest, ValueEffect) {
- TRACED_FOREACH(int, arguments, kArguments) {
- const Operator* op = common()->ValueEffect(arguments);
- EXPECT_EQ(arguments, op->ValueInputCount());
- EXPECT_EQ(arguments, OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ControlOutputCount());
- EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(0, op->ValueOutputCount());
- }
+TEST_F(CommonOperatorTest, BeginRegion) {
+ const Operator* op = common()->BeginRegion();
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(1, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(0, op->ValueOutputCount());
}
-TEST_F(CommonOperatorTest, Finish) {
- TRACED_FOREACH(int, arguments, kArguments) {
- const Operator* op = common()->Finish(arguments);
- EXPECT_EQ(1, op->ValueInputCount());
- EXPECT_EQ(arguments, op->EffectInputCount());
- EXPECT_EQ(arguments + 1, OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ControlOutputCount());
- EXPECT_EQ(0, op->EffectOutputCount());
- EXPECT_EQ(1, op->ValueOutputCount());
- }
+TEST_F(CommonOperatorTest, FinishRegion) {
+ const Operator* op = common()->FinishRegion();
+ EXPECT_EQ(1, op->ValueInputCount());
+ EXPECT_EQ(1, op->EffectInputCount());
+ EXPECT_EQ(2, OperatorProperties::GetTotalInputCount(op));
+ EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(1, op->EffectOutputCount());
+ EXPECT_EQ(1, op->ValueOutputCount());
}
} // namespace compiler
diff --git a/test/unittests/compiler/compiler-test-utils.h b/test/unittests/compiler/compiler-test-utils.h
index 6ce28f9..7873c96 100644
--- a/test/unittests/compiler/compiler-test-utils.h
+++ b/test/unittests/compiler/compiler-test-utils.h
@@ -14,41 +14,25 @@
// The TARGET_TEST(Case, Name) macro works just like
// TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TEST(Case, Name) TEST(Case, Name)
-#else
-#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
-#endif
// The TARGET_TEST_F(Case, Name) macro works just like
// TEST_F(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
-#else
-#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
-#endif
// The TARGET_TEST_P(Case, Name) macro works just like
// TEST_P(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
-#else
-#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
-#endif
// The TARGET_TYPED_TEST(Case, Name) macro works just like
// TYPED_TEST(Case, Name), except that the test is disabled
// if the platform is not a supported TurboFan target.
-#if V8_TURBOFAN_TARGET
#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
-#else
-#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
-#endif
} // namespace compiler
} // namespace internal
diff --git a/test/unittests/compiler/control-equivalence-unittest.cc b/test/unittests/compiler/control-equivalence-unittest.cc
index 56b4a2b..a87f760 100644
--- a/test/unittests/compiler/control-equivalence-unittest.cc
+++ b/test/unittests/compiler/control-equivalence-unittest.cc
@@ -5,7 +5,8 @@
#include "src/bit-vector.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph-visualizer.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/source-position.h"
#include "src/zone-containers.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -17,7 +18,7 @@
do { \
Node* __n[] = {__VA_ARGS__}; \
ASSERT_TRUE(IsEquivalenceClass(arraysize(__n), __n)); \
- } while (false);
+ } while (false)
class ControlEquivalenceTest : public GraphTest {
public:
@@ -27,10 +28,11 @@
protected:
void ComputeEquivalence(Node* node) {
- graph()->SetEnd(graph()->NewNode(common()->End(), node));
+ graph()->SetEnd(graph()->NewNode(common()->End(1), node));
if (FLAG_trace_turbo) {
OFStream os(stdout);
- os << AsDOT(*graph());
+ SourcePositionTable table(graph());
+ os << AsJSON(*graph(), &table);
}
ControlEquivalence equivalence(zone(), graph());
equivalence.Run(node);
@@ -41,7 +43,7 @@
}
bool IsEquivalenceClass(size_t length, Node** nodes) {
- BitVector in_class(graph()->NodeCount(), zone());
+ BitVector in_class(static_cast<int>(graph()->NodeCount()), zone());
size_t expected_class = classes_[nodes[0]->id()];
for (size_t i = 0; i < length; ++i) {
in_class.Add(nodes[i]->id());
@@ -70,6 +72,10 @@
return Store(graph()->NewNode(common()->IfFalse(), control));
}
+ Node* Merge1(Node* control) {
+ return Store(graph()->NewNode(common()->Merge(1), control));
+ }
+
Node* Merge2(Node* control1, Node* control2) {
return Store(graph()->NewNode(common()->Merge(2), control1, control2));
}
@@ -79,7 +85,7 @@
}
Node* End(Node* control) {
- return Store(graph()->NewNode(common()->End(), control));
+ return Store(graph()->NewNode(common()->End(1), control));
}
private:
@@ -107,10 +113,10 @@
TEST_F(ControlEquivalenceTest, Empty2) {
Node* start = graph()->start();
- Node* end = End(start);
- ComputeEquivalence(end);
+ Node* merge1 = Merge1(start);
+ ComputeEquivalence(merge1);
- ASSERT_EQUIVALENCE(start, end);
+ ASSERT_EQUIVALENCE(start, merge1);
}
diff --git a/test/unittests/compiler/control-flow-optimizer-unittest.cc b/test/unittests/compiler/control-flow-optimizer-unittest.cc
new file mode 100644
index 0000000..a5a3c74
--- /dev/null
+++ b/test/unittests/compiler/control-flow-optimizer-unittest.cc
@@ -0,0 +1,129 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/control-flow-optimizer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlFlowOptimizerTest : public GraphTest {
+ public:
+ explicit ControlFlowOptimizerTest(int num_parameters = 3)
+ : GraphTest(num_parameters), machine_(zone()), javascript_(zone()) {}
+ ~ControlFlowOptimizerTest() override {}
+
+ protected:
+ void Optimize() {
+ ControlFlowOptimizer optimizer(graph(), common(), machine(), zone());
+ optimizer.Optimize();
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+ MachineOperatorBuilder machine_;
+ JSOperatorBuilder javascript_;
+};
+
+
+TEST_F(ControlFlowOptimizerTest, BuildSwitch1) {
+ Node* index = Parameter(0);
+ Node* branch0 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(0)),
+ start());
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* branch1 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(1)),
+ if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* merge =
+ graph()->NewNode(common()->Merge(3), if_true0, if_true1, if_false1);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), merge));
+ Optimize();
+ Capture<Node*> switch_capture;
+ EXPECT_THAT(end(),
+ IsEnd(IsMerge(IsIfValue(0, CaptureEq(&switch_capture)),
+ IsIfValue(1, CaptureEq(&switch_capture)),
+ IsIfDefault(AllOf(CaptureEq(&switch_capture),
+ IsSwitch(index, start()))))));
+}
+
+
+TEST_F(ControlFlowOptimizerTest, BuildSwitch2) {
+ Node* input = Parameter(0);
+ Node* context = Parameter(1);
+ Node* index = graph()->NewNode(javascript()->ToNumber(), input, context,
+ start(), start(), start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), index);
+ Node* branch0 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(0)),
+ if_success);
+ Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* branch1 = graph()->NewNode(
+ common()->Branch(),
+ graph()->NewNode(machine()->Word32Equal(), index, Int32Constant(1)),
+ if_false0);
+ Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+ Node* merge =
+ graph()->NewNode(common()->Merge(3), if_true0, if_true1, if_false1);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), merge));
+ Optimize();
+ Capture<Node*> switch_capture;
+ EXPECT_THAT(
+ end(),
+ IsEnd(IsMerge(IsIfValue(0, CaptureEq(&switch_capture)),
+ IsIfValue(1, CaptureEq(&switch_capture)),
+ IsIfDefault(AllOf(CaptureEq(&switch_capture),
+ IsSwitch(index, IsIfSuccess(index)))))));
+}
+
+
+TEST_F(ControlFlowOptimizerTest, CloneBranch) {
+ Node* cond0 = Parameter(0);
+ Node* cond1 = Parameter(1);
+ Node* cond2 = Parameter(2);
+ Node* branch0 = graph()->NewNode(common()->Branch(), cond0, start());
+ Node* control1 = graph()->NewNode(common()->IfTrue(), branch0);
+ Node* control2 = graph()->NewNode(common()->IfFalse(), branch0);
+ Node* merge0 = graph()->NewNode(common()->Merge(2), control1, control2);
+ Node* phi0 = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2),
+ cond1, cond2, merge0);
+ Node* branch = graph()->NewNode(common()->Branch(), phi0, merge0);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), merge));
+ Optimize();
+ Capture<Node*> branch1_capture, branch2_capture;
+ EXPECT_THAT(
+ end(),
+ IsEnd(IsMerge(IsMerge(IsIfTrue(CaptureEq(&branch1_capture)),
+ IsIfTrue(CaptureEq(&branch2_capture))),
+ IsMerge(IsIfFalse(AllOf(CaptureEq(&branch1_capture),
+ IsBranch(cond1, control1))),
+ IsIfFalse(AllOf(CaptureEq(&branch2_capture),
+ IsBranch(cond2, control2)))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/dead-code-elimination-unittest.cc b/test/unittests/compiler/dead-code-elimination-unittest.cc
new file mode 100644
index 0000000..df93f25
--- /dev/null
+++ b/test/unittests/compiler/dead-code-elimination-unittest.cc
@@ -0,0 +1,377 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/dead-code-elimination.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class DeadCodeEliminationTest : public GraphTest {
+ public:
+ explicit DeadCodeEliminationTest(int num_parameters = 4)
+ : GraphTest(num_parameters) {}
+ ~DeadCodeEliminationTest() override {}
+
+ protected:
+ Reduction Reduce(AdvancedReducer::Editor* editor, Node* node) {
+ DeadCodeElimination reducer(editor, graph(), common());
+ return reducer.Reduce(node);
+ }
+
+ Reduction Reduce(Node* node) {
+ StrictMock<MockAdvancedReducerEditor> editor;
+ return Reduce(&editor, node);
+ }
+};
+
+
+namespace {
+
+const MachineRepresentation kMachineRepresentations[] = {
+ MachineRepresentation::kBit, MachineRepresentation::kWord8,
+ MachineRepresentation::kWord16, MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64, MachineRepresentation::kFloat32,
+ MachineRepresentation::kFloat64, MachineRepresentation::kTagged};
+
+
+const int kMaxInputs = 16;
+
+
+const Operator kOp0(0, Operator::kNoProperties, "Op0", 1, 1, 1, 1, 1, 1);
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// General dead propagation
+
+
+TEST_F(DeadCodeEliminationTest, GeneralDeadPropagation) {
+ Node* const value = Parameter(0);
+ Node* const effect = graph()->start();
+ Node* const dead = graph()->NewNode(common()->Dead());
+ Node* const node = graph()->NewNode(&kOp0, value, effect, dead);
+ Reduction const r = Reduce(node);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
+
+// -----------------------------------------------------------------------------
+// Branch
+
+
+TEST_F(DeadCodeEliminationTest, BranchWithDeadControlInput) {
+ BranchHint const kHints[] = {BranchHint::kNone, BranchHint::kTrue,
+ BranchHint::kFalse};
+ TRACED_FOREACH(BranchHint, hint, kHints) {
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->Branch(hint), Parameter(0),
+ graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// IfTrue
+
+
+TEST_F(DeadCodeEliminationTest, IfTrueWithDeadInput) {
+ Reduction const r = Reduce(
+ graph()->NewNode(common()->IfTrue(), graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
+
+// -----------------------------------------------------------------------------
+// IfFalse
+
+
+TEST_F(DeadCodeEliminationTest, IfFalseWithDeadInput) {
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->IfFalse(), graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
+
+// -----------------------------------------------------------------------------
+// IfSuccess
+
+
+TEST_F(DeadCodeEliminationTest, IfSuccessWithDeadInput) {
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->IfSuccess(), graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
+
+// -----------------------------------------------------------------------------
+// IfException
+
+
+TEST_F(DeadCodeEliminationTest, IfExceptionWithDeadControlInput) {
+ IfExceptionHint const kHints[] = {IfExceptionHint::kLocallyCaught,
+ IfExceptionHint::kLocallyUncaught};
+ TRACED_FOREACH(IfExceptionHint, hint, kHints) {
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->IfException(hint), graph()->start(),
+ graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// End
+
+
+TEST_F(DeadCodeEliminationTest, EndWithDeadAndStart) {
+ Node* const dead = graph()->NewNode(common()->Dead());
+ Node* const start = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(common()->End(2), dead, start));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsEnd(start));
+}
+
+
+TEST_F(DeadCodeEliminationTest, EndWithOnlyDeadInputs) {
+ Node* inputs[kMaxInputs];
+ TRACED_FORRANGE(int, input_count, 1, kMaxInputs - 1) {
+ for (int i = 0; i < input_count; ++i) {
+ inputs[i] = graph()->NewNode(common()->Dead());
+ }
+ Reduction const r = Reduce(
+ graph()->NewNode(common()->End(input_count), input_count, inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Merge
+
+
+TEST_F(DeadCodeEliminationTest, MergeWithOnlyDeadInputs) {
+ Node* inputs[kMaxInputs + 1];
+ TRACED_FORRANGE(int, input_count, 1, kMaxInputs - 1) {
+ for (int i = 0; i < input_count; ++i) {
+ inputs[i] = graph()->NewNode(common()->Dead());
+ }
+ Reduction const r = Reduce(
+ graph()->NewNode(common()->Merge(input_count), input_count, inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(DeadCodeEliminationTest, MergeWithOneLiveAndOneDeadInput) {
+ Node* const v0 = Parameter(0);
+ Node* const v1 = Parameter(1);
+ Node* const c0 =
+ graph()->NewNode(&kOp0, v0, graph()->start(), graph()->start());
+ Node* const c1 = graph()->NewNode(common()->Dead());
+ Node* const e0 = graph()->NewNode(&kOp0, v0, graph()->start(), c0);
+ Node* const e1 = graph()->NewNode(&kOp0, v1, graph()->start(), c1);
+ Node* const merge = graph()->NewNode(common()->Merge(2), c0, c1);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), v0, v1, merge);
+ Node* const ephi = graph()->NewNode(common()->EffectPhi(2), e0, e1, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(phi, v0));
+ EXPECT_CALL(editor, Replace(ephi, e0));
+ Reduction const r = Reduce(&editor, merge);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(c0, r.replacement());
+}
+
+
+TEST_F(DeadCodeEliminationTest, MergeWithTwoLiveAndTwoDeadInputs) {
+ Node* const v0 = Parameter(0);
+ Node* const v1 = Parameter(1);
+ Node* const v2 = Parameter(2);
+ Node* const v3 = Parameter(3);
+ Node* const c0 =
+ graph()->NewNode(&kOp0, v0, graph()->start(), graph()->start());
+ Node* const c1 = graph()->NewNode(common()->Dead());
+ Node* const c2 = graph()->NewNode(common()->Dead());
+ Node* const c3 = graph()->NewNode(&kOp0, v3, graph()->start(), c0);
+ Node* const e0 = graph()->start();
+ Node* const e1 = graph()->NewNode(&kOp0, v1, e0, c0);
+ Node* const e2 = graph()->NewNode(&kOp0, v2, e1, c0);
+ Node* const e3 = graph()->NewNode(&kOp0, v3, graph()->start(), c3);
+ Node* const merge = graph()->NewNode(common()->Merge(4), c0, c1, c2, c3);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 4), v0, v1, v2, v3, merge);
+ Node* const ephi =
+ graph()->NewNode(common()->EffectPhi(4), e0, e1, e2, e3, merge);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(phi));
+ EXPECT_CALL(editor, Revisit(ephi));
+ Reduction const r = Reduce(&editor, merge);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsMerge(c0, c3));
+ EXPECT_THAT(phi,
+ IsPhi(MachineRepresentation::kTagged, v0, v3, r.replacement()));
+ EXPECT_THAT(ephi, IsEffectPhi(e0, e3, r.replacement()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Loop
+
+
+TEST_F(DeadCodeEliminationTest, LoopWithDeadFirstInput) {
+ Node* inputs[kMaxInputs + 1];
+ TRACED_FORRANGE(int, input_count, 1, kMaxInputs - 1) {
+ inputs[0] = graph()->NewNode(common()->Dead());
+ for (int i = 1; i < input_count; ++i) {
+ inputs[i] = graph()->start();
+ }
+ Reduction const r = Reduce(
+ graph()->NewNode(common()->Loop(input_count), input_count, inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(DeadCodeEliminationTest, LoopWithOnlyDeadInputs) {
+ Node* inputs[kMaxInputs + 1];
+ TRACED_FORRANGE(int, input_count, 1, kMaxInputs - 1) {
+ for (int i = 0; i < input_count; ++i) {
+ inputs[i] = graph()->NewNode(common()->Dead());
+ }
+ Reduction const r = Reduce(
+ graph()->NewNode(common()->Loop(input_count), input_count, inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+TEST_F(DeadCodeEliminationTest, LoopWithOneLiveAndOneDeadInput) {
+ Node* const v0 = Parameter(0);
+ Node* const v1 = Parameter(1);
+ Node* const c0 =
+ graph()->NewNode(&kOp0, v0, graph()->start(), graph()->start());
+ Node* const c1 = graph()->NewNode(common()->Dead());
+ Node* const e0 = graph()->NewNode(&kOp0, v0, graph()->start(), c0);
+ Node* const e1 = graph()->NewNode(&kOp0, v1, graph()->start(), c1);
+ Node* const loop = graph()->NewNode(common()->Loop(2), c0, c1);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), v0, v1, loop);
+ Node* const ephi = graph()->NewNode(common()->EffectPhi(2), e0, e1, loop);
+ Node* const terminate = graph()->NewNode(common()->Terminate(), ephi, loop);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Replace(phi, v0));
+ EXPECT_CALL(editor, Replace(ephi, e0));
+ EXPECT_CALL(editor, Replace(terminate, IsDead()));
+ Reduction const r = Reduce(&editor, loop);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(c0, r.replacement());
+}
+
+
+TEST_F(DeadCodeEliminationTest, LoopWithTwoLiveAndTwoDeadInputs) {
+ Node* const v0 = Parameter(0);
+ Node* const v1 = Parameter(1);
+ Node* const v2 = Parameter(2);
+ Node* const v3 = Parameter(3);
+ Node* const c0 =
+ graph()->NewNode(&kOp0, v0, graph()->start(), graph()->start());
+ Node* const c1 = graph()->NewNode(common()->Dead());
+ Node* const c2 = graph()->NewNode(common()->Dead());
+ Node* const c3 = graph()->NewNode(&kOp0, v3, graph()->start(), c0);
+ Node* const e0 = graph()->start();
+ Node* const e1 = graph()->NewNode(&kOp0, v1, e0, c0);
+ Node* const e2 = graph()->NewNode(&kOp0, v2, e1, c0);
+ Node* const e3 = graph()->NewNode(&kOp0, v3, graph()->start(), c3);
+ Node* const loop = graph()->NewNode(common()->Loop(4), c0, c1, c2, c3);
+ Node* const phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 4), v0, v1, v2, v3, loop);
+ Node* const ephi =
+ graph()->NewNode(common()->EffectPhi(4), e0, e1, e2, e3, loop);
+ StrictMock<MockAdvancedReducerEditor> editor;
+ EXPECT_CALL(editor, Revisit(phi));
+ EXPECT_CALL(editor, Revisit(ephi));
+ Reduction const r = Reduce(&editor, loop);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsLoop(c0, c3));
+ EXPECT_THAT(phi,
+ IsPhi(MachineRepresentation::kTagged, v0, v3, r.replacement()));
+ EXPECT_THAT(ephi, IsEffectPhi(e0, e3, r.replacement()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Phi
+
+
+TEST_F(DeadCodeEliminationTest, PhiWithDeadControlInput) {
+ Node* inputs[kMaxInputs + 1];
+ TRACED_FOREACH(MachineRepresentation, rep, kMachineRepresentations) {
+ TRACED_FORRANGE(int, input_count, 1, kMaxInputs) {
+ for (int i = 0; i < input_count; ++i) {
+ inputs[i] = Parameter(i);
+ }
+ inputs[input_count] = graph()->NewNode(common()->Dead());
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->Phi(rep, input_count), input_count + 1, inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// EffectPhi
+
+
+TEST_F(DeadCodeEliminationTest, EffectPhiWithDeadControlInput) {
+ Node* inputs[kMaxInputs + 1];
+ TRACED_FORRANGE(int, input_count, 1, kMaxInputs) {
+ for (int i = 0; i < input_count; ++i) {
+ inputs[i] = graph()->start();
+ }
+ inputs[input_count] = graph()->NewNode(common()->Dead());
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->EffectPhi(input_count), input_count + 1, inputs));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Terminate
+
+
+TEST_F(DeadCodeEliminationTest, TerminateWithDeadControlInput) {
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->Terminate(), graph()->start(),
+ graph()->NewNode(common()->Dead())));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsDead());
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/diamond-unittest.cc b/test/unittests/compiler/diamond-unittest.cc
index c14886f..5b28a00 100644
--- a/test/unittests/compiler/diamond-unittest.cc
+++ b/test/unittests/compiler/diamond-unittest.cc
@@ -114,7 +114,8 @@
Node* p2 = Parameter(2);
Diamond d(graph(), common(), p0);
- MachineType types[] = {kMachAnyTagged, kMachUint32, kMachInt32};
+ MachineRepresentation types[] = {MachineRepresentation::kTagged,
+ MachineRepresentation::kWord32};
for (size_t i = 0; i < arraysize(types); i++) {
Node* phi = d.Phi(types[i], p1, p2);
@@ -128,22 +129,6 @@
}
-TEST_F(DiamondTest, DiamondEffectPhis) {
- Node* p0 = Parameter(0);
- Node* p1 = Parameter(1);
- Node* p2 = Parameter(2);
- Diamond d(graph(), common(), p0);
-
- Node* phi = d.EffectPhi(p1, p2);
-
- EXPECT_THAT(d.branch, IsBranch(p0, graph()->start()));
- EXPECT_THAT(d.if_true, IsIfTrue(d.branch));
- EXPECT_THAT(d.if_false, IsIfFalse(d.branch));
- EXPECT_THAT(d.merge, IsMerge(d.if_true, d.if_false));
- EXPECT_THAT(phi, IsEffectPhi(p1, p2, d.merge));
-}
-
-
TEST_F(DiamondTest, BranchHint) {
Diamond dn(graph(), common(), Parameter(0));
CHECK(BranchHint::kNone == BranchHintOf(dn.branch->op()));
diff --git a/test/unittests/compiler/escape-analysis-unittest.cc b/test/unittests/compiler/escape-analysis-unittest.cc
new file mode 100644
index 0000000..b088367
--- /dev/null
+++ b/test/unittests/compiler/escape-analysis-unittest.cc
@@ -0,0 +1,396 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bit-vector.h"
+#include "src/compiler/escape-analysis.h"
+#include "src/compiler/escape-analysis-reducer.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/types-inl.h"
+#include "src/zone-containers.h"
+#include "test/unittests/compiler/graph-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class EscapeAnalysisTest : public GraphTest {
+ public:
+ EscapeAnalysisTest()
+ : simplified_(zone()),
+ jsgraph_(isolate(), graph(), common(), nullptr, nullptr, nullptr),
+ escape_analysis_(graph(), common(), zone()),
+ effect_(graph()->start()),
+ control_(graph()->start()) {}
+
+ ~EscapeAnalysisTest() {}
+
+ EscapeAnalysis* escape_analysis() { return &escape_analysis_; }
+
+ protected:
+ void Analysis() { escape_analysis_.Run(); }
+
+ void Transformation() {
+ GraphReducer graph_reducer(zone(), graph());
+ EscapeAnalysisReducer escape_reducer(&graph_reducer, &jsgraph_,
+ &escape_analysis_, zone());
+ graph_reducer.AddReducer(&escape_reducer);
+ graph_reducer.ReduceGraph();
+ }
+
+ // ---------------------------------Node Creation Helper----------------------
+
+ Node* BeginRegion(Node* effect = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+
+ return effect_ = graph()->NewNode(common()->BeginRegion(), effect);
+ }
+
+ Node* FinishRegion(Node* value, Node* effect = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ return effect_ = graph()->NewNode(common()->FinishRegion(), value, effect);
+ }
+
+ Node* Allocate(Node* size, Node* effect = nullptr, Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return effect_ = graph()->NewNode(simplified()->Allocate(), size, effect,
+ control);
+ }
+
+ Node* Constant(int num) {
+ return graph()->NewNode(common()->NumberConstant(num));
+ }
+
+ Node* Store(const FieldAccess& access, Node* allocation, Node* value,
+ Node* effect = nullptr, Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return effect_ = graph()->NewNode(simplified()->StoreField(access),
+ allocation, value, effect, control);
+ }
+
+ Node* Load(const FieldAccess& access, Node* from, Node* effect = nullptr,
+ Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return graph()->NewNode(simplified()->LoadField(access), from, effect,
+ control);
+ }
+
+ Node* Return(Node* value, Node* effect = nullptr, Node* control = nullptr) {
+ if (!effect) {
+ effect = effect_;
+ }
+ if (!control) {
+ control = control_;
+ }
+ return control_ =
+ graph()->NewNode(common()->Return(), value, effect, control);
+ }
+
+ void EndGraph() {
+ for (Edge edge : graph()->end()->input_edges()) {
+ if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control_);
+ }
+ }
+ }
+
+ Node* Branch() {
+ return control_ =
+ graph()->NewNode(common()->Branch(), Constant(0), control_);
+ }
+
+ Node* IfTrue() {
+ return control_ = graph()->NewNode(common()->IfTrue(), control_);
+ }
+
+ Node* IfFalse() { return graph()->NewNode(common()->IfFalse(), control_); }
+
+ Node* Merge2(Node* control1, Node* control2) {
+ return control_ = graph()->NewNode(common()->Merge(2), control1, control2);
+ }
+
+ FieldAccess AccessAtIndex(int offset) {
+ FieldAccess access = {kTaggedBase, offset, MaybeHandle<Name>(), Type::Any(),
+ MachineType::AnyTagged()};
+ return access;
+ }
+
+ // ---------------------------------Assertion Helper--------------------------
+
+ void ExpectReplacement(Node* node, Node* rep) {
+ EXPECT_EQ(rep, escape_analysis()->GetReplacement(node));
+ }
+
+ void ExpectReplacementPhi(Node* node, Node* left, Node* right) {
+ Node* rep = escape_analysis()->GetReplacement(node);
+ ASSERT_NE(nullptr, rep);
+ ASSERT_EQ(IrOpcode::kPhi, rep->opcode());
+ EXPECT_EQ(left, NodeProperties::GetValueInput(rep, 0));
+ EXPECT_EQ(right, NodeProperties::GetValueInput(rep, 1));
+ }
+
+ void ExpectVirtual(Node* node) {
+ EXPECT_TRUE(node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kFinishRegion);
+ EXPECT_TRUE(escape_analysis()->IsVirtual(node));
+ }
+
+ void ExpectEscaped(Node* node) {
+ EXPECT_TRUE(node->opcode() == IrOpcode::kAllocate ||
+ node->opcode() == IrOpcode::kFinishRegion);
+ EXPECT_TRUE(escape_analysis()->IsEscaped(node));
+ }
+
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ Node* effect() { return effect_; }
+
+ private:
+ SimplifiedOperatorBuilder simplified_;
+ JSGraph jsgraph_;
+ EscapeAnalysis escape_analysis_;
+
+ Node* effect_;
+ Node* control_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Test cases.
+
+
+TEST_F(EscapeAnalysisTest, StraightNonEscape) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Node* load = Load(AccessAtIndex(0), finish);
+ Node* result = Return(load);
+ EndGraph();
+
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, StraightEscape) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Node* load = Load(AccessAtIndex(0), finish);
+ Node* result = Return(allocation);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), load);
+
+ Analysis();
+
+ ExpectEscaped(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(allocation, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, StoreLoadEscape) {
+ Node* object1 = Constant(1);
+
+ BeginRegion();
+ Node* allocation1 = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation1, object1);
+ Node* finish1 = FinishRegion(allocation1);
+
+ BeginRegion();
+ Node* allocation2 = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation2, finish1);
+ Node* finish2 = FinishRegion(allocation2);
+
+ Node* load = Load(AccessAtIndex(0), finish2);
+ Node* result = Return(load);
+ EndGraph();
+ Analysis();
+
+ ExpectEscaped(allocation1);
+ ExpectVirtual(allocation2);
+ ExpectReplacement(load, finish1);
+
+ Transformation();
+
+ ASSERT_EQ(finish1, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, BranchNonEscape) {
+ Node* object1 = Constant(1);
+ Node* object2 = Constant(2);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* ifTrue = IfTrue();
+ Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish, ifFalse);
+ Node* effect2 = Store(AccessAtIndex(0), allocation, object2, finish, ifTrue);
+ Node* merge = Merge2(ifFalse, ifTrue);
+ Node* phi = graph()->NewNode(common()->EffectPhi(2), effect1, effect2, merge);
+ Node* load = Load(AccessAtIndex(0), finish, phi, merge);
+ Node* result = Return(load, phi);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), result);
+
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacementPhi(load, object1, object2);
+ Node* replacement_phi = escape_analysis()->GetReplacement(load);
+
+ Transformation();
+
+ ASSERT_EQ(replacement_phi, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, DanglingLoadOrder) {
+ Node* object1 = Constant(1);
+ Node* object2 = Constant(2);
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Node* store1 = Store(AccessAtIndex(0), allocation, object1);
+ Node* load1 = Load(AccessAtIndex(0), allocation);
+ Node* store2 = Store(AccessAtIndex(0), allocation, object2);
+ Node* load2 = Load(AccessAtIndex(0), allocation, store1);
+ Node* result = Return(load2);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), store2);
+ graph()->end()->AppendInput(zone(), load1);
+
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load1, object1);
+ ExpectReplacement(load2, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, DeoptReplacement) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize));
+ Store(AccessAtIndex(0), allocation, object1);
+ Node* finish = FinishRegion(allocation);
+ Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
+ Node* state_values2 = graph()->NewNode(common()->StateValues(0));
+ Node* state_values3 = graph()->NewNode(common()->StateValues(0));
+ Node* frame_state = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ nullptr),
+ state_values1, state_values2, state_values3, UndefinedConstant(),
+ graph()->start(), graph()->start());
+ Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect1, ifFalse);
+ Node* ifTrue = IfTrue();
+ Node* load = Load(AccessAtIndex(0), finish, effect1, ifTrue);
+ Node* result = Return(load, effect1, ifTrue);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), deopt);
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+ Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
+ ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
+ ASSERT_EQ(1, object_state->op()->ValueInputCount());
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(object_state, 0));
+}
+
+
+TEST_F(EscapeAnalysisTest, DeoptReplacementIdentity) {
+ Node* object1 = Constant(1);
+ BeginRegion();
+ Node* allocation = Allocate(Constant(kPointerSize * 2));
+ Store(AccessAtIndex(0), allocation, object1);
+ Store(AccessAtIndex(kPointerSize), allocation, allocation);
+ Node* finish = FinishRegion(allocation);
+ Node* effect1 = Store(AccessAtIndex(0), allocation, object1, finish);
+ Branch();
+ Node* ifFalse = IfFalse();
+ Node* state_values1 = graph()->NewNode(common()->StateValues(1), finish);
+ Node* state_values2 = graph()->NewNode(common()->StateValues(1), finish);
+ Node* state_values3 = graph()->NewNode(common()->StateValues(0));
+ Node* frame_state = graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ nullptr),
+ state_values1, state_values2, state_values3, UndefinedConstant(),
+ graph()->start(), graph()->start());
+ Node* deopt = graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager),
+ frame_state, effect1, ifFalse);
+ Node* ifTrue = IfTrue();
+ Node* load = Load(AccessAtIndex(0), finish, effect1, ifTrue);
+ Node* result = Return(load, effect1, ifTrue);
+ EndGraph();
+ graph()->end()->AppendInput(zone(), deopt);
+ Analysis();
+
+ ExpectVirtual(allocation);
+ ExpectReplacement(load, object1);
+
+ Transformation();
+
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(result, 0));
+
+ Node* object_state = NodeProperties::GetValueInput(state_values1, 0);
+ ASSERT_EQ(object_state->opcode(), IrOpcode::kObjectState);
+ ASSERT_EQ(2, object_state->op()->ValueInputCount());
+ ASSERT_EQ(object1, NodeProperties::GetValueInput(object_state, 0));
+ ASSERT_EQ(object_state, NodeProperties::GetValueInput(object_state, 1));
+
+  Node* object_state2 = NodeProperties::GetValueInput(state_values2, 0);
+ ASSERT_EQ(object_state, object_state2);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/graph-reducer-unittest.cc b/test/unittests/compiler/graph-reducer-unittest.cc
index dbdd4bb..8d05c52 100644
--- a/test/unittests/compiler/graph-reducer-unittest.cc
+++ b/test/unittests/compiler/graph-reducer-unittest.cc
@@ -2,43 +2,393 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
-#include "src/compiler/graph-reducer.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/operator.h"
+#include "test/unittests/compiler/graph-reducer-unittest.h"
#include "test/unittests/test-utils.h"
-#include "testing/gmock/include/gmock/gmock.h"
using testing::_;
using testing::DefaultValue;
+using testing::ElementsAre;
using testing::Return;
using testing::Sequence;
using testing::StrictMock;
+using testing::UnorderedElementsAre;
namespace v8 {
namespace internal {
namespace compiler {
+namespace {
+
struct TestOperator : public Operator {
TestOperator(Operator::Opcode opcode, Operator::Properties properties,
- size_t value_in, size_t value_out)
- : Operator(opcode, properties, "TestOp", value_in, 0, 0, value_out, 0,
- 0) {}
+ const char* op_name, size_t value_in, size_t value_out)
+ : Operator(opcode, properties, op_name, value_in, 0, 0, value_out, 0, 0) {
+ }
};
-namespace {
+const uint8_t kOpcodeA0 = 10;
+const uint8_t kOpcodeA1 = 11;
+const uint8_t kOpcodeA2 = 12;
+const uint8_t kOpcodeB0 = 20;
+const uint8_t kOpcodeB1 = 21;
+const uint8_t kOpcodeB2 = 22;
+const uint8_t kOpcodeC0 = 30;
+const uint8_t kOpcodeC1 = 31;
+const uint8_t kOpcodeC2 = 32;
-TestOperator OP0(0, Operator::kNoWrite, 0, 1);
-TestOperator OP1(1, Operator::kNoProperties, 1, 1);
-
+static TestOperator kOpA0(kOpcodeA0, Operator::kNoWrite, "opa0", 0, 1);
+static TestOperator kOpA1(kOpcodeA1, Operator::kNoProperties, "opa1", 1, 1);
+static TestOperator kOpA2(kOpcodeA2, Operator::kNoProperties, "opa2", 2, 1);
+static TestOperator kOpB0(kOpcodeB0, Operator::kNoWrite, "opb0", 0, 0);
+static TestOperator kOpB1(kOpcodeB1, Operator::kNoWrite, "opb1", 1, 0);
+static TestOperator kOpB2(kOpcodeB2, Operator::kNoWrite, "opb2", 2, 0);
+static TestOperator kOpC0(kOpcodeC0, Operator::kNoWrite, "opc0", 0, 0);
+static TestOperator kOpC1(kOpcodeC1, Operator::kNoWrite, "opc1", 1, 0);
+static TestOperator kOpC2(kOpcodeC2, Operator::kNoWrite, "opc2", 2, 0);
struct MockReducer : public Reducer {
MOCK_METHOD1(Reduce, Reduction(Node*));
};
+
+// Replaces all "A" operators with "B" operators without creating new nodes.
+class InPlaceABReducer final : public Reducer {
+ public:
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeA0:
+ EXPECT_EQ(0, node->InputCount());
+ NodeProperties::ChangeOp(node, &kOpB0);
+ return Replace(node);
+ case kOpcodeA1:
+ EXPECT_EQ(1, node->InputCount());
+ NodeProperties::ChangeOp(node, &kOpB1);
+ return Replace(node);
+ case kOpcodeA2:
+ EXPECT_EQ(2, node->InputCount());
+ NodeProperties::ChangeOp(node, &kOpB2);
+ return Replace(node);
+ }
+ return NoChange();
+ }
+};
+
+
+// Replaces all "A" operators with "B" operators by allocating new nodes.
+class NewABReducer final : public Reducer {
+ public:
+ explicit NewABReducer(Graph* graph) : graph_(graph) {}
+
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeA0:
+ EXPECT_EQ(0, node->InputCount());
+ return Replace(graph_->NewNode(&kOpB0));
+ case kOpcodeA1:
+ EXPECT_EQ(1, node->InputCount());
+ return Replace(graph_->NewNode(&kOpB1, node->InputAt(0)));
+ case kOpcodeA2:
+ EXPECT_EQ(2, node->InputCount());
+ return Replace(
+ graph_->NewNode(&kOpB2, node->InputAt(0), node->InputAt(1)));
+ }
+ return NoChange();
+ }
+
+ private:
+ Graph* const graph_;
+};
+
+
+// Wraps all "kOpA0" nodes in "kOpB1" operators by allocating new nodes.
+class A0Wrapper final : public Reducer {
+ public:
+ explicit A0Wrapper(Graph* graph) : graph_(graph) {}
+
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeA0:
+ EXPECT_EQ(0, node->InputCount());
+ return Replace(graph_->NewNode(&kOpB1, node));
+ }
+ return NoChange();
+ }
+
+ private:
+ Graph* const graph_;
+};
+
+
+// Wraps all "kOpB0" nodes in two "kOpC1" operators by allocating new nodes.
+class B0Wrapper final : public Reducer {
+ public:
+ explicit B0Wrapper(Graph* graph) : graph_(graph) {}
+
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeB0:
+ EXPECT_EQ(0, node->InputCount());
+ return Replace(graph_->NewNode(&kOpC1, graph_->NewNode(&kOpC1, node)));
+ }
+ return NoChange();
+ }
+
+ private:
+ Graph* const graph_;
+};
+
+
+// Replaces all "kOpA1" nodes with the first input.
+class A1Forwarder final : public Reducer {
+ public:
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeA1:
+ EXPECT_EQ(1, node->InputCount());
+ return Replace(node->InputAt(0));
+ }
+ return NoChange();
+ }
+};
+
+
+// Replaces all "kOpB1" nodes with the first input.
+class B1Forwarder final : public Reducer {
+ public:
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeB1:
+ EXPECT_EQ(1, node->InputCount());
+ return Replace(node->InputAt(0));
+ }
+ return NoChange();
+ }
+};
+
+
+// Replaces all "B" operators with "C" operators without creating new nodes.
+class InPlaceBCReducer final : public Reducer {
+ public:
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeB0:
+ EXPECT_EQ(0, node->InputCount());
+ NodeProperties::ChangeOp(node, &kOpC0);
+ return Replace(node);
+ case kOpcodeB1:
+ EXPECT_EQ(1, node->InputCount());
+ NodeProperties::ChangeOp(node, &kOpC1);
+ return Replace(node);
+ case kOpcodeB2:
+ EXPECT_EQ(2, node->InputCount());
+ NodeProperties::ChangeOp(node, &kOpC2);
+ return Replace(node);
+ }
+ return NoChange();
+ }
+};
+
+
+// Swaps the inputs to "kOp2A" and "kOp2B" nodes based on ids.
+class AB2Sorter final : public Reducer {
+ public:
+ Reduction Reduce(Node* node) final {
+ switch (node->op()->opcode()) {
+ case kOpcodeA2:
+ case kOpcodeB2:
+ EXPECT_EQ(2, node->InputCount());
+ Node* x = node->InputAt(0);
+ Node* y = node->InputAt(1);
+ if (x->id() > y->id()) {
+ node->ReplaceInput(0, y);
+ node->ReplaceInput(1, x);
+ return Replace(node);
+ }
+ }
+ return NoChange();
+ }
+};
+
} // namespace
+class AdvancedReducerTest : public TestWithZone {
+ public:
+ AdvancedReducerTest() : graph_(zone()) {}
+
+ protected:
+ Graph* graph() { return &graph_; }
+
+ private:
+ Graph graph_;
+};
+
+
+TEST_F(AdvancedReducerTest, Replace) {
+ struct DummyReducer final : public AdvancedReducer {
+ explicit DummyReducer(Editor* editor) : AdvancedReducer(editor) {}
+ Reduction Reduce(Node* node) final {
+ Replace(node, node);
+ return NoChange();
+ }
+ };
+ StrictMock<MockAdvancedReducerEditor> e;
+ DummyReducer r(&e);
+ Node* node0 = graph()->NewNode(&kOpA0);
+ Node* node1 = graph()->NewNode(&kOpA1, node0);
+ EXPECT_CALL(e, Replace(node0, node0));
+ EXPECT_CALL(e, Replace(node1, node1));
+ EXPECT_FALSE(r.Reduce(node0).Changed());
+ EXPECT_FALSE(r.Reduce(node1).Changed());
+}
+
+
+TEST_F(AdvancedReducerTest, Revisit) {
+ struct DummyReducer final : public AdvancedReducer {
+ explicit DummyReducer(Editor* editor) : AdvancedReducer(editor) {}
+ Reduction Reduce(Node* node) final {
+ Revisit(node);
+ return NoChange();
+ }
+ };
+ StrictMock<MockAdvancedReducerEditor> e;
+ DummyReducer r(&e);
+ Node* node0 = graph()->NewNode(&kOpA0);
+ Node* node1 = graph()->NewNode(&kOpA1, node0);
+ EXPECT_CALL(e, Revisit(node0));
+ EXPECT_CALL(e, Revisit(node1));
+ EXPECT_FALSE(r.Reduce(node0).Changed());
+ EXPECT_FALSE(r.Reduce(node1).Changed());
+}
+
+
+namespace {
+
+struct ReplaceWithValueReducer final : public AdvancedReducer {
+ explicit ReplaceWithValueReducer(Editor* editor) : AdvancedReducer(editor) {}
+ Reduction Reduce(Node* node) final { return NoChange(); }
+ using AdvancedReducer::ReplaceWithValue;
+};
+
+const Operator kMockOperator(IrOpcode::kDead, Operator::kNoProperties,
+ "MockOperator", 0, 0, 0, 1, 0, 0);
+const Operator kMockOpEffect(IrOpcode::kDead, Operator::kNoProperties,
+ "MockOpEffect", 0, 1, 0, 1, 1, 0);
+const Operator kMockOpControl(IrOpcode::kDead, Operator::kNoProperties,
+ "MockOpControl", 0, 0, 1, 1, 0, 1);
+
+const IfExceptionHint kNoHint = IfExceptionHint::kLocallyCaught;
+
+} // namespace
+
+
+TEST_F(AdvancedReducerTest, ReplaceWithValue_ValueUse) {
+ CommonOperatorBuilder common(zone());
+ Node* node = graph()->NewNode(&kMockOperator);
+ Node* start = graph()->NewNode(common.Start(1));
+ Node* use_value = graph()->NewNode(common.Return(), node, start, start);
+ Node* replacement = graph()->NewNode(&kMockOperator);
+ GraphReducer graph_reducer(zone(), graph(), nullptr);
+ ReplaceWithValueReducer r(&graph_reducer);
+ r.ReplaceWithValue(node, replacement);
+ EXPECT_EQ(replacement, use_value->InputAt(0));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(1, replacement->UseCount());
+ EXPECT_THAT(replacement->uses(), ElementsAre(use_value));
+}
+
+
+TEST_F(AdvancedReducerTest, ReplaceWithValue_EffectUse) {
+ CommonOperatorBuilder common(zone());
+ Node* start = graph()->NewNode(common.Start(1));
+ Node* node = graph()->NewNode(&kMockOpEffect, start);
+ Node* use_control = graph()->NewNode(common.Merge(1), start);
+ Node* use_effect = graph()->NewNode(common.EffectPhi(1), node, use_control);
+ Node* replacement = graph()->NewNode(&kMockOperator);
+ GraphReducer graph_reducer(zone(), graph(), nullptr);
+ ReplaceWithValueReducer r(&graph_reducer);
+ r.ReplaceWithValue(node, replacement);
+ EXPECT_EQ(start, use_effect->InputAt(0));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(3, start->UseCount());
+ EXPECT_EQ(0, replacement->UseCount());
+ EXPECT_THAT(start->uses(),
+ UnorderedElementsAre(use_effect, use_control, node));
+}
+
+
+TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse1) {
+ CommonOperatorBuilder common(zone());
+ Node* start = graph()->NewNode(common.Start(1));
+ Node* node = graph()->NewNode(&kMockOpControl, start);
+ Node* success = graph()->NewNode(common.IfSuccess(), node);
+ Node* use_control = graph()->NewNode(common.Merge(1), success);
+ Node* replacement = graph()->NewNode(&kMockOperator);
+ GraphReducer graph_reducer(zone(), graph(), nullptr);
+ ReplaceWithValueReducer r(&graph_reducer);
+ r.ReplaceWithValue(node, replacement);
+ EXPECT_EQ(start, use_control->InputAt(0));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(2, start->UseCount());
+ EXPECT_EQ(0, replacement->UseCount());
+ EXPECT_THAT(start->uses(), UnorderedElementsAre(use_control, node));
+}
+
+
+TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse2) {
+ CommonOperatorBuilder common(zone());
+ Node* start = graph()->NewNode(common.Start(1));
+ Node* effect = graph()->NewNode(&kMockOperator);
+ Node* dead = graph()->NewNode(&kMockOperator);
+ Node* node = graph()->NewNode(&kMockOpControl, start);
+ Node* success = graph()->NewNode(common.IfSuccess(), node);
+ Node* exception = graph()->NewNode(common.IfException(kNoHint), effect, node);
+ Node* use_control = graph()->NewNode(common.Merge(1), success);
+ Node* replacement = graph()->NewNode(&kMockOperator);
+ GraphReducer graph_reducer(zone(), graph(), dead);
+ ReplaceWithValueReducer r(&graph_reducer);
+ r.ReplaceWithValue(node, replacement);
+ EXPECT_EQ(start, use_control->InputAt(0));
+ EXPECT_EQ(dead, exception->InputAt(1));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(2, start->UseCount());
+ EXPECT_EQ(1, dead->UseCount());
+ EXPECT_EQ(0, replacement->UseCount());
+ EXPECT_THAT(start->uses(), UnorderedElementsAre(use_control, node));
+ EXPECT_THAT(dead->uses(), ElementsAre(exception));
+}
+
+
+TEST_F(AdvancedReducerTest, ReplaceWithValue_ControlUse3) {
+ CommonOperatorBuilder common(zone());
+ Node* start = graph()->NewNode(common.Start(1));
+ Node* effect = graph()->NewNode(&kMockOperator);
+ Node* dead = graph()->NewNode(&kMockOperator);
+ Node* node = graph()->NewNode(&kMockOpControl, start);
+ Node* success = graph()->NewNode(common.IfSuccess(), node);
+ Node* exception = graph()->NewNode(common.IfException(kNoHint), effect, node);
+ Node* use_control = graph()->NewNode(common.Merge(1), success);
+ Node* replacement = graph()->NewNode(&kMockOperator);
+ GraphReducer graph_reducer(zone(), graph(), dead);
+ ReplaceWithValueReducer r(&graph_reducer);
+ r.ReplaceWithValue(node, replacement);
+ EXPECT_EQ(start, use_control->InputAt(0));
+ EXPECT_EQ(dead, exception->InputAt(1));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(2, start->UseCount());
+ EXPECT_EQ(1, dead->UseCount());
+ EXPECT_EQ(0, replacement->UseCount());
+ EXPECT_THAT(start->uses(), UnorderedElementsAre(use_control, node));
+ EXPECT_THAT(dead->uses(), ElementsAre(exception));
+}
+
+
class GraphReducerTest : public TestWithZone {
public:
GraphReducerTest() : graph_(zone()) {}
@@ -55,26 +405,47 @@
protected:
void ReduceNode(Node* node, Reducer* r) {
- GraphReducer reducer(graph(), zone());
+ GraphReducer reducer(zone(), graph());
reducer.AddReducer(r);
reducer.ReduceNode(node);
}
void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
- GraphReducer reducer(graph(), zone());
+ GraphReducer reducer(zone(), graph());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.ReduceNode(node);
}
void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
- GraphReducer reducer(graph(), zone());
+ GraphReducer reducer(zone(), graph());
reducer.AddReducer(r1);
reducer.AddReducer(r2);
reducer.AddReducer(r3);
reducer.ReduceNode(node);
}
+ void ReduceGraph(Reducer* r1) {
+ GraphReducer reducer(zone(), graph());
+ reducer.AddReducer(r1);
+ reducer.ReduceGraph();
+ }
+
+ void ReduceGraph(Reducer* r1, Reducer* r2) {
+ GraphReducer reducer(zone(), graph());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.ReduceGraph();
+ }
+
+ void ReduceGraph(Reducer* r1, Reducer* r2, Reducer* r3) {
+ GraphReducer reducer(zone(), graph());
+ reducer.AddReducer(r1);
+ reducer.AddReducer(r2);
+ reducer.AddReducer(r3);
+ reducer.ReduceGraph();
+ }
+
Graph* graph() { return &graph_; }
private:
@@ -84,9 +455,9 @@
TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
StrictMock<MockReducer> r;
- Node* node0 = graph()->NewNode(&OP0);
- Node* node1 = graph()->NewNode(&OP1, node0);
- Node* node2 = graph()->NewNode(&OP1, node0);
+ Node* node0 = graph()->NewNode(&kOpA0);
+ Node* node1 = graph()->NewNode(&kOpA1, node0);
+ Node* node2 = graph()->NewNode(&kOpA1, node0);
EXPECT_CALL(r, Reduce(node0)).WillOnce(Return(Reducer::NoChange()));
EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
ReduceNode(node1, &r);
@@ -98,7 +469,7 @@
TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
StrictMock<MockReducer> r1, r2;
- Node* node0 = graph()->NewNode(&OP0);
+ Node* node0 = graph()->NewNode(&kOpA0);
EXPECT_CALL(r1, Reduce(node0));
EXPECT_CALL(r2, Reduce(node0));
ReduceNode(node0, &r1, &r2);
@@ -108,7 +479,7 @@
TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
Sequence s1, s2, s3;
StrictMock<MockReducer> r1, r2, r3;
- Node* node0 = graph()->NewNode(&OP0);
+ Node* node0 = graph()->NewNode(&kOpA0);
EXPECT_CALL(r1, Reduce(node0));
EXPECT_CALL(r2, Reduce(node0));
EXPECT_CALL(r3, Reduce(node0)).InSequence(s1, s2, s3).WillOnce(
@@ -118,6 +489,373 @@
ReduceNode(node0, &r1, &r2, &r3);
}
+
+TEST_F(GraphReducerTest, ReduceGraphFromEnd1) {
+ StrictMock<MockReducer> r1;
+ Node* n = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n);
+ graph()->SetEnd(end);
+ Sequence s;
+ EXPECT_CALL(r1, Reduce(n));
+ EXPECT_CALL(r1, Reduce(end));
+ ReduceGraph(&r1);
+}
+
+
+TEST_F(GraphReducerTest, ReduceGraphFromEnd2) {
+ StrictMock<MockReducer> r1;
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+ Sequence s1, s2;
+ EXPECT_CALL(r1, Reduce(n1)).InSequence(s1, s2);
+ EXPECT_CALL(r1, Reduce(n2)).InSequence(s1);
+ EXPECT_CALL(r1, Reduce(n3)).InSequence(s2);
+ EXPECT_CALL(r1, Reduce(end)).InSequence(s1, s2);
+ ReduceGraph(&r1);
+}
+
+
+TEST_F(GraphReducerTest, ReduceInPlace1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n1);
+ graph()->SetEnd(end);
+
+ // Tests A* => B* with in-place updates.
+ InPlaceABReducer r;
+ for (int i = 0; i < 3; i++) {
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpB0, n1->op());
+ EXPECT_EQ(&kOpB1, end->op());
+ EXPECT_EQ(n1, end->InputAt(0));
+ }
+}
+
+
+TEST_F(GraphReducerTest, ReduceInPlace2) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ // Tests A* => B* with in-place updates.
+ InPlaceABReducer r;
+ for (int i = 0; i < 3; i++) {
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpB0, n1->op());
+ EXPECT_EQ(&kOpB1, n2->op());
+ EXPECT_EQ(n1, n2->InputAt(0));
+ EXPECT_EQ(&kOpB1, n3->op());
+ EXPECT_EQ(n1, n3->InputAt(0));
+ EXPECT_EQ(&kOpB2, end->op());
+ EXPECT_EQ(n2, end->InputAt(0));
+ EXPECT_EQ(n3, end->InputAt(1));
+ }
+}
+
+
+TEST_F(GraphReducerTest, ReduceNew1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ NewABReducer r(graph());
+ // Tests A* => B* while creating new nodes.
+ for (int i = 0; i < 3; i++) {
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r);
+ if (i == 0) {
+ EXPECT_NE(before, graph()->NodeCount());
+ } else {
+ EXPECT_EQ(before, graph()->NodeCount());
+ }
+ Node* nend = graph()->end();
+ EXPECT_NE(end, nend); // end() should be updated too.
+
+ Node* nn2 = nend->InputAt(0);
+ Node* nn3 = nend->InputAt(1);
+ Node* nn1 = nn2->InputAt(0);
+
+ EXPECT_EQ(nn1, nn3->InputAt(0));
+
+ EXPECT_EQ(&kOpB0, nn1->op());
+ EXPECT_EQ(&kOpB1, nn2->op());
+ EXPECT_EQ(&kOpB1, nn3->op());
+ EXPECT_EQ(&kOpB2, nend->op());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Wrapping1) {
+ Node* end = graph()->NewNode(&kOpA0);
+ graph()->SetEnd(end);
+ EXPECT_EQ(1U, graph()->NodeCount());
+
+ A0Wrapper r(graph());
+
+ ReduceGraph(&r);
+ EXPECT_EQ(2U, graph()->NodeCount());
+
+ Node* nend = graph()->end();
+ EXPECT_NE(end, nend);
+ EXPECT_EQ(&kOpB1, nend->op());
+ EXPECT_EQ(1, nend->InputCount());
+ EXPECT_EQ(end, nend->InputAt(0));
+}
+
+
+TEST_F(GraphReducerTest, Wrapping2) {
+ Node* end = graph()->NewNode(&kOpB0);
+ graph()->SetEnd(end);
+ EXPECT_EQ(1U, graph()->NodeCount());
+
+ B0Wrapper r(graph());
+
+ ReduceGraph(&r);
+ EXPECT_EQ(3U, graph()->NodeCount());
+
+ Node* nend = graph()->end();
+ EXPECT_NE(end, nend);
+ EXPECT_EQ(&kOpC1, nend->op());
+ EXPECT_EQ(1, nend->InputCount());
+
+ Node* n1 = nend->InputAt(0);
+ EXPECT_NE(end, n1);
+ EXPECT_EQ(&kOpC1, n1->op());
+ EXPECT_EQ(1, n1->InputCount());
+ EXPECT_EQ(end, n1->InputAt(0));
+}
+
+
+TEST_F(GraphReducerTest, Forwarding1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n1);
+ graph()->SetEnd(end);
+
+ A1Forwarder r;
+
+ // Tests A1(x) => x
+ for (int i = 0; i < 3; i++) {
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(n1, graph()->end());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Forwarding2) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ A1Forwarder r;
+
+ // Tests reducing A2(A1(x), A1(y)) => A2(x, y).
+ for (int i = 0; i < 3; i++) {
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(n1, end->InputAt(0));
+ EXPECT_EQ(n1, end->InputAt(1));
+ EXPECT_EQ(&kOpA2, end->op());
+ EXPECT_EQ(0, n2->UseCount());
+ EXPECT_EQ(0, n3->UseCount());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Forwarding3) {
+ // Tests reducing a chain of A1(A1(A1(A1(x)))) => x.
+ for (int i = 0; i < 8; i++) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = n1;
+ for (int j = 0; j < i; j++) {
+ end = graph()->NewNode(&kOpA1, end);
+ }
+ graph()->SetEnd(end);
+
+ A1Forwarder r;
+
+ for (size_t i = 0; i < 3; i++) {
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(n1, graph()->end());
+ }
+ }
+}
+
+
+TEST_F(GraphReducerTest, ReduceForward1) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = graph()->NewNode(&kOpA2, n2, n3);
+ graph()->SetEnd(end);
+
+ InPlaceABReducer r;
+ B1Forwarder f;
+
+ // Tests first reducing A => B, then B1(x) => x.
+ for (size_t i = 0; i < 3; i++) {
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r, &f);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpB0, n1->op());
+ EXPECT_TRUE(n2->IsDead());
+ EXPECT_EQ(n1, end->InputAt(0));
+ EXPECT_TRUE(n3->IsDead());
+    EXPECT_EQ(n1, end->InputAt(1));
+ EXPECT_EQ(&kOpB2, end->op());
+ EXPECT_EQ(0, n2->UseCount());
+ EXPECT_EQ(0, n3->UseCount());
+ }
+}
+
+
+TEST_F(GraphReducerTest, Sorter1) {
+ AB2Sorter r;
+ for (int i = 0; i < 6; i++) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* n2 = graph()->NewNode(&kOpA1, n1);
+ Node* n3 = graph()->NewNode(&kOpA1, n1);
+ Node* end = NULL; // Initialize to please the compiler.
+
+ if (i == 0) end = graph()->NewNode(&kOpA2, n2, n3);
+ if (i == 1) end = graph()->NewNode(&kOpA2, n3, n2);
+ if (i == 2) end = graph()->NewNode(&kOpA2, n2, n1);
+ if (i == 3) end = graph()->NewNode(&kOpA2, n1, n2);
+ if (i == 4) end = graph()->NewNode(&kOpA2, n3, n1);
+ if (i == 5) end = graph()->NewNode(&kOpA2, n1, n3);
+
+ graph()->SetEnd(end);
+
+ size_t before = graph()->NodeCount();
+ ReduceGraph(&r);
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpA0, n1->op());
+ EXPECT_EQ(&kOpA1, n2->op());
+ EXPECT_EQ(&kOpA1, n3->op());
+ EXPECT_EQ(&kOpA2, end->op());
+ EXPECT_EQ(end, graph()->end());
+ EXPECT_LE(end->InputAt(0)->id(), end->InputAt(1)->id());
+ }
+}
+
+
+namespace {
+
+// Generate a node graph with the given permutations.
+void GenDAG(Graph* graph, int* p3, int* p2, int* p1) {
+ Node* level4 = graph->NewNode(&kOpA0);
+ Node* level3[] = {graph->NewNode(&kOpA1, level4),
+ graph->NewNode(&kOpA1, level4)};
+
+ Node* level2[] = {graph->NewNode(&kOpA1, level3[p3[0]]),
+ graph->NewNode(&kOpA1, level3[p3[1]]),
+ graph->NewNode(&kOpA1, level3[p3[0]]),
+ graph->NewNode(&kOpA1, level3[p3[1]])};
+
+ Node* level1[] = {graph->NewNode(&kOpA2, level2[p2[0]], level2[p2[1]]),
+ graph->NewNode(&kOpA2, level2[p2[2]], level2[p2[3]])};
+
+ Node* end = graph->NewNode(&kOpA2, level1[p1[0]], level1[p1[1]]);
+ graph->SetEnd(end);
+}
+
+} // namespace
+
+
+TEST_F(GraphReducerTest, SortForwardReduce) {
+ // Tests combined reductions on a series of DAGs.
+ for (int j = 0; j < 2; j++) {
+ int p3[] = {j, 1 - j};
+ for (int m = 0; m < 2; m++) {
+ int p1[] = {m, 1 - m};
+ for (int k = 0; k < 24; k++) { // All permutations of 0, 1, 2, 3
+ int p2[] = {-1, -1, -1, -1};
+ int n = k;
+ for (int d = 4; d >= 1; d--) { // Construct permutation.
+ int p = n % d;
+ for (int z = 0; z < 4; z++) {
+ if (p2[z] == -1) {
+ if (p == 0) p2[z] = d - 1;
+ p--;
+ }
+ }
+ n = n / d;
+ }
+
+ GenDAG(graph(), p3, p2, p1);
+
+ AB2Sorter r1;
+ A1Forwarder r2;
+ InPlaceABReducer r3;
+
+ ReduceGraph(&r1, &r2, &r3);
+
+ Node* end = graph()->end();
+ EXPECT_EQ(&kOpB2, end->op());
+ Node* n1 = end->InputAt(0);
+ Node* n2 = end->InputAt(1);
+ EXPECT_NE(n1, n2);
+ EXPECT_LT(n1->id(), n2->id());
+ EXPECT_EQ(&kOpB2, n1->op());
+ EXPECT_EQ(&kOpB2, n2->op());
+ Node* n4 = n1->InputAt(0);
+ EXPECT_EQ(&kOpB0, n4->op());
+ EXPECT_EQ(n4, n1->InputAt(1));
+ EXPECT_EQ(n4, n2->InputAt(0));
+ EXPECT_EQ(n4, n2->InputAt(1));
+ }
+ }
+ }
+}
+
+
+TEST_F(GraphReducerTest, Order) {
+ // Test that the order of reducers doesn't matter, as they should be
+ // rerun for changed nodes.
+ for (int i = 0; i < 2; i++) {
+ Node* n1 = graph()->NewNode(&kOpA0);
+ Node* end = graph()->NewNode(&kOpA1, n1);
+ graph()->SetEnd(end);
+
+ InPlaceABReducer abr;
+ InPlaceBCReducer bcr;
+
+ // Tests A* => C* with in-place updates.
+ for (size_t j = 0; j < 3; j++) {
+ size_t before = graph()->NodeCount();
+ if (i == 0) {
+ ReduceGraph(&abr, &bcr);
+ } else {
+ ReduceGraph(&bcr, &abr);
+ }
+
+ EXPECT_EQ(before, graph()->NodeCount());
+ EXPECT_EQ(&kOpC0, n1->op());
+ EXPECT_EQ(&kOpC1, end->op());
+ EXPECT_EQ(n1, end->InputAt(0));
+ }
+ }
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/graph-reducer-unittest.h b/test/unittests/compiler/graph-reducer-unittest.h
new file mode 100644
index 0000000..2b0651d
--- /dev/null
+++ b/test/unittests/compiler/graph-reducer-unittest.h
@@ -0,0 +1,25 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_GRAPH_REDUCER_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_GRAPH_REDUCER_UNITTEST_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct MockAdvancedReducerEditor : public AdvancedReducer::Editor {
+ MOCK_METHOD1(Revisit, void(Node*));
+ MOCK_METHOD2(Replace, void(Node*, Node*));
+ MOCK_METHOD4(ReplaceWithValue, void(Node*, Node*, Node*, Node*));
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_GRAPH_REDUCER_UNITTEST_H_
diff --git a/test/unittests/compiler/graph-trimmer-unittest.cc b/test/unittests/compiler/graph-trimmer-unittest.cc
new file mode 100644
index 0000000..36892e6
--- /dev/null
+++ b/test/unittests/compiler/graph-trimmer-unittest.cc
@@ -0,0 +1,85 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-trimmer.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "testing/gmock-support.h"
+
+using testing::ElementsAre;
+using testing::UnorderedElementsAre;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphTrimmerTest : public GraphTest {
+ public:
+ GraphTrimmerTest() : GraphTest(1) {}
+
+ protected:
+ void TrimGraph(Node* root) {
+ Node* const roots[1] = {root};
+ GraphTrimmer trimmer(zone(), graph());
+ trimmer.TrimGraph(&roots[0], &roots[arraysize(roots)]);
+ }
+ void TrimGraph() {
+ GraphTrimmer trimmer(zone(), graph());
+ trimmer.TrimGraph();
+ }
+};
+
+
+namespace {
+
+const Operator kDead0(IrOpcode::kDead, Operator::kNoProperties, "Dead0", 0, 0,
+ 1, 0, 0, 0);
+const Operator kLive0(IrOpcode::kDead, Operator::kNoProperties, "Live0", 0, 0,
+ 1, 0, 0, 1);
+
+} // namespace
+
+
+TEST_F(GraphTrimmerTest, Empty) {
+ Node* const start = graph()->NewNode(common()->Start(0));
+ Node* const end = graph()->NewNode(common()->End(1), start);
+ graph()->SetStart(start);
+ graph()->SetEnd(end);
+ TrimGraph();
+ EXPECT_EQ(end, graph()->end());
+ EXPECT_EQ(start, graph()->start());
+ EXPECT_EQ(start, end->InputAt(0));
+}
+
+
+TEST_F(GraphTrimmerTest, DeadUseOfStart) {
+ Node* const dead0 = graph()->NewNode(&kDead0, graph()->start());
+ graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
+ TrimGraph();
+ EXPECT_THAT(dead0->inputs(), ElementsAre(nullptr));
+ EXPECT_THAT(graph()->start()->uses(), ElementsAre(graph()->end()));
+}
+
+
+TEST_F(GraphTrimmerTest, DeadAndLiveUsesOfStart) {
+ Node* const dead0 = graph()->NewNode(&kDead0, graph()->start());
+ Node* const live0 = graph()->NewNode(&kLive0, graph()->start());
+ graph()->SetEnd(graph()->NewNode(common()->End(1), live0));
+ TrimGraph();
+ EXPECT_THAT(dead0->inputs(), ElementsAre(nullptr));
+ EXPECT_THAT(graph()->start()->uses(), ElementsAre(live0));
+ EXPECT_THAT(live0->uses(), ElementsAre(graph()->end()));
+}
+
+
+TEST_F(GraphTrimmerTest, Roots) {
+ Node* const live0 = graph()->NewNode(&kLive0, graph()->start());
+ Node* const live1 = graph()->NewNode(&kLive0, graph()->start());
+ graph()->SetEnd(graph()->NewNode(common()->End(1), live0));
+ TrimGraph(live1);
+ EXPECT_THAT(graph()->start()->uses(), UnorderedElementsAre(live0, live1));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/graph-unittest.cc b/test/unittests/compiler/graph-unittest.cc
index 9543258..399f985 100644
--- a/test/unittests/compiler/graph-unittest.cc
+++ b/test/unittests/compiler/graph-unittest.cc
@@ -4,7 +4,9 @@
#include "test/unittests/compiler/graph-unittest.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/factory.h"
+#include "src/objects-inl.h" // TODO(everyone): Make typer.h IWYU compliant.
#include "test/unittests/compiler/node-test-utils.h"
namespace v8 {
@@ -13,6 +15,7 @@
GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
+ graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
}
@@ -50,50 +53,55 @@
Node* GraphTest::HeapConstant(const Handle<HeapObject>& value) {
- return HeapConstant(Unique<HeapObject>::CreateUninitialized(value));
-}
-
-
-Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
Node* node = graph()->NewNode(common()->HeapConstant(value));
- Type* type = Type::Constant(value.handle(), zone());
- NodeProperties::SetBounds(node, Bounds(type));
+ Type* type = Type::Constant(value, zone());
+ NodeProperties::SetType(node, type);
return node;
}
Node* GraphTest::FalseConstant() {
- return HeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+ return HeapConstant(factory()->false_value());
}
Node* GraphTest::TrueConstant() {
- return HeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+ return HeapConstant(factory()->true_value());
}
Node* GraphTest::UndefinedConstant() {
- return HeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+ return HeapConstant(factory()->undefined_value());
+}
+
+
+Node* GraphTest::EmptyFrameState() {
+ Node* state_values = graph()->NewNode(common()->StateValues(0));
+ return graph()->NewNode(
+ common()->FrameState(BailoutId::None(), OutputFrameStateCombine::Ignore(),
+ nullptr),
+ state_values, state_values, state_values, NumberConstant(0),
+ UndefinedConstant(), graph()->start());
}
Matcher<Node*> GraphTest::IsFalseConstant() {
- return IsHeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+ return IsHeapConstant(factory()->false_value());
}
Matcher<Node*> GraphTest::IsTrueConstant() {
- return IsHeapConstant(
- Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+ return IsHeapConstant(factory()->true_value());
+}
+
+
+Matcher<Node*> GraphTest::IsUndefinedConstant() {
+ return IsHeapConstant(factory()->undefined_value());
}
TypedGraphTest::TypedGraphTest(int num_parameters)
- : GraphTest(num_parameters), typer_(graph(), MaybeHandle<Context>()) {}
+ : GraphTest(num_parameters), typer_(isolate(), graph()) {}
TypedGraphTest::~TypedGraphTest() {}
@@ -101,10 +109,30 @@
Node* TypedGraphTest::Parameter(Type* type, int32_t index) {
Node* node = GraphTest::Parameter(index);
- NodeProperties::SetBounds(node, Bounds(type));
+ NodeProperties::SetType(node, type);
return node;
}
+
+namespace {
+
+const Operator kDummyOperator(0, Operator::kNoProperties, "Dummy", 0, 0, 0, 1,
+ 0, 0);
+
+} // namespace
+
+
+TEST_F(GraphTest, NewNode) {
+ Node* n0 = graph()->NewNode(&kDummyOperator);
+ Node* n1 = graph()->NewNode(&kDummyOperator);
+ EXPECT_NE(n0, n1);
+ EXPECT_LT(0u, n0->id());
+ EXPECT_LT(0u, n1->id());
+ EXPECT_NE(n0->id(), n1->id());
+ EXPECT_EQ(&kDummyOperator, n0->op());
+ EXPECT_EQ(&kDummyOperator, n1->op());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/graph-unittest.h b/test/unittests/compiler/graph-unittest.h
index 7c75161..9c99992 100644
--- a/test/unittests/compiler/graph-unittest.h
+++ b/test/unittests/compiler/graph-unittest.h
@@ -18,20 +18,21 @@
template <class T>
class Handle;
class HeapObject;
-template <class T>
-class Unique;
namespace compiler {
using ::testing::Matcher;
-class GraphTest : public TestWithContext, public TestWithZone {
+class GraphTest : public TestWithContext, public TestWithIsolateAndZone {
public:
explicit GraphTest(int num_parameters = 1);
- ~GraphTest() OVERRIDE;
+ ~GraphTest() override;
protected:
+ Node* start() { return graph()->start(); }
+ Node* end() { return graph()->end(); }
+
Node* Parameter(int32_t index = 0);
Node* Float32Constant(volatile float value);
Node* Float64Constant(volatile double value);
@@ -42,13 +43,15 @@
Node* Int64Constant(int64_t value);
Node* NumberConstant(volatile double value);
Node* HeapConstant(const Handle<HeapObject>& value);
- Node* HeapConstant(const Unique<HeapObject>& value);
Node* FalseConstant();
Node* TrueConstant();
Node* UndefinedConstant();
+ Node* EmptyFrameState();
+
Matcher<Node*> IsFalseConstant();
Matcher<Node*> IsTrueConstant();
+ Matcher<Node*> IsUndefinedConstant();
CommonOperatorBuilder* common() { return &common_; }
Graph* graph() { return &graph_; }
@@ -62,7 +65,7 @@
class TypedGraphTest : public GraphTest {
public:
explicit TypedGraphTest(int num_parameters = 1);
- ~TypedGraphTest() OVERRIDE;
+ ~TypedGraphTest() override;
protected:
Node* Parameter(int32_t index = 0) { return GraphTest::Parameter(index); }
diff --git a/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index afa1e94..5280f69 100644
--- a/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -11,15 +11,16 @@
namespace {
// Immediates (random subset).
-static const int32_t kImmediates[] = {
- kMinInt, -42, -1, 0, 1, 2, 3, 4, 5,
- 6, 7, 8, 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+const int32_t kImmediates[] = {kMinInt, -42, -1, 0, 1, 2,
+ 3, 4, 5, 6, 7, 8,
+ 16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
} // namespace
TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -30,7 +31,7 @@
TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -43,7 +44,7 @@
}
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -60,7 +61,8 @@
TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -71,7 +73,7 @@
TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -87,22 +89,22 @@
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
+ EXPECT_EQ(kSSEFloat32ToFloat64, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64ToFloat32, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -113,7 +115,8 @@
TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* add = m.Int32Add(param1, param2);
@@ -131,7 +134,8 @@
TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* param1 = m.Parameter(0);
Node* param2 = m.Parameter(1);
Node* mul = m.Int32Mul(param1, param2);
@@ -151,7 +155,7 @@
TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachUint32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -178,14 +182,14 @@
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kIA32Movsxbl, kIA32Movb},
- {kMachUint8, kIA32Movzxbl, kIA32Movb},
- {kMachInt16, kIA32Movsxwl, kIA32Movw},
- {kMachUint16, kIA32Movzxwl, kIA32Movw},
- {kMachInt32, kIA32Movl, kIA32Movl},
- {kMachUint32, kIA32Movl, kIA32Movl},
- {kMachFloat32, kIA32Movss, kIA32Movss},
- {kMachFloat64, kIA32Movsd, kIA32Movsd}};
+ {MachineType::Int8(), kIA32Movsxbl, kIA32Movb},
+ {MachineType::Uint8(), kIA32Movzxbl, kIA32Movb},
+ {MachineType::Int16(), kIA32Movsxwl, kIA32Movw},
+ {MachineType::Uint16(), kIA32Movzxwl, kIA32Movw},
+ {MachineType::Int32(), kIA32Movl, kIA32Movl},
+ {MachineType::Uint32(), kIA32Movl, kIA32Movl},
+ {MachineType::Float32(), kIA32Movss, kIA32Movss},
+ {MachineType::Float64(), kIA32Movsd, kIA32Movsd}};
} // namespace
@@ -196,7 +200,8 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -209,7 +214,7 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, base, kImmediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -229,7 +234,7 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, kImmediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -248,8 +253,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -262,8 +269,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, base, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Int32Constant(base), m.Parameter(0),
+ m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -283,9 +292,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
const MemoryAccess memacc = GetParam();
TRACED_FOREACH(int32_t, index, kImmediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
- m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -318,8 +328,9 @@
void Run(Node* base, Node* load_index, Node* store_index,
AddressingMode mode) {
- Node* load = m->Load(kMachInt32, base, load_index);
- m->Store(kMachInt32, base, store_index, load);
+ Node* load = m->Load(MachineType::Int32(), base, load_index);
+ m->Store(MachineRepresentation::kWord32, base, store_index, load,
+ kNoWriteBarrier);
m->Return(m->Int32Constant(0));
Stream s = m->Build();
ASSERT_EQ(2U, s.size());
@@ -337,7 +348,8 @@
void Reset() {
delete m;
- m = new StreamBuilder(this, kMachInt32, kMachInt32, kMachInt32);
+ m = new StreamBuilder(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
zero = m->Int32Constant(0);
null_ptr = m->Int32Constant(0);
non_zero = m->Int32Constant(127);
@@ -563,7 +575,7 @@
TEST_P(InstructionSelectorMultTest, Mult32) {
const MultParam m_param = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* param = m.Parameter(0);
Node* mult = m.Int32Mul(param, m.Int32Constant(m_param.value));
m.Return(mult);
@@ -584,7 +596,7 @@
TEST_P(InstructionSelectorMultTest, MultAdd32) {
TRACED_FOREACH(int32_t, imm, kImmediates) {
const MultParam m_param = GetParam();
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* param = m.Parameter(0);
Node* mult = m.Int32Add(m.Int32Mul(param, m.Int32Constant(m_param.value)),
m.Int32Constant(imm));
@@ -616,7 +628,8 @@
TEST_F(InstructionSelectorTest, Int32MulHigh) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -635,9 +648,80 @@
}
+// -----------------------------------------------------------------------------
+// Floating point operations.
+
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
@@ -651,7 +735,8 @@
EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
}
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
Node* mul = m.Float64Mul(add, m.Parameter(1));
Node* sub = m.Float64Sub(mul, add);
@@ -666,6 +751,108 @@
}
}
+
+TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
+TEST_F(InstructionSelectorTest, Uint32LessThanWithLoadAndLoadStackPointer) {
+ StreamBuilder m(this, MachineType::Bool());
+ Node* const sl = m.Load(
+ MachineType::Pointer(),
+ m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+ Node* const sp = m.LoadStackPointer();
+ Node* const n = m.Uint32LessThan(sl, sp);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32StackCheck, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kIA32Lzcnt, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/instruction-selector-unittest.cc b/test/unittests/compiler/instruction-selector-unittest.cc
index c79a9e4..89c0a65 100644
--- a/test/unittests/compiler/instruction-selector-unittest.cc
+++ b/test/unittests/compiler/instruction-selector-unittest.cc
@@ -4,7 +4,9 @@
#include "test/unittests/compiler/instruction-selector-unittest.h"
-#include "src/compiler/graph-inl.h"
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/schedule.h"
#include "src/flags.h"
#include "test/unittests/compiler/compiler-test-utils.h"
@@ -12,12 +14,6 @@
namespace internal {
namespace compiler {
-namespace {
-
-typedef RawMachineAssembler::Label MLabel;
-
-} // namespace
-
InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
@@ -27,44 +23,38 @@
InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSelector::Features features,
- InstructionSelectorTest::StreamBuilderMode mode) {
+ InstructionSelectorTest::StreamBuilderMode mode,
+ InstructionSelector::SourcePositionMode source_position_mode) {
Schedule* schedule = Export();
if (FLAG_trace_turbo) {
OFStream out(stdout);
out << "=== Schedule before instruction selection ===" << std::endl
<< *schedule;
}
- EXPECT_NE(0, graph()->NodeCount());
- int initial_node_count = graph()->NodeCount();
- Linkage linkage(test_->zone(), call_descriptor());
+ size_t const node_count = graph()->NodeCount();
+ EXPECT_NE(0u, node_count);
+ Linkage linkage(call_descriptor());
InstructionBlocks* instruction_blocks =
InstructionSequence::InstructionBlocksFor(test_->zone(), schedule);
- InstructionSequence sequence(test_->zone(), instruction_blocks);
+ InstructionSequence sequence(test_->isolate(), test_->zone(),
+ instruction_blocks);
SourcePositionTable source_position_table(graph());
- InstructionSelector selector(test_->zone(), graph(), &linkage, &sequence,
- schedule, &source_position_table, features);
+ InstructionSelector selector(test_->zone(), node_count, &linkage, &sequence,
+ schedule, &source_position_table,
+ source_position_mode, features);
selector.SelectInstructions();
if (FLAG_trace_turbo) {
OFStream out(stdout);
PrintableInstructionSequence printable = {
- RegisterConfiguration::ArchDefault(), &sequence};
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
+ &sequence};
out << "=== Code sequence after instruction selection ===" << std::endl
<< printable;
}
Stream s;
+ s.virtual_registers_ = selector.GetVirtualRegistersForTesting();
// Map virtual registers.
- {
- const NodeToVregMap& node_map = selector.GetNodeMapForTesting();
- for (int i = 0; i < initial_node_count; ++i) {
- if (node_map[i] != InstructionSelector::kNodeUnmapped) {
- s.virtual_registers_.insert(std::make_pair(i, node_map[i]));
- }
- }
- }
- std::set<int> virtual_registers;
- for (InstructionSequence::const_iterator i = sequence.begin();
- i != sequence.end(); ++i) {
- Instruction* instr = *i;
+ for (Instruction* const instr : sequence) {
if (instr->opcode() < 0) continue;
if (mode == kTargetInstructions) {
switch (instr->arch_opcode()) {
@@ -84,36 +74,32 @@
InstructionOperand* output = instr->OutputAt(i);
EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
if (output->IsConstant()) {
- s.constants_.insert(std::make_pair(
- output->index(), sequence.GetConstant(output->index())));
- virtual_registers.insert(output->index());
- } else if (output->IsUnallocated()) {
- virtual_registers.insert(
- UnallocatedOperand::cast(output)->virtual_register());
+ int vreg = ConstantOperand::cast(output)->virtual_register();
+ s.constants_.insert(std::make_pair(vreg, sequence.GetConstant(vreg)));
}
}
for (size_t i = 0; i < instr->InputCount(); ++i) {
InstructionOperand* input = instr->InputAt(i);
EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
if (input->IsImmediate()) {
- s.immediates_.insert(std::make_pair(
- input->index(), sequence.GetImmediate(input->index())));
- } else if (input->IsUnallocated()) {
- virtual_registers.insert(
- UnallocatedOperand::cast(input)->virtual_register());
+ auto imm = ImmediateOperand::cast(input);
+ if (imm->type() == ImmediateOperand::INDEXED) {
+ int index = imm->indexed_value();
+ s.immediates_.insert(
+ std::make_pair(index, sequence.GetImmediate(imm)));
+ }
}
}
s.instructions_.push_back(instr);
}
- for (std::set<int>::const_iterator i = virtual_registers.begin();
- i != virtual_registers.end(); ++i) {
- int virtual_register = *i;
- if (sequence.IsDouble(virtual_register)) {
+ for (auto i : s.virtual_registers_) {
+ int const virtual_register = i.second;
+ if (sequence.IsFloat(virtual_register)) {
EXPECT_FALSE(sequence.IsReference(virtual_register));
s.doubles_.insert(virtual_register);
}
if (sequence.IsReference(virtual_register)) {
- EXPECT_FALSE(sequence.IsDouble(virtual_register));
+ EXPECT_FALSE(sequence.IsFloat(virtual_register));
s.references_.insert(virtual_register);
}
}
@@ -137,8 +123,7 @@
if (!operand->IsUnallocated()) return false;
const UnallocatedOperand* unallocated = UnallocatedOperand::cast(operand);
if (!unallocated->HasFixedRegisterPolicy()) return false;
- const int index = Register::ToAllocationIndex(reg);
- return unallocated->fixed_register_index() == index;
+ return unallocated->fixed_register_index() == reg.code();
}
@@ -158,16 +143,25 @@
}
+const FrameStateFunctionInfo*
+InstructionSelectorTest::StreamBuilder::GetFrameStateFunctionInfo(
+ int parameter_count, int local_count) {
+ return common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, parameter_count, local_count,
+ Handle<SharedFunctionInfo>(), CALL_MAINTAINS_NATIVE_CONTEXT);
+}
+
+
// -----------------------------------------------------------------------------
// Return.
TARGET_TEST_F(InstructionSelectorTest, ReturnFloat32Constant) {
const float kValue = 4.2f;
- StreamBuilder m(this, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float32());
m.Return(m.Float32Constant(kValue));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
EXPECT_FLOAT_EQ(kValue, s.ToFloat32(s[0]->OutputAt(0)));
@@ -177,10 +171,10 @@
TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Parameter(0));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(kArchRet, s[1]->arch_opcode());
@@ -189,10 +183,10 @@
TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
- StreamBuilder m(this, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32());
m.Return(m.Int32Constant(0));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(2U, s.size());
+ ASSERT_EQ(3U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
@@ -207,10 +201,11 @@
TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
- StreamBuilder m(this, kMachInt32, kMachFloat64);
- m.Return(m.TruncateFloat64ToInt32(m.Parameter(0)));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float64());
+ m.Return(
+ m.TruncateFloat64ToInt32(TruncationMode::kJavaScript, m.Parameter(0)));
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(3U, s.size());
+ ASSERT_EQ(4U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
EXPECT_EQ(kArchTruncateDoubleToI, s[1]->arch_opcode());
EXPECT_EQ(1U, s[1]->InputCount());
@@ -224,7 +219,7 @@
TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
Node* param = m.Parameter(0);
m.Return(param);
Stream s = m.Build(kAllInstructions);
@@ -233,7 +228,7 @@
TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged());
Node* param = m.Parameter(0);
m.Return(param);
Stream s = m.Build(kAllInstructions);
@@ -242,16 +237,17 @@
// -----------------------------------------------------------------------------
-// Finish.
+// FinishRegion.
-TARGET_TEST_F(InstructionSelectorTest, Finish) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+TARGET_TEST_F(InstructionSelectorTest, FinishRegion) {
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged());
Node* param = m.Parameter(0);
- Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
+ Node* finish =
+ m.AddNode(m.common()->FinishRegion(), param, m.graph()->start());
m.Return(finish);
Stream s = m.Build(kAllInstructions);
- ASSERT_EQ(3U, s.size());
+ ASSERT_EQ(4U, s.size());
EXPECT_EQ(kArchNop, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->OutputCount());
ASSERT_TRUE(s[0]->Output()->IsUnallocated());
@@ -281,14 +277,14 @@
StreamBuilder m(this, type, type, type);
Node* param0 = m.Parameter(0);
Node* param1 = m.Parameter(1);
- MLabel a, b, c;
+ RawMachineLabel a, b, c;
m.Branch(m.Int32Constant(0), &a, &b);
m.Bind(&a);
m.Goto(&c);
m.Bind(&b);
m.Goto(&c);
m.Bind(&c);
- Node* phi = m.Phi(type, param0, param1);
+ Node* phi = m.Phi(type.representation(), param0, param1);
m.Return(phi);
Stream s = m.Build(kAllInstructions);
EXPECT_EQ(s.IsDouble(phi), s.IsDouble(param0));
@@ -301,14 +297,14 @@
StreamBuilder m(this, type, type, type);
Node* param0 = m.Parameter(0);
Node* param1 = m.Parameter(1);
- MLabel a, b, c;
+ RawMachineLabel a, b, c;
m.Branch(m.Int32Constant(1), &a, &b);
m.Bind(&a);
m.Goto(&c);
m.Bind(&b);
m.Goto(&c);
m.Bind(&c);
- Node* phi = m.Phi(type, param0, param1);
+ Node* phi = m.Phi(type.representation(), param0, param1);
m.Return(phi);
Stream s = m.Build(kAllInstructions);
EXPECT_EQ(s.IsReference(phi), s.IsReference(param0));
@@ -316,11 +312,14 @@
}
-INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
- ::testing::Values(kMachFloat64, kMachInt8, kMachUint8,
- kMachInt16, kMachUint16, kMachInt32,
- kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged));
+INSTANTIATE_TEST_CASE_P(
+ InstructionSelectorTest, InstructionSelectorPhiTest,
+ ::testing::Values(MachineType::Float64(), MachineType::Int8(),
+ MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(),
+ MachineType::Uint32(), MachineType::Int64(),
+ MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::AnyTagged()));
// -----------------------------------------------------------------------------
@@ -328,14 +327,15 @@
TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
- StreamBuilder m1(this, kMachInt32, kMachPtr);
+ StreamBuilder m1(this, MachineType::Int32(), MachineType::Pointer());
Node* p1 = m1.Parameter(0);
- m1.Return(m1.Load(kMachInt32, p1, m1.Int32Constant(0)));
+ m1.Return(m1.Load(MachineType::Int32(), p1, m1.Int32Constant(0)));
Stream s1 = m1.Build(kAllInstructions);
- StreamBuilder m2(this, kMachInt32, kMachPtr);
+ StreamBuilder m2(this, MachineType::Int32(), MachineType::Pointer());
Node* p2 = m2.Parameter(0);
- m2.Return(m2.NewNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
- m2.NewNode(m2.common()->ValueEffect(1), p2)));
+ m2.Return(m2.AddNode(
+ m2.machine()->Load(MachineType::Int32()), p2, m2.Int32Constant(0),
+ m2.AddNode(m2.common()->BeginRegion(), m2.graph()->start())));
Stream s2 = m2.Build(kAllInstructions);
EXPECT_LE(3U, s1.size());
ASSERT_EQ(s1.size(), s2.size());
@@ -354,8 +354,8 @@
TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged);
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged());
BailoutId bailout_id(42);
@@ -363,16 +363,28 @@
Node* receiver = m.Parameter(1);
Node* context = m.Parameter(2);
- Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(1));
- Node* locals = m.NewNode(m.common()->StateValues(0));
- Node* stack = m.NewNode(m.common()->StateValues(0));
- Node* context_dummy = m.Int32Constant(0);
+ ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
+ ZoneVector<MachineType> empty_types(zone());
- Node* state_node = m.NewNode(
- m.common()->FrameState(JS_FRAME, bailout_id,
- OutputFrameStateCombine::Push()),
- parameters, locals, stack, context_dummy, m.UndefinedConstant());
- Node* call = m.CallJS0(function_node, receiver, context, state_node);
+ CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
+ zone(), false, 1, CallDescriptor::kNeedsFrameState);
+
+ // Build frame state for the state before the call.
+ Node* parameters =
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(1));
+ Node* locals = m.AddNode(m.common()->TypedStateValues(&empty_types));
+ Node* stack = m.AddNode(m.common()->TypedStateValues(&empty_types));
+ Node* context_sentinel = m.Int32Constant(0);
+ Node* state_node = m.AddNode(
+ m.common()->FrameState(bailout_id, OutputFrameStateCombine::Push(),
+ m.GetFrameStateFunctionInfo(1, 0)),
+ parameters, locals, stack, context_sentinel, function_node,
+ m.UndefinedConstant());
+
+ // Build the call.
+ Node* args[] = {receiver, m.UndefinedConstant(), m.Int32Constant(1), context};
+ Node* call =
+ m.CallNWithFrameState(descriptor, function_node, args, state_node);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -392,9 +404,9 @@
}
-TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged);
+TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged());
BailoutId bailout_id_before(42);
@@ -403,21 +415,33 @@
Node* receiver = m.Parameter(1);
Node* context = m.Int32Constant(1); // Context is ignored.
- // Build frame state for the state before the call.
- Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
- Node* locals = m.NewNode(m.common()->StateValues(1), m.Float64Constant(0.5));
- Node* stack = m.NewNode(m.common()->StateValues(1), m.UndefinedConstant());
+ ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
+ ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
+ ZoneVector<MachineType> tagged_type(1, MachineType::AnyTagged(), zone());
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 1,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
+
+ // Build frame state for the state before the call.
+ Node* parameters =
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
+ Node* locals = m.AddNode(m.common()->TypedStateValues(&float64_type),
+ m.Float64Constant(0.5));
+ Node* stack = m.AddNode(m.common()->TypedStateValues(&tagged_type),
+ m.UndefinedConstant());
Node* context_sentinel = m.Int32Constant(0);
- Node* frame_state_before = m.NewNode(
- m.common()->FrameState(JS_FRAME, bailout_id_before,
- OutputFrameStateCombine::Push()),
- parameters, locals, stack, context_sentinel, m.UndefinedConstant());
+ Node* state_node = m.AddNode(
+ m.common()->FrameState(bailout_id_before, OutputFrameStateCombine::Push(),
+ m.GetFrameStateFunctionInfo(1, 1)),
+ parameters, locals, stack, context_sentinel, function_node,
+ m.UndefinedConstant());
// Build the call.
- Node* call = m.CallFunctionStub0(function_node, receiver, context,
- frame_state_before, CALL_AS_METHOD);
-
+ Node* args[] = {function_node, receiver, context};
+ Node* stub_code = m.HeapConstant(callable.code());
+ Node* call = m.CallNWithFrameState(descriptor, stub_code, args, state_node);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -436,7 +460,7 @@
size_t num_operands =
1 + // Code object.
1 +
- 4 + // Frame state deopt id + one input for each value in frame state.
+ 5 + // Frame state deopt id + one input for each value in frame state.
1 + // Function.
1; // Context.
ASSERT_EQ(num_operands, call_instr->InputCount());
@@ -454,21 +478,25 @@
EXPECT_EQ(1u, desc_before->parameters_count());
EXPECT_EQ(1u, desc_before->locals_count());
EXPECT_EQ(1u, desc_before->stack_count());
- EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(2)));
- EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(3))); // This should be a context.
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(3)));
+ EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(4))); // This should be a context.
// We inserted 0 here.
- EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(4)));
- EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(5))->IsUndefined());
- EXPECT_EQ(kMachInt32, desc_before->GetType(0));
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(1)); // context is always
- // tagged/any.
- EXPECT_EQ(kMachFloat64, desc_before->GetType(2));
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(3));
+ EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5)));
+ EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined());
+ EXPECT_EQ(MachineType::AnyTagged(),
+ desc_before->GetType(0)); // function is always
+ // tagged/any.
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(1));
+ EXPECT_EQ(MachineType::AnyTagged(),
+ desc_before->GetType(2)); // context is always
+ // tagged/any.
+ EXPECT_EQ(MachineType::Float64(), desc_before->GetType(3));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(4));
// Function.
- EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(6)));
+ EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(7)));
// Context.
- EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(7)));
+ EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(8)));
EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
@@ -476,10 +504,9 @@
}
-TARGET_TEST_F(InstructionSelectorTest,
- CallFunctionStubDeoptRecursiveFrameState) {
- StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
- kMachAnyTagged);
+TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
+ StreamBuilder m(this, MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged());
BailoutId bailout_id_before(42);
BailoutId bailout_id_parent(62);
@@ -488,32 +515,46 @@
Node* function_node = m.Parameter(0);
Node* receiver = m.Parameter(1);
Node* context = m.Int32Constant(66);
+ Node* context2 = m.Int32Constant(46);
+
+ ZoneVector<MachineType> int32_type(1, MachineType::Int32(), zone());
+ ZoneVector<MachineType> int32x2_type(2, MachineType::Int32(), zone());
+ ZoneVector<MachineType> float64_type(1, MachineType::Float64(), zone());
+
+ Callable callable = CodeFactory::ToObject(isolate());
+ CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+ isolate(), zone(), callable.descriptor(), 1,
+ CallDescriptor::kNeedsFrameState, Operator::kNoProperties);
// Build frame state for the state before the call.
- Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(63));
- Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(64));
- Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(65));
- Node* frame_state_parent =
- m.NewNode(m.common()->FrameState(JS_FRAME, bailout_id_parent,
- OutputFrameStateCombine::Ignore()),
- parameters, locals, stack, context, m.UndefinedConstant());
+ Node* parameters =
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(63));
+ Node* locals =
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(64));
+ Node* stack =
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(65));
+ Node* frame_state_parent = m.AddNode(
+ m.common()->FrameState(bailout_id_parent,
+ OutputFrameStateCombine::Ignore(),
+ m.GetFrameStateFunctionInfo(1, 1)),
+ parameters, locals, stack, context, function_node, m.UndefinedConstant());
- Node* context2 = m.Int32Constant(46);
Node* parameters2 =
- m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
- Node* locals2 =
- m.NewNode(m.common()->StateValues(1), m.Float64Constant(0.25));
- Node* stack2 = m.NewNode(m.common()->StateValues(2), m.Int32Constant(44),
- m.Int32Constant(45));
- Node* frame_state_before =
- m.NewNode(m.common()->FrameState(JS_FRAME, bailout_id_before,
- OutputFrameStateCombine::Push()),
- parameters2, locals2, stack2, context2, frame_state_parent);
+ m.AddNode(m.common()->TypedStateValues(&int32_type), m.Int32Constant(43));
+ Node* locals2 = m.AddNode(m.common()->TypedStateValues(&float64_type),
+ m.Float64Constant(0.25));
+ Node* stack2 = m.AddNode(m.common()->TypedStateValues(&int32x2_type),
+ m.Int32Constant(44), m.Int32Constant(45));
+ Node* state_node = m.AddNode(
+ m.common()->FrameState(bailout_id_before, OutputFrameStateCombine::Push(),
+ m.GetFrameStateFunctionInfo(1, 1)),
+ parameters2, locals2, stack2, context2, function_node,
+ frame_state_parent);
// Build the call.
- Node* call = m.CallFunctionStub0(function_node, receiver, context2,
- frame_state_before, CALL_AS_METHOD);
-
+ Node* args[] = {function_node, receiver, context2};
+ Node* stub_code = m.HeapConstant(callable.code());
+ Node* call = m.CallNWithFrameState(descriptor, stub_code, args, state_node);
m.Return(call);
Stream s = m.Build(kAllExceptNopInstructions);
@@ -532,8 +573,8 @@
size_t num_operands =
1 + // Code object.
1 + // Frame state deopt id
- 5 + // One input for each value in frame state + context.
- 4 + // One input for each value in the parent frame state + context.
+ 6 + // One input for each value in frame state + context.
+ 5 + // One input for each value in the parent frame state + context.
1 + // Function.
1; // Context.
EXPECT_EQ(num_operands, call_instr->InputCount());
@@ -550,34 +591,36 @@
EXPECT_EQ(1u, desc_before_outer->locals_count());
EXPECT_EQ(1u, desc_before_outer->stack_count());
// Values from parent environment.
- EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(2)));
- EXPECT_EQ(kMachInt32, desc_before_outer->GetType(0));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(0));
+ EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(3)));
+ EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(1));
// Context:
- EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(3)));
- EXPECT_EQ(kMachAnyTagged, desc_before_outer->GetType(1));
- EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(4)));
- EXPECT_EQ(kMachInt32, desc_before_outer->GetType(2));
- EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(5)));
- EXPECT_EQ(kMachInt32, desc_before_outer->GetType(3));
+ EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(4)));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before_outer->GetType(2));
+ EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(5)));
+ EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(3));
+ EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(6)));
+ EXPECT_EQ(MachineType::Int32(), desc_before_outer->GetType(4));
// Values from the nested frame.
EXPECT_EQ(1u, desc_before->parameters_count());
EXPECT_EQ(1u, desc_before->locals_count());
EXPECT_EQ(2u, desc_before->stack_count());
- EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(6)));
- EXPECT_EQ(kMachInt32, desc_before->GetType(0));
- EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(7)));
- EXPECT_EQ(kMachAnyTagged, desc_before->GetType(1));
- EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(8)));
- EXPECT_EQ(kMachFloat64, desc_before->GetType(2));
- EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(9)));
- EXPECT_EQ(kMachInt32, desc_before->GetType(3));
- EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(10)));
- EXPECT_EQ(kMachInt32, desc_before->GetType(4));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(0));
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(8)));
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(1));
+ EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(9)));
+ EXPECT_EQ(MachineType::AnyTagged(), desc_before->GetType(2));
+ EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(10)));
+ EXPECT_EQ(MachineType::Float64(), desc_before->GetType(3));
+ EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(11)));
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(4));
+ EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(12)));
+ EXPECT_EQ(MachineType::Int32(), desc_before->GetType(5));
// Function.
- EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(11)));
+ EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(13)));
// Context.
- EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(12)));
+ EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(14)));
// Continuation.
EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
diff --git a/test/unittests/compiler/instruction-selector-unittest.h b/test/unittests/compiler/instruction-selector-unittest.h
index e65d68b..fc7c144 100644
--- a/test/unittests/compiler/instruction-selector-unittest.h
+++ b/test/unittests/compiler/instruction-selector-unittest.h
@@ -18,10 +18,11 @@
namespace internal {
namespace compiler {
-class InstructionSelectorTest : public TestWithContext, public TestWithZone {
+class InstructionSelectorTest : public TestWithContext,
+ public TestWithIsolateAndZone {
public:
InstructionSelectorTest();
- ~InstructionSelectorTest() OVERRIDE;
+ ~InstructionSelectorTest() override;
base::RandomNumberGenerator* rng() { return &rng_; }
@@ -33,32 +34,41 @@
kAllExceptNopInstructions
};
- class StreamBuilder FINAL : public RawMachineAssembler {
+ class StreamBuilder final : public RawMachineAssembler {
public:
StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
- : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type)),
+ : RawMachineAssembler(test->isolate(),
+ new (test->zone()) Graph(test->zone()),
+ MakeCallDescriptor(test->zone(), return_type),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type)
: RawMachineAssembler(
- new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type, parameter0_type)),
+ test->isolate(), new (test->zone()) Graph(test->zone()),
+ MakeCallDescriptor(test->zone(), return_type, parameter0_type),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type, MachineType parameter1_type)
: RawMachineAssembler(
- new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type, parameter0_type,
- parameter1_type)),
+ test->isolate(), new (test->zone()) Graph(test->zone()),
+ MakeCallDescriptor(test->zone(), return_type, parameter0_type,
+ parameter1_type),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
MachineType parameter0_type, MachineType parameter1_type,
MachineType parameter2_type)
: RawMachineAssembler(
- new (test->zone()) Graph(test->zone()),
- MakeMachineSignature(test->zone(), return_type, parameter0_type,
- parameter1_type, parameter2_type)),
+ test->isolate(), new (test->zone()) Graph(test->zone()),
+ MakeCallDescriptor(test->zone(), return_type, parameter0_type,
+ parameter1_type, parameter2_type),
+ MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
test_(test) {}
Stream Build(CpuFeature feature) {
@@ -71,51 +81,55 @@
return Build(InstructionSelector::Features(), mode);
}
Stream Build(InstructionSelector::Features features,
- StreamBuilderMode mode = kTargetInstructions);
+ StreamBuilderMode mode = kTargetInstructions,
+ InstructionSelector::SourcePositionMode source_position_mode =
+ InstructionSelector::kAllSourcePositions);
+
+ const FrameStateFunctionInfo* GetFrameStateFunctionInfo(int parameter_count,
+ int local_count);
private:
- MachineSignature* MakeMachineSignature(Zone* zone,
- MachineType return_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type) {
MachineSignature::Builder builder(zone, 1, 0);
builder.AddReturn(return_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
- MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
- MachineType parameter0_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
+ MachineType parameter0_type) {
MachineSignature::Builder builder(zone, 1, 1);
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
- MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
- MachineType parameter0_type,
- MachineType parameter1_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type) {
MachineSignature::Builder builder(zone, 1, 2);
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
builder.AddParam(parameter1_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
- MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
- MachineType parameter0_type,
- MachineType parameter1_type,
- MachineType parameter2_type) {
+ CallDescriptor* MakeCallDescriptor(Zone* zone, MachineType return_type,
+ MachineType parameter0_type,
+ MachineType parameter1_type,
+ MachineType parameter2_type) {
MachineSignature::Builder builder(zone, 1, 3);
builder.AddReturn(return_type);
builder.AddParam(parameter0_type);
builder.AddParam(parameter1_type);
builder.AddParam(parameter2_type);
- return builder.Build();
+ return Linkage::GetSimplifiedCDescriptor(zone, builder.Build());
}
private:
InstructionSelectorTest* test_;
};
- class Stream FINAL {
+ class Stream final {
public:
size_t size() const { return instructions_.size(); }
const Instruction* operator[](size_t index) const {
@@ -164,7 +178,9 @@
}
int ToVreg(const InstructionOperand* operand) const {
- if (operand->IsConstant()) return operand->index();
+ if (operand->IsConstant()) {
+ return ConstantOperand::cast(operand)->virtual_register();
+ }
EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
return UnallocatedOperand::cast(operand)->virtual_register();
}
@@ -200,14 +216,19 @@
Constant ToConstant(const InstructionOperand* operand) const {
ConstantMap::const_iterator i;
if (operand->IsConstant()) {
- i = constants_.find(operand->index());
+ i = constants_.find(ConstantOperand::cast(operand)->virtual_register());
+ EXPECT_EQ(ConstantOperand::cast(operand)->virtual_register(), i->first);
EXPECT_FALSE(constants_.end() == i);
} else {
EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
- i = immediates_.find(operand->index());
+ auto imm = ImmediateOperand::cast(operand);
+ if (imm->type() == ImmediateOperand::INLINE) {
+ return Constant(imm->inline_value());
+ }
+ i = immediates_.find(imm->indexed_value());
+ EXPECT_EQ(imm->indexed_value(), i->first);
EXPECT_FALSE(immediates_.end() == i);
}
- EXPECT_EQ(operand->index(), i->first);
return i->second;
}
diff --git a/test/unittests/compiler/instruction-sequence-unittest.cc b/test/unittests/compiler/instruction-sequence-unittest.cc
index 9546376..51112a6 100644
--- a/test/unittests/compiler/instruction-sequence-unittest.cc
+++ b/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -20,6 +20,14 @@
RegisterConfiguration::kMaxDoubleRegisters)];
+namespace {
+static int allocatable_codes[InstructionSequenceTest::kDefaultNRegs] = {
+ 0, 1, 2, 3, 4, 5, 6, 7};
+static int allocatable_double_codes[InstructionSequenceTest::kDefaultNRegs] = {
+ 0, 1, 2, 3, 4, 5, 6, 7};
+}
+
+
static void InitializeRegisterNames() {
char* loc = register_names_;
for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
@@ -40,7 +48,6 @@
num_general_registers_(kDefaultNRegs),
num_double_registers_(kDefaultNRegs),
instruction_blocks_(zone()),
- current_instruction_index_(-1),
current_block_(nullptr),
block_returns_(false) {
InitializeRegisterNames();
@@ -60,8 +67,10 @@
RegisterConfiguration* InstructionSequenceTest::config() {
if (config_.is_empty()) {
config_.Reset(new RegisterConfiguration(
- num_general_registers_, num_double_registers_, num_double_registers_,
- general_register_names_, double_register_names_));
+ num_general_registers_, num_double_registers_, num_general_registers_,
+ num_double_registers_, num_double_registers_, allocatable_codes,
+ allocatable_double_codes, general_register_names_,
+ double_register_names_));
}
return config_.get();
}
@@ -69,7 +78,8 @@
InstructionSequence* InstructionSequenceTest::sequence() {
if (sequence_ == nullptr) {
- sequence_ = new (zone()) InstructionSequence(zone(), &instruction_blocks_);
+ sequence_ = new (zone())
+ InstructionSequence(isolate(), zone(), &instruction_blocks_);
}
return sequence_;
}
@@ -93,14 +103,14 @@
}
-void InstructionSequenceTest::StartBlock() {
+void InstructionSequenceTest::StartBlock(bool deferred) {
block_returns_ = false;
- NewBlock();
+ NewBlock(deferred);
}
-int InstructionSequenceTest::EndBlock(BlockCompletion completion) {
- int instruction_index = kMinInt;
+Instruction* InstructionSequenceTest::EndBlock(BlockCompletion completion) {
+ Instruction* result = nullptr;
if (block_returns_) {
CHECK(completion.type_ == kBlockEnd || completion.type_ == kFallThrough);
completion.type_ = kBlockEnd;
@@ -109,44 +119,43 @@
case kBlockEnd:
break;
case kFallThrough:
- instruction_index = EmitFallThrough();
+ result = EmitJump();
break;
case kJump:
CHECK(!block_returns_);
- instruction_index = EmitJump();
+ result = EmitJump();
break;
case kBranch:
CHECK(!block_returns_);
- instruction_index = EmitBranch(completion.op_);
+ result = EmitBranch(completion.op_);
break;
}
completions_.push_back(completion);
CHECK(current_block_ != nullptr);
sequence()->EndBlock(current_block_->rpo_number());
current_block_ = nullptr;
- return instruction_index;
+ return result;
}
InstructionSequenceTest::TestOperand InstructionSequenceTest::Imm(int32_t imm) {
- int index = sequence()->AddImmediate(Constant(imm));
- return TestOperand(kImmediate, index);
+ return TestOperand(kImmediate, imm);
}
InstructionSequenceTest::VReg InstructionSequenceTest::Define(
TestOperand output_op) {
VReg vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(vreg, output_op)};
- Emit(vreg.value_, kArchNop, 1, outputs);
+ InstructionOperand outputs[1]{ConvertOutputOp(vreg, output_op)};
+ Emit(kArchNop, 1, outputs);
return vreg;
}
-int InstructionSequenceTest::Return(TestOperand input_op_0) {
+Instruction* InstructionSequenceTest::Return(TestOperand input_op_0) {
block_returns_ = true;
- InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
- return Emit(NewIndex(), kArchRet, 0, nullptr, 1, inputs);
+ InstructionOperand inputs[1]{ConvertInputOp(input_op_0)};
+ return Emit(kArchRet, 0, nullptr, 1, inputs);
}
@@ -154,20 +163,35 @@
VReg incoming_vreg_1,
VReg incoming_vreg_2,
VReg incoming_vreg_3) {
- auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, 10);
VReg inputs[] = {incoming_vreg_0, incoming_vreg_1, incoming_vreg_2,
incoming_vreg_3};
- for (size_t i = 0; i < arraysize(inputs); ++i) {
- if (inputs[i].value_ == kNoValue) break;
- Extend(phi, inputs[i]);
+ size_t input_count = 0;
+ for (; input_count < arraysize(inputs); ++input_count) {
+ if (inputs[input_count].value_ == kNoValue) break;
+ }
+ CHECK(input_count > 0);
+ auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, input_count);
+ for (size_t i = 0; i < input_count; ++i) {
+ SetInput(phi, i, inputs[i]);
}
current_block_->AddPhi(phi);
return phi;
}
-void InstructionSequenceTest::Extend(PhiInstruction* phi, VReg vreg) {
- phi->Extend(zone(), vreg.value_);
+PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
+ size_t input_count) {
+ auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, input_count);
+ SetInput(phi, 0, incoming_vreg_0);
+ current_block_->AddPhi(phi);
+ return phi;
+}
+
+
+void InstructionSequenceTest::SetInput(PhiInstruction* phi, size_t input,
+ VReg vreg) {
+ CHECK(vreg.value_ != kNoValue);
+ phi->SetInput(input, vreg.value_);
}
@@ -175,13 +199,13 @@
int32_t imm) {
VReg vreg = NewReg();
sequence()->AddConstant(vreg.value_, Constant(imm));
- InstructionOperand* outputs[1]{ConstantOperand::Create(vreg.value_, zone())};
- Emit(vreg.value_, kArchNop, 1, outputs);
+ InstructionOperand outputs[1]{ConstantOperand(vreg.value_)};
+ Emit(kArchNop, 1, outputs);
return vreg;
}
-int InstructionSequenceTest::EmitNop() { return Emit(NewIndex(), kArchNop); }
+Instruction* InstructionSequenceTest::EmitNop() { return Emit(kArchNop); }
static size_t CountInputs(size_t size,
@@ -194,16 +218,17 @@
}
-int InstructionSequenceTest::EmitI(size_t input_size, TestOperand* inputs) {
- InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
- return Emit(NewIndex(), kArchNop, 0, nullptr, input_size, mapped_inputs);
+Instruction* InstructionSequenceTest::EmitI(size_t input_size,
+ TestOperand* inputs) {
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
+ return Emit(kArchNop, 0, nullptr, input_size, mapped_inputs);
}
-int InstructionSequenceTest::EmitI(TestOperand input_op_0,
- TestOperand input_op_1,
- TestOperand input_op_2,
- TestOperand input_op_3) {
+Instruction* InstructionSequenceTest::EmitI(TestOperand input_op_0,
+ TestOperand input_op_1,
+ TestOperand input_op_2,
+ TestOperand input_op_3) {
TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
return EmitI(CountInputs(arraysize(inputs), inputs), inputs);
}
@@ -212,9 +237,9 @@
InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
- Emit(output_vreg.value_, kArchNop, 1, outputs, input_size, mapped_inputs);
+ InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
+ Emit(kArchNop, 1, outputs, input_size, mapped_inputs);
return output_vreg;
}
@@ -227,14 +252,36 @@
}
+InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
+ TestOperand output_op_0, TestOperand output_op_1, size_t input_size,
+ TestOperand* inputs) {
+ VRegPair output_vregs = std::make_pair(NewReg(), NewReg());
+ InstructionOperand outputs[2]{
+ ConvertOutputOp(output_vregs.first, output_op_0),
+ ConvertOutputOp(output_vregs.second, output_op_1)};
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
+ Emit(kArchNop, 2, outputs, input_size, mapped_inputs);
+ return output_vregs;
+}
+
+
+InstructionSequenceTest::VRegPair InstructionSequenceTest::EmitOOI(
+ TestOperand output_op_0, TestOperand output_op_1, TestOperand input_op_0,
+ TestOperand input_op_1, TestOperand input_op_2, TestOperand input_op_3) {
+ TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+ return EmitOOI(output_op_0, output_op_1,
+ CountInputs(arraysize(inputs), inputs), inputs);
+}
+
+
InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
TestOperand output_op, size_t input_size, TestOperand* inputs) {
VReg output_vreg = NewReg();
- InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
- CHECK(UnallocatedOperand::cast(outputs[0])->HasFixedPolicy());
- InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
- Emit(output_vreg.value_, kArchCallCodeObject, 1, outputs, input_size,
- mapped_inputs, 0, nullptr, true);
+ InstructionOperand outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+ CHECK(UnallocatedOperand::cast(outputs[0]).HasFixedPolicy());
+ InstructionOperand* mapped_inputs = ConvertInputs(input_size, inputs);
+ Emit(kArchCallCodeObject, 1, outputs, input_size, mapped_inputs, 0, nullptr,
+ true);
return output_vreg;
}
@@ -247,86 +294,68 @@
}
-const Instruction* InstructionSequenceTest::GetInstruction(
- int instruction_index) {
- auto it = instructions_.find(instruction_index);
- CHECK(it != instructions_.end());
- return it->second;
-}
-
-
-int InstructionSequenceTest::EmitBranch(TestOperand input_op) {
- InstructionOperand* inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
- ConvertInputOp(Imm()), ConvertInputOp(Imm())};
+Instruction* InstructionSequenceTest::EmitBranch(TestOperand input_op) {
+ InstructionOperand inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
+ ConvertInputOp(Imm()), ConvertInputOp(Imm())};
InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
FlagsConditionField::encode(kEqual);
- auto instruction =
- NewInstruction(opcode, 0, nullptr, 4, inputs)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
+ auto instruction = NewInstruction(opcode, 0, nullptr, 4, inputs);
+ return AddInstruction(instruction);
}
-int InstructionSequenceTest::EmitFallThrough() {
- auto instruction = NewInstruction(kArchNop, 0, nullptr)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
+Instruction* InstructionSequenceTest::EmitFallThrough() {
+ auto instruction = NewInstruction(kArchNop, 0, nullptr);
+ return AddInstruction(instruction);
}
-int InstructionSequenceTest::EmitJump() {
- InstructionOperand* inputs[1]{ConvertInputOp(Imm())};
- auto instruction =
- NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
- return AddInstruction(NewIndex(), instruction);
+Instruction* InstructionSequenceTest::EmitJump() {
+ InstructionOperand inputs[1]{ConvertInputOp(Imm())};
+ auto instruction = NewInstruction(kArchJmp, 0, nullptr, 1, inputs);
+ return AddInstruction(instruction);
}
Instruction* InstructionSequenceTest::NewInstruction(
- InstructionCode code, size_t outputs_size, InstructionOperand** outputs,
- size_t inputs_size, InstructionOperand** inputs, size_t temps_size,
- InstructionOperand** temps) {
- CHECK_NE(nullptr, current_block_);
+ InstructionCode code, size_t outputs_size, InstructionOperand* outputs,
+ size_t inputs_size, InstructionOperand* inputs, size_t temps_size,
+ InstructionOperand* temps) {
+ CHECK(current_block_);
return Instruction::New(zone(), code, outputs_size, outputs, inputs_size,
inputs, temps_size, temps);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy) {
- auto unallocated = new (zone()) UnallocatedOperand(policy);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, op.vreg_.value_);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy,
UnallocatedOperand::Lifetime lifetime) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, lifetime);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, lifetime, op.vreg_.value_);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::ExtendedPolicy policy, int index) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, index);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, index, op.vreg_.value_);
}
-InstructionOperand* InstructionSequenceTest::Unallocated(
+InstructionOperand InstructionSequenceTest::Unallocated(
TestOperand op, UnallocatedOperand::BasicPolicy policy, int index) {
- auto unallocated = new (zone()) UnallocatedOperand(policy, index);
- unallocated->set_virtual_register(op.vreg_.value_);
- return unallocated;
+ return UnallocatedOperand(policy, index, op.vreg_.value_);
}
-InstructionOperand** InstructionSequenceTest::ConvertInputs(
+InstructionOperand* InstructionSequenceTest::ConvertInputs(
size_t input_size, TestOperand* inputs) {
- InstructionOperand** mapped_inputs =
- zone()->NewArray<InstructionOperand*>(static_cast<int>(input_size));
+ InstructionOperand* mapped_inputs =
+ zone()->NewArray<InstructionOperand>(static_cast<int>(input_size));
for (size_t i = 0; i < input_size; ++i) {
mapped_inputs[i] = ConvertInputOp(inputs[i]);
}
@@ -334,10 +363,10 @@
}
-InstructionOperand* InstructionSequenceTest::ConvertInputOp(TestOperand op) {
+InstructionOperand InstructionSequenceTest::ConvertInputOp(TestOperand op) {
if (op.type_ == kImmediate) {
CHECK_EQ(op.vreg_.value_, kNoValue);
- return ImmediateOperand::Create(op.value_, zone());
+ return ImmediateOperand(ImmediateOperand::INLINE, op.value_);
}
CHECK_NE(op.vreg_.value_, kNoValue);
switch (op.type_) {
@@ -351,6 +380,9 @@
case kRegister:
return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START);
+ case kSlot:
+ return Unallocated(op, UnallocatedOperand::MUST_HAVE_SLOT,
+ UnallocatedOperand::USED_AT_START);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
@@ -360,12 +392,12 @@
break;
}
CHECK(false);
- return NULL;
+ return InstructionOperand();
}
-InstructionOperand* InstructionSequenceTest::ConvertOutputOp(VReg vreg,
- TestOperand op) {
+InstructionOperand InstructionSequenceTest::ConvertOutputOp(VReg vreg,
+ TestOperand op) {
CHECK_EQ(op.vreg_.value_, kNoValue);
op.vreg_ = vreg;
switch (op.type_) {
@@ -382,21 +414,20 @@
break;
}
CHECK(false);
- return NULL;
+ return InstructionOperand();
}
-InstructionBlock* InstructionSequenceTest::NewBlock() {
+InstructionBlock* InstructionSequenceTest::NewBlock(bool deferred) {
CHECK(current_block_ == nullptr);
- auto block_id = BasicBlock::Id::FromSize(instruction_blocks_.size());
- Rpo rpo = Rpo::FromInt(block_id.ToInt());
+ Rpo rpo = Rpo::FromInt(static_cast<int>(instruction_blocks_.size()));
Rpo loop_header = Rpo::Invalid();
Rpo loop_end = Rpo::Invalid();
if (!loop_blocks_.empty()) {
auto& loop_data = loop_blocks_.back();
// This is a loop header.
if (!loop_data.loop_header_.IsValid()) {
- loop_end = Rpo::FromInt(block_id.ToInt() + loop_data.expected_blocks_);
+ loop_end = Rpo::FromInt(rpo.ToInt() + loop_data.expected_blocks_);
loop_data.expected_blocks_--;
loop_data.loop_header_ = rpo;
} else {
@@ -409,7 +440,7 @@
}
// Construct instruction block.
auto instruction_block = new (zone())
- InstructionBlock(zone(), block_id, rpo, loop_header, loop_end, false);
+ InstructionBlock(zone(), rpo, loop_header, loop_end, deferred, false);
instruction_blocks_.push_back(instruction_block);
current_block_ = instruction_block;
sequence()->StartBlock(rpo);
@@ -418,13 +449,22 @@
void InstructionSequenceTest::WireBlocks() {
- CHECK_EQ(nullptr, current_block());
+ CHECK(!current_block());
CHECK(instruction_blocks_.size() == completions_.size());
+ CHECK(loop_blocks_.empty());
+ // Wire in end block to look like a scheduler produced cfg.
+ auto end_block = NewBlock();
+ current_block_ = nullptr;
+ sequence()->EndBlock(end_block->rpo_number());
size_t offset = 0;
for (const auto& completion : completions_) {
switch (completion.type_) {
- case kBlockEnd:
+ case kBlockEnd: {
+ auto block = instruction_blocks_[offset];
+ block->successors().push_back(end_block->rpo_number());
+ end_block->predecessors().push_back(block->rpo_number());
break;
+ }
case kFallThrough: // Fallthrough.
case kJump:
WireBlock(offset, completion.offset_0_);
@@ -450,24 +490,20 @@
}
-int InstructionSequenceTest::Emit(int instruction_index, InstructionCode code,
- size_t outputs_size,
- InstructionOperand** outputs,
- size_t inputs_size,
- InstructionOperand** inputs,
- size_t temps_size, InstructionOperand** temps,
- bool is_call) {
+Instruction* InstructionSequenceTest::Emit(
+ InstructionCode code, size_t outputs_size, InstructionOperand* outputs,
+ size_t inputs_size, InstructionOperand* inputs, size_t temps_size,
+ InstructionOperand* temps, bool is_call) {
auto instruction = NewInstruction(code, outputs_size, outputs, inputs_size,
inputs, temps_size, temps);
if (is_call) instruction->MarkAsCall();
- return AddInstruction(instruction_index, instruction);
+ return AddInstruction(instruction);
}
-int InstructionSequenceTest::AddInstruction(int instruction_index,
- Instruction* instruction) {
+Instruction* InstructionSequenceTest::AddInstruction(Instruction* instruction) {
sequence()->AddInstruction(instruction);
- return instruction_index;
+ return instruction;
}
} // namespace compiler
diff --git a/test/unittests/compiler/instruction-sequence-unittest.h b/test/unittests/compiler/instruction-sequence-unittest.h
index ce0a5b4..eb86bd9 100644
--- a/test/unittests/compiler/instruction-sequence-unittest.h
+++ b/test/unittests/compiler/instruction-sequence-unittest.h
@@ -13,12 +13,12 @@
namespace internal {
namespace compiler {
-class InstructionSequenceTest : public TestWithZone {
+class InstructionSequenceTest : public TestWithIsolateAndZone {
public:
- static const int kDefaultNRegs = 4;
+ static const int kDefaultNRegs = 8;
static const int kNoValue = kMinInt;
- typedef BasicBlock::RpoNumber Rpo;
+ typedef RpoNumber Rpo;
struct VReg {
VReg() : value_(kNoValue) {}
@@ -27,6 +27,8 @@
int value_;
};
+ typedef std::pair<VReg, VReg> VRegPair;
+
enum TestOperandType {
kInvalid,
kSameAsFirst,
@@ -34,6 +36,7 @@
kFixedRegister,
kSlot,
kFixedSlot,
+ kExplicit,
kImmediate,
kNone,
kConstant,
@@ -55,6 +58,11 @@
static TestOperand Same() { return TestOperand(kSameAsFirst, VReg()); }
+ static TestOperand ExplicitReg(int index) {
+ TestOperandType type = kExplicit;
+ return TestOperand(type, VReg(), index);
+ }
+
static TestOperand Reg(VReg vreg, int index = kNoValue) {
TestOperandType type = kRegister;
if (index != kNoValue) type = kFixedRegister;
@@ -124,44 +132,48 @@
void StartLoop(int loop_blocks);
void EndLoop();
- void StartBlock();
- int EndBlock(BlockCompletion completion = FallThrough());
+ void StartBlock(bool deferred = false);
+ Instruction* EndBlock(BlockCompletion completion = FallThrough());
TestOperand Imm(int32_t imm = 0);
VReg Define(TestOperand output_op);
VReg Parameter(TestOperand output_op = Reg()) { return Define(output_op); }
- int Return(TestOperand input_op_0);
- int Return(VReg vreg) { return Return(Reg(vreg, 0)); }
+ Instruction* Return(TestOperand input_op_0);
+ Instruction* Return(VReg vreg) { return Return(Reg(vreg, 0)); }
PhiInstruction* Phi(VReg incoming_vreg_0 = VReg(),
VReg incoming_vreg_1 = VReg(),
VReg incoming_vreg_2 = VReg(),
VReg incoming_vreg_3 = VReg());
- void Extend(PhiInstruction* phi, VReg vreg);
+ PhiInstruction* Phi(VReg incoming_vreg_0, size_t input_count);
+ void SetInput(PhiInstruction* phi, size_t input, VReg vreg);
VReg DefineConstant(int32_t imm = 0);
- int EmitNop();
- int EmitI(size_t input_size, TestOperand* inputs);
- int EmitI(TestOperand input_op_0 = TestOperand(),
- TestOperand input_op_1 = TestOperand(),
- TestOperand input_op_2 = TestOperand(),
- TestOperand input_op_3 = TestOperand());
+ Instruction* EmitNop();
+ Instruction* EmitI(size_t input_size, TestOperand* inputs);
+ Instruction* EmitI(TestOperand input_op_0 = TestOperand(),
+ TestOperand input_op_1 = TestOperand(),
+ TestOperand input_op_2 = TestOperand(),
+ TestOperand input_op_3 = TestOperand());
VReg EmitOI(TestOperand output_op, size_t input_size, TestOperand* inputs);
VReg EmitOI(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
TestOperand input_op_1 = TestOperand(),
TestOperand input_op_2 = TestOperand(),
TestOperand input_op_3 = TestOperand());
+ VRegPair EmitOOI(TestOperand output_op_0, TestOperand output_op_1,
+ size_t input_size, TestOperand* inputs);
+ VRegPair EmitOOI(TestOperand output_op_0, TestOperand output_op_1,
+ TestOperand input_op_0 = TestOperand(),
+ TestOperand input_op_1 = TestOperand(),
+ TestOperand input_op_2 = TestOperand(),
+ TestOperand input_op_3 = TestOperand());
VReg EmitCall(TestOperand output_op, size_t input_size, TestOperand* inputs);
VReg EmitCall(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
TestOperand input_op_1 = TestOperand(),
TestOperand input_op_2 = TestOperand(),
TestOperand input_op_3 = TestOperand());
- // Get defining instruction vreg or value returned at instruction creation
- // time when there is no return value.
- const Instruction* GetInstruction(int instruction_index);
-
InstructionBlock* current_block() const { return current_block_; }
int num_general_registers() const { return num_general_registers_; }
int num_double_registers() const { return num_double_registers_; }
@@ -171,42 +183,42 @@
private:
VReg NewReg() { return VReg(sequence()->NextVirtualRegister()); }
- int NewIndex() { return current_instruction_index_--; }
static TestOperand Invalid() { return TestOperand(kInvalid, VReg()); }
- int EmitBranch(TestOperand input_op);
- int EmitFallThrough();
- int EmitJump();
+ Instruction* EmitBranch(TestOperand input_op);
+ Instruction* EmitFallThrough();
+ Instruction* EmitJump();
Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
- InstructionOperand** outputs,
+ InstructionOperand* outputs,
size_t inputs_size = 0,
- InstructionOperand* *inputs = nullptr,
+ InstructionOperand* inputs = nullptr,
size_t temps_size = 0,
- InstructionOperand* *temps = nullptr);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy,
- UnallocatedOperand::Lifetime lifetime);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::ExtendedPolicy policy,
- int index);
- InstructionOperand* Unallocated(TestOperand op,
- UnallocatedOperand::BasicPolicy policy,
- int index);
- InstructionOperand** ConvertInputs(size_t input_size, TestOperand* inputs);
- InstructionOperand* ConvertInputOp(TestOperand op);
- InstructionOperand* ConvertOutputOp(VReg vreg, TestOperand op);
- InstructionBlock* NewBlock();
+ InstructionOperand* temps = nullptr);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy,
+ UnallocatedOperand::Lifetime lifetime);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::ExtendedPolicy policy,
+ int index);
+ InstructionOperand Unallocated(TestOperand op,
+ UnallocatedOperand::BasicPolicy policy,
+ int index);
+ InstructionOperand* ConvertInputs(size_t input_size, TestOperand* inputs);
+ InstructionOperand ConvertInputOp(TestOperand op);
+ InstructionOperand ConvertOutputOp(VReg vreg, TestOperand op);
+ InstructionBlock* NewBlock(bool deferred = false);
void WireBlock(size_t block_offset, int jump_offset);
- int Emit(int instruction_index, InstructionCode code, size_t outputs_size = 0,
- InstructionOperand* *outputs = nullptr, size_t inputs_size = 0,
- InstructionOperand* *inputs = nullptr, size_t temps_size = 0,
- InstructionOperand* *temps = nullptr, bool is_call = false);
+ Instruction* Emit(InstructionCode code, size_t outputs_size = 0,
+ InstructionOperand* outputs = nullptr,
+ size_t inputs_size = 0,
+ InstructionOperand* inputs = nullptr, size_t temps_size = 0,
+ InstructionOperand* temps = nullptr, bool is_call = false);
- int AddInstruction(int instruction_index, Instruction* instruction);
+ Instruction* AddInstruction(Instruction* instruction);
struct LoopData {
Rpo loop_header_;
@@ -217,7 +229,7 @@
typedef std::map<int, const Instruction*> Instructions;
typedef std::vector<BlockCompletion> Completions;
- SmartPointer<RegisterConfiguration> config_;
+ base::SmartPointer<RegisterConfiguration> config_;
InstructionSequence* sequence_;
int num_general_registers_;
int num_double_registers_;
@@ -225,7 +237,6 @@
// Block building state.
InstructionBlocks instruction_blocks_;
Instructions instructions_;
- int current_instruction_index_;
Completions completions_;
LoopBlocks loop_blocks_;
InstructionBlock* current_block_;
diff --git a/test/unittests/compiler/interpreter-assembler-unittest.cc b/test/unittests/compiler/interpreter-assembler-unittest.cc
new file mode 100644
index 0000000..f57ca05
--- /dev/null
+++ b/test/unittests/compiler/interpreter-assembler-unittest.cc
@@ -0,0 +1,687 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/interpreter-assembler-unittest.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/interface-descriptors.h"
+#include "src/isolate.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using ::testing::_;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+const interpreter::Bytecode kBytecodes[] = {
+#define DEFINE_BYTECODE(Name, ...) interpreter::Bytecode::k##Name,
+ BYTECODE_LIST(DEFINE_BYTECODE)
+#undef DEFINE_BYTECODE
+};
+
+
+Matcher<Node*> IsIntPtrConstant(const intptr_t value) {
+ return kPointerSize == 8 ? IsInt64Constant(static_cast<int64_t>(value))
+ : IsInt32Constant(static_cast<int32_t>(value));
+}
+
+
+Matcher<Node*> IsIntPtrAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Add(lhs_matcher, rhs_matcher)
+ : IsInt32Add(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsIntPtrSub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsInt64Sub(lhs_matcher, rhs_matcher)
+ : IsInt32Sub(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsWordShl(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Shl(lhs_matcher, rhs_matcher)
+ : IsWord32Shl(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsWordSar(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Sar(lhs_matcher, rhs_matcher)
+ : IsWord32Sar(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> IsWordOr(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return kPointerSize == 8 ? IsWord64Or(lhs_matcher, rhs_matcher)
+ : IsWord32Or(lhs_matcher, rhs_matcher);
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
+ const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+ return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
+ const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher) {
+ return ::i::compiler::IsStore(rep_matcher, base_matcher, index_matcher,
+ value_matcher, _, _);
+}
+
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperand(
+ int offset) {
+ return IsLoad(
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
+ IsBytecodeOperandSignExtended(int offset) {
+ Matcher<Node*> load_matcher = IsLoad(
+ MachineType::Int8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
+ if (kPointerSize == 8) {
+ load_matcher = IsChangeInt32ToInt64(load_matcher);
+ }
+ return load_matcher;
+}
+
+
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsBytecodeOperandShort(
+ int offset) {
+ if (TargetSupportsUnalignedAccess()) {
+ return IsLoad(
+ MachineType::Uint16(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
+ } else {
+ Matcher<Node*> first_byte = IsLoad(
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
+ Matcher<Node*> second_byte = IsLoad(
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(offset + 1)));
+#if V8_TARGET_LITTLE_ENDIAN
+ return IsWordOr(IsWordShl(second_byte, IsInt32Constant(kBitsPerByte)),
+ first_byte);
+#elif V8_TARGET_BIG_ENDIAN
+ return IsWordOr(IsWordShl(first_byte, IsInt32Constant(kBitsPerByte)),
+ second_byte);
+#else
+#error "Unknown Architecture"
+#endif
+ }
+}
+
+
+Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::
+ IsBytecodeOperandShortSignExtended(int offset) {
+ Matcher<Node*> load_matcher;
+ if (TargetSupportsUnalignedAccess()) {
+ load_matcher = IsLoad(
+ MachineType::Int16(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(offset)));
+ } else {
+#if V8_TARGET_LITTLE_ENDIAN
+ int hi_byte_offset = offset + 1;
+ int lo_byte_offset = offset;
+
+#elif V8_TARGET_BIG_ENDIAN
+ int hi_byte_offset = offset;
+ int lo_byte_offset = offset + 1;
+#else
+#error "Unknown Architecture"
+#endif
+ Matcher<Node*> hi_byte = IsLoad(
+ MachineType::Int8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(hi_byte_offset)));
+ hi_byte = IsWord32Shl(hi_byte, IsInt32Constant(kBitsPerByte));
+ Matcher<Node*> lo_byte = IsLoad(
+ MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(lo_byte_offset)));
+ load_matcher = IsWord32Or(hi_byte, lo_byte);
+ }
+
+ if (kPointerSize == 8) {
+ load_matcher = IsChangeInt32ToInt64(load_matcher);
+ }
+ return load_matcher;
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, Dispatch) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Dispatch();
+ Graph* graph = m.graph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(interpreter::Bytecodes::Size(bytecode)));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+
+ EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
+ EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), code_target_matcher,
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ next_bytecode_offset_matcher,
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, Jump) {
+ int jump_offsets[] = {-9710, -77, 0, +3, +97109};
+ TRACED_FOREACH(int, jump_offset, jump_offsets) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Jump(m.Int32Constant(jump_offset));
+ Graph* graph = m.graph();
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(jump_offset));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+
+ EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
+ EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), code_target_matcher,
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ next_bytecode_offset_matcher,
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, JumpIfWordEqual) {
+ static const int kJumpIfTrueOffset = 73;
+
+ MachineOperatorBuilder machine(zone());
+
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* lhs = m.IntPtrConstant(0);
+ Node* rhs = m.IntPtrConstant(1);
+ m.JumpIfWordEqual(lhs, rhs, m.Int32Constant(kJumpIfTrueOffset));
+ Graph* graph = m.graph();
+ Node* end = graph->end();
+ EXPECT_EQ(2, end->InputCount());
+
+ int jump_offsets[] = {kJumpIfTrueOffset,
+ interpreter::Bytecodes::Size(bytecode)};
+ for (int i = 0; i < static_cast<int>(arraysize(jump_offsets)); i++) {
+ Matcher<Node*> next_bytecode_offset_matcher =
+ IsIntPtrAdd(IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsInt32Constant(jump_offsets[i]));
+ Matcher<Node*> target_bytecode_matcher =
+ m.IsLoad(MachineType::Uint8(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ next_bytecode_offset_matcher);
+ Matcher<Node*> code_target_matcher =
+ m.IsLoad(MachineType::Pointer(),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsWord32Shl(target_bytecode_matcher,
+ IsInt32Constant(kPointerSizeLog2)));
+ EXPECT_THAT(
+ end->InputAt(i),
+ IsTailCall(m.call_descriptor(), code_target_matcher,
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ next_bytecode_offset_matcher,
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
+
+ // TODO(oth): test control flow paths.
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, Return) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ m.Return();
+ Graph* graph = m.graph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ EXPECT_EQ(CallDescriptor::kCallCodeObject, m.call_descriptor()->kind());
+ EXPECT_TRUE(m.call_descriptor()->flags() & CallDescriptor::kCanUseRoots);
+ Handle<HeapObject> exit_trampoline =
+ isolate()->builtins()->InterpreterExitTrampoline();
+ EXPECT_THAT(
+ tail_call_node,
+ IsTailCall(m.call_descriptor(), IsHeapConstant(exit_trampoline),
+ IsParameter(Linkage::kInterpreterAccumulatorParameter),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsParameter(Linkage::kInterpreterBytecodeOffsetParameter),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsParameter(Linkage::kInterpreterDispatchTableParameter),
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ int number_of_operands = interpreter::Bytecodes::NumberOfOperands(bytecode);
+ for (int i = 0; i < number_of_operands; i++) {
+ int offset = interpreter::Bytecodes::GetOperandOffset(bytecode, i);
+ switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
+ case interpreter::OperandType::kCount8:
+ EXPECT_THAT(m.BytecodeOperandCount(i), m.IsBytecodeOperand(offset));
+ break;
+ case interpreter::OperandType::kIdx8:
+ EXPECT_THAT(m.BytecodeOperandIdx(i), m.IsBytecodeOperand(offset));
+ break;
+ case interpreter::OperandType::kImm8:
+ EXPECT_THAT(m.BytecodeOperandImm(i),
+ m.IsBytecodeOperandSignExtended(offset));
+ break;
+ case interpreter::OperandType::kMaybeReg8:
+ case interpreter::OperandType::kReg8:
+ case interpreter::OperandType::kRegPair8:
+ EXPECT_THAT(m.BytecodeOperandReg(i),
+ m.IsBytecodeOperandSignExtended(offset));
+ break;
+ case interpreter::OperandType::kCount16:
+ EXPECT_THAT(m.BytecodeOperandCount(i),
+ m.IsBytecodeOperandShort(offset));
+ break;
+ case interpreter::OperandType::kIdx16:
+ EXPECT_THAT(m.BytecodeOperandIdx(i),
+ m.IsBytecodeOperandShort(offset));
+ break;
+ case interpreter::OperandType::kReg16:
+ EXPECT_THAT(m.BytecodeOperandReg(i),
+ m.IsBytecodeOperandShortSignExtended(offset));
+ break;
+ case interpreter::OperandType::kNone:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, GetSetAccumulator) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ // Should be incoming accumulator if not set.
+ EXPECT_THAT(m.GetAccumulator(),
+ IsParameter(Linkage::kInterpreterAccumulatorParameter));
+
+ // Should be set by SedtAccumulator.
+ Node* accumulator_value_1 = m.Int32Constant(0xdeadbeef);
+ m.SetAccumulator(accumulator_value_1);
+ EXPECT_THAT(m.GetAccumulator(), accumulator_value_1);
+ Node* accumulator_value_2 = m.Int32Constant(42);
+ m.SetAccumulator(accumulator_value_2);
+ EXPECT_THAT(m.GetAccumulator(), accumulator_value_2);
+
+ // Should be passed to next bytecode handler on dispatch.
+ m.Dispatch();
+ Graph* graph = m.graph();
+
+ Node* end = graph->end();
+ EXPECT_EQ(1, end->InputCount());
+ Node* tail_call_node = end->InputAt(0);
+
+ EXPECT_THAT(tail_call_node,
+ IsTailCall(m.call_descriptor(), _, accumulator_value_2, _, _, _,
+ _, _, _));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, RegisterLocation) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* reg_location_node = m.RegisterLocation(reg_index_node);
+ EXPECT_THAT(
+ reg_location_node,
+ IsIntPtrAdd(
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadRegister) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* load_reg_node = m.LoadRegister(reg_index_node);
+ EXPECT_THAT(
+ load_reg_node,
+ m.IsLoad(MachineType::AnyTagged(),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, StoreRegister) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* store_value = m.Int32Constant(0xdeadbeef);
+ Node* reg_index_node = m.Int32Constant(44);
+ Node* store_reg_node = m.StoreRegister(store_value, reg_index_node);
+ EXPECT_THAT(
+ store_reg_node,
+ m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kNoWriteBarrier),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsWordShl(reg_index_node, IsInt32Constant(kPointerSizeLog2)),
+ store_value));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* value = m.Int32Constant(44);
+ EXPECT_THAT(m.SmiTag(value),
+ IsWordShl(value, IsInt32Constant(kSmiShiftSize + kSmiTagSize)));
+ EXPECT_THAT(m.SmiUntag(value),
+ IsWordSar(value, IsInt32Constant(kSmiShiftSize + kSmiTagSize)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, IntPtrAdd) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* a = m.Int32Constant(0);
+ Node* b = m.Int32Constant(1);
+ Node* add = m.IntPtrAdd(a, b);
+ EXPECT_THAT(add, IsIntPtrAdd(a, b));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, IntPtrSub) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* a = m.Int32Constant(0);
+ Node* b = m.Int32Constant(1);
+ Node* add = m.IntPtrSub(a, b);
+ EXPECT_THAT(add, IsIntPtrSub(a, b));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, WordShl) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* a = m.Int32Constant(0);
+ Node* add = m.WordShl(a, 10);
+ EXPECT_THAT(add, IsWordShl(a, IsInt32Constant(10)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* index = m.Int32Constant(2);
+ Node* load_constant = m.LoadConstantPoolEntry(index);
+ Matcher<Node*> constant_pool_matcher = m.IsLoad(
+ MachineType::AnyTagged(),
+ IsParameter(Linkage::kInterpreterBytecodeArrayParameter),
+ IsIntPtrConstant(BytecodeArray::kConstantPoolOffset - kHeapObjectTag));
+ EXPECT_THAT(
+ load_constant,
+ m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
+ IsIntPtrAdd(
+ IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ IsWordShl(index, IsInt32Constant(kPointerSizeLog2)))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadFixedArrayElement) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ int index = 3;
+ Node* fixed_array = m.IntPtrConstant(0xdeadbeef);
+ Node* load_element = m.LoadFixedArrayElement(fixed_array, index);
+ EXPECT_THAT(
+ load_element,
+ m.IsLoad(MachineType::AnyTagged(), fixed_array,
+ IsIntPtrAdd(
+ IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
+ IsWordShl(IsInt32Constant(index),
+ IsInt32Constant(kPointerSizeLog2)))));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* object = m.IntPtrConstant(0xdeadbeef);
+ int offset = 16;
+ Node* load_field = m.LoadObjectField(object, offset);
+ EXPECT_THAT(load_field,
+ m.IsLoad(MachineType::AnyTagged(), object,
+ IsIntPtrConstant(offset - kHeapObjectTag)));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadContextSlot) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* context = m.Int32Constant(1);
+ Node* slot_index = m.Int32Constant(22);
+ Node* load_context_slot = m.LoadContextSlot(context, slot_index);
+
+ Matcher<Node*> offset =
+ IsIntPtrAdd(IsWordShl(slot_index, IsInt32Constant(kPointerSizeLog2)),
+ IsInt32Constant(Context::kHeaderSize - kHeapObjectTag));
+ EXPECT_THAT(load_context_slot,
+ m.IsLoad(MachineType::AnyTagged(), context, offset));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, StoreContextSlot) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* context = m.Int32Constant(1);
+ Node* slot_index = m.Int32Constant(22);
+ Node* value = m.Int32Constant(100);
+ Node* store_context_slot = m.StoreContextSlot(context, slot_index, value);
+
+ Matcher<Node*> offset =
+ IsIntPtrAdd(IsWordShl(slot_index, IsInt32Constant(kPointerSizeLog2)),
+ IsInt32Constant(Context::kHeaderSize - kHeapObjectTag));
+ EXPECT_THAT(store_context_slot,
+ m.IsStore(StoreRepresentation(MachineRepresentation::kTagged,
+ kFullWriteBarrier),
+ context, offset, value));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime2) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* arg1 = m.Int32Constant(2);
+ Node* arg2 = m.Int32Constant(3);
+ Node* call_runtime = m.CallRuntime(Runtime::kAdd, arg1, arg2);
+ EXPECT_THAT(
+ call_runtime,
+ IsCall(_, _, arg1, arg2, _, IsInt32Constant(2),
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, CallRuntime) {
+ const int kResultSizes[] = {1, 2};
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ TRACED_FOREACH(int, result_size, kResultSizes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Callable builtin = CodeFactory::InterpreterCEntry(isolate(), result_size);
+
+ Node* function_id = m.Int32Constant(0);
+ Node* first_arg = m.Int32Constant(1);
+ Node* arg_count = m.Int32Constant(2);
+
+ Matcher<Node*> function_table = IsExternalConstant(
+ ExternalReference::runtime_function_table_address(isolate()));
+ Matcher<Node*> function = IsIntPtrAdd(
+ function_table,
+ IsInt32Mul(function_id, IsInt32Constant(sizeof(Runtime::Function))));
+ Matcher<Node*> function_entry =
+ m.IsLoad(MachineType::Pointer(), function,
+ IsInt32Constant(offsetof(Runtime::Function, entry)));
+
+ Node* call_runtime =
+ m.CallRuntime(function_id, first_arg, arg_count, result_size);
+ EXPECT_THAT(
+ call_runtime,
+ IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
+ function_entry,
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, CallIC) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ LoadWithVectorDescriptor descriptor(isolate());
+ Node* target = m.Int32Constant(1);
+ Node* arg1 = m.Int32Constant(2);
+ Node* arg2 = m.Int32Constant(3);
+ Node* arg3 = m.Int32Constant(4);
+ Node* arg4 = m.Int32Constant(5);
+ Node* call_ic = m.CallIC(descriptor, target, arg1, arg2, arg3, arg4);
+ EXPECT_THAT(
+ call_ic,
+ IsCall(_, target, arg1, arg2, arg3, arg4,
+ IsParameter(Linkage::kInterpreterContextParameter), _, _));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, CallJS) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Callable builtin = CodeFactory::InterpreterPushArgsAndCall(isolate());
+ Node* function = m.Int32Constant(0);
+ Node* first_arg = m.Int32Constant(1);
+ Node* arg_count = m.Int32Constant(2);
+ Node* call_js = m.CallJS(function, first_arg, arg_count);
+ EXPECT_THAT(
+ call_js,
+ IsCall(_, IsHeapConstant(builtin.code()), arg_count, first_arg,
+ function, IsParameter(Linkage::kInterpreterContextParameter), _,
+ _));
+ }
+}
+
+
+TARGET_TEST_F(InterpreterAssemblerTest, LoadTypeFeedbackVector) {
+ TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
+ InterpreterAssemblerForTest m(this, bytecode);
+ Node* feedback_vector = m.LoadTypeFeedbackVector();
+
+ Matcher<Node*> load_function_matcher =
+ m.IsLoad(MachineType::AnyTagged(),
+ IsParameter(Linkage::kInterpreterRegisterFileParameter),
+ IsIntPtrConstant(
+ InterpreterFrameConstants::kFunctionFromRegisterPointer));
+ Matcher<Node*> load_shared_function_info_matcher =
+ m.IsLoad(MachineType::AnyTagged(), load_function_matcher,
+ IsIntPtrConstant(JSFunction::kSharedFunctionInfoOffset -
+ kHeapObjectTag));
+
+ EXPECT_THAT(
+ feedback_vector,
+ m.IsLoad(MachineType::AnyTagged(), load_shared_function_info_matcher,
+ IsIntPtrConstant(SharedFunctionInfo::kFeedbackVectorOffset -
+ kHeapObjectTag)));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/interpreter-assembler-unittest.h b/test/unittests/compiler/interpreter-assembler-unittest.h
new file mode 100644
index 0000000..15fa38b
--- /dev/null
+++ b/test/unittests/compiler/interpreter-assembler-unittest.h
@@ -0,0 +1,57 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
+
+#include "src/compiler/interpreter-assembler.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using ::testing::Matcher;
+
+class InterpreterAssemblerTest : public TestWithIsolateAndZone {
+ public:
+ InterpreterAssemblerTest() {}
+ ~InterpreterAssemblerTest() override {}
+
+ class InterpreterAssemblerForTest final : public InterpreterAssembler {
+ public:
+ InterpreterAssemblerForTest(InterpreterAssemblerTest* test,
+ interpreter::Bytecode bytecode)
+ : InterpreterAssembler(test->isolate(), test->zone(), bytecode) {}
+ ~InterpreterAssemblerForTest() override {}
+
+ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher);
+ Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher);
+
+ Matcher<Node*> IsBytecodeOperand(int offset);
+ Matcher<Node*> IsBytecodeOperandSignExtended(int offset);
+ Matcher<Node*> IsBytecodeOperandShort(int offset);
+ Matcher<Node*> IsBytecodeOperandShortSignExtended(int offset);
+
+ using InterpreterAssembler::call_descriptor;
+ using InterpreterAssembler::graph;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(InterpreterAssemblerForTest);
+ };
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_UNITTESTS_COMPILER_INTERPRETER_ASSEMBLER_UNITTEST_H_
diff --git a/test/unittests/compiler/js-builtin-reducer-unittest.cc b/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 9c57282..78e9253 100644
--- a/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -4,8 +4,10 @@
#include "src/compiler/js-builtin-reducer.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
#include "src/compiler/typer.h"
+#include "src/isolate-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -24,13 +26,18 @@
protected:
Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::Flag::kNoFlags) {
- MachineOperatorBuilder machine(zone(), kMachPtr, flags);
- JSGraph jsgraph(graph(), common(), javascript(), &machine);
- JSBuiltinReducer reducer(&jsgraph);
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
+ SimplifiedOperatorBuilder simplified(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ JSBuiltinReducer reducer(&graph_reducer, &jsgraph);
return reducer.Reduce(node);
}
- Handle<JSFunction> MathFunction(const char* name) {
+ Node* MathFunction(const char* name) {
Handle<Object> m =
JSObject::GetProperty(isolate()->global_object(),
isolate()->factory()->NewStringFromAsciiChecked(
@@ -39,7 +46,7 @@
JSObject::GetProperty(
m, isolate()->factory()->NewStringFromAsciiChecked(name))
.ToHandleChecked());
- return f;
+ return HeapConstant(f);
}
JSOperatorBuilder* javascript() { return &javascript_; }
@@ -51,125 +58,93 @@
namespace {
+Type* const kIntegral32Types[] = {Type::UnsignedSmall(), Type::Negative32(),
+ Type::Unsigned31(), Type::SignedSmall(),
+ Type::Signed32(), Type::Unsigned32(),
+ Type::Integral32()};
+
+
+const LanguageMode kLanguageModes[] = {SLOPPY, STRICT, STRONG};
+
+
// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
Type* const kNumberTypes[] = {
- Type::UnsignedSmall(), Type::NegativeSigned32(),
- Type::NonNegativeSigned32(), Type::SignedSmall(),
- Type::Signed32(), Type::Unsigned32(),
- Type::Integral32(), Type::MinusZero(),
- Type::NaN(), Type::OrderedNumber(),
- Type::PlainNumber(), Type::Number()};
+ Type::UnsignedSmall(), Type::Negative32(), Type::Unsigned31(),
+ Type::SignedSmall(), Type::Signed32(), Type::Unsigned32(),
+ Type::Integral32(), Type::MinusZero(), Type::NaN(),
+ Type::OrderedNumber(), Type::PlainNumber(), Type::Number()};
} // namespace
// -----------------------------------------------------------------------------
-// Math.abs
-
-
-TEST_F(JSBuiltinReducerTest, MathAbs) {
- Handle<JSFunction> f = MathFunction("abs");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call);
-
- if (t0->Is(Type::Unsigned32())) {
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
- } else {
- Capture<Node*> branch;
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsSelect(kMachNone,
- IsNumberLessThan(IsNumberConstant(BitEq(0.0)), p0), p0,
- IsNumberSubtract(IsNumberConstant(BitEq(0.0)), p0)));
- }
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Math.sqrt
-
-
-TEST_F(JSBuiltinReducerTest, MathSqrt) {
- Handle<JSFunction> f = MathFunction("sqrt");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
- }
-}
-
-
-// -----------------------------------------------------------------------------
// Math.max
TEST_F(JSBuiltinReducerTest, MathMax0) {
- Handle<JSFunction> f = MathFunction("max");
+ Node* function = MathFunction("max");
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(2, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant());
- Reduction r = Reduce(call);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Node* call = graph()->NewNode(javascript()->CallFunction(2, language_mode),
+ function, UndefinedConstant(), context,
+ frame_state, frame_state, effect, control);
+ Reduction r = Reduce(call);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+ }
}
TEST_F(JSBuiltinReducerTest, MathMax1) {
- Handle<JSFunction> f = MathFunction("max");
+ Node* function = MathFunction("max");
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, language_mode),
+ function, UndefinedConstant(), p0, context,
+ frame_state, frame_state, effect, control);
+ Reduction r = Reduce(call);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), p0);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), p0);
+ }
}
}
TEST_F(JSBuiltinReducerTest, MathMax2) {
- Handle<JSFunction> f = MathFunction("max");
+ Node* function = MathFunction("max");
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- TRACED_FOREACH(Type*, t1, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call = graph()->NewNode(
- javascript()->CallFunction(4, NO_CALL_FUNCTION_FLAGS), fun,
- UndefinedConstant(), p0, p1);
- Reduction r = Reduce(call);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ TRACED_FOREACH(Type*, t1, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(4, language_mode),
+ function, UndefinedConstant(), p0, p1, context,
+ frame_state, frame_state, effect, control);
+ Reduction r = Reduce(call);
- if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(),
- IsSelect(kMachNone, IsNumberLessThan(p1, p0), p1, p0));
- } else {
- ASSERT_FALSE(r.Changed());
- EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+ IsSelect(MachineRepresentation::kNone,
+ IsNumberLessThan(p1, p0), p0, p1));
}
}
}
@@ -181,24 +156,25 @@
TEST_F(JSBuiltinReducerTest, MathImul) {
- Handle<JSFunction> f = MathFunction("imul");
+ Node* function = MathFunction("imul");
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- TRACED_FOREACH(Type*, t1, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* p1 = Parameter(t1, 1);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call = graph()->NewNode(
- javascript()->CallFunction(4, NO_CALL_FUNCTION_FLAGS), fun,
- UndefinedConstant(), p0, p1);
- Reduction r = Reduce(call);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ TRACED_FOREACH(Type*, t0, kIntegral32Types) {
+ TRACED_FOREACH(Type*, t1, kIntegral32Types) {
+ Node* p0 = Parameter(t0, 0);
+ Node* p1 = Parameter(t1, 1);
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(4, language_mode),
+ function, UndefinedConstant(), p0, p1, context,
+ frame_state, frame_state, effect, control);
+ Reduction r = Reduce(call);
- if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
- } else {
- ASSERT_FALSE(r.Changed());
- EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
}
}
}
@@ -210,94 +186,27 @@
TEST_F(JSBuiltinReducerTest, MathFround) {
- Handle<JSFunction> f = MathFunction("fround");
+ Node* function = MathFunction("fround");
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call);
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Node* context = UndefinedConstant();
+ Node* frame_state = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ TRACED_FOREACH(Type*, t0, kNumberTypes) {
+ Node* p0 = Parameter(t0, 0);
+ Node* call =
+ graph()->NewNode(javascript()->CallFunction(3, language_mode),
+ function, UndefinedConstant(), p0, context,
+ frame_state, frame_state, effect, control);
+ Reduction r = Reduce(call);
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTruncateFloat64ToFloat32(p0));
+ }
}
}
-
-// -----------------------------------------------------------------------------
-// Math.floor
-
-
-TEST_F(JSBuiltinReducerTest, MathFloorAvailable) {
- Handle<JSFunction> f = MathFunction("floor");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Floor);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Floor(p0));
- }
-}
-
-
-TEST_F(JSBuiltinReducerTest, MathFloorUnavailable) {
- Handle<JSFunction> f = MathFunction("floor");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
-
- ASSERT_FALSE(r.Changed());
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Math.ceil
-
-
-TEST_F(JSBuiltinReducerTest, MathCeilAvailable) {
- Handle<JSFunction> f = MathFunction("ceil");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kFloat64Ceil);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFloat64Ceil(p0));
- }
-}
-
-
-TEST_F(JSBuiltinReducerTest, MathCeilUnavailable) {
- Handle<JSFunction> f = MathFunction("ceil");
-
- TRACED_FOREACH(Type*, t0, kNumberTypes) {
- Node* p0 = Parameter(t0, 0);
- Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
- Node* call =
- graph()->NewNode(javascript()->CallFunction(3, NO_CALL_FUNCTION_FLAGS),
- fun, UndefinedConstant(), p0);
- Reduction r = Reduce(call, MachineOperatorBuilder::Flag::kNoFlags);
-
- ASSERT_FALSE(r.Changed());
- }
-}
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/js-context-relaxation-unittest.cc b/test/unittests/compiler/js-context-relaxation-unittest.cc
new file mode 100644
index 0000000..a44bd02
--- /dev/null
+++ b/test/unittests/compiler/js-context-relaxation-unittest.cc
@@ -0,0 +1,285 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-context-relaxation.h"
+#include "src/compiler/js-graph.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSContextRelaxationTest : public GraphTest {
+ public:
+ JSContextRelaxationTest() : GraphTest(3), javascript_(zone()) {}
+ ~JSContextRelaxationTest() override {}
+
+ protected:
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kNoFlags) {
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
+ &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ JSContextRelaxation reducer;
+ return reducer.Reduce(node);
+ }
+
+ Node* EmptyFrameState() {
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
+ &machine);
+ return jsgraph.EmptyFrameState();
+ }
+
+ Node* ShallowFrameStateChain(Node* outer_context,
+ ContextCallingMode context_calling_mode) {
+ const FrameStateFunctionInfo* const frame_state_function_info =
+ common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, 3, 0,
+ Handle<SharedFunctionInfo>(), context_calling_mode);
+ const Operator* op = common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ frame_state_function_info);
+ return graph()->NewNode(op, graph()->start(), graph()->start(),
+ graph()->start(), outer_context, graph()->start(),
+ graph()->start());
+ }
+
+ Node* DeepFrameStateChain(Node* outer_context,
+ ContextCallingMode context_calling_mode) {
+ const FrameStateFunctionInfo* const frame_state_function_info =
+ common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, 3, 0,
+ Handle<SharedFunctionInfo>(), context_calling_mode);
+ const Operator* op = common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ frame_state_function_info);
+ Node* shallow_frame_state =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ return graph()->NewNode(op, graph()->start(), graph()->start(),
+ graph()->start(), graph()->start(),
+ graph()->start(), shallow_frame_state);
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionShallowFrameStateChainNoCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionShallowFrameStateChainCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ ShallowFrameStateChain(outer_context, CALL_CHANGES_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepFrameStateChainNoCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ DeepFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepFrameStateChainCrossCtx) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Node* const frame_state =
+ DeepFrameStateChain(outer_context, CALL_CHANGES_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state, frame_state, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(context, NodeProperties::GetContextInput(node));
+}
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainFullRelaxForCatch) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateCatchContext(Handle<String>());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context = graph()->NewNode(
+ op, graph()->start(), graph()->start(), outer_context, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state_2, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainFullRelaxForWith) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateWithContext();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context = graph()->NewNode(
+ op, graph()->start(), graph()->start(), outer_context, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state_2, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainFullRelaxForBlock) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::null();
+ const Operator* op = javascript()->CreateBlockContext(scope_info);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), outer_context, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state_2, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(outer_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainPartialRelaxForScript) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ Handle<ScopeInfo> scope_info = Handle<ScopeInfo>::null();
+ const Operator* op = javascript()->CreateScriptContext(scope_info);
+ Node* const frame_state_1 =
+ ShallowFrameStateChain(outer_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context = graph()->NewNode(op, graph()->start(), outer_context,
+ frame_state_1, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state_2, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(nested_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainPartialRelaxForModule) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateModuleContext();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context = graph()->NewNode(
+ op, graph()->start(), graph()->start(), outer_context, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state_2, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_TRUE(r.Changed());
+ EXPECT_EQ(nested_context, NodeProperties::GetContextInput(node));
+}
+
+
+TEST_F(JSContextRelaxationTest,
+ RelaxJSCallFunctionDeepContextChainPartialNoRelax) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const outer_context = Parameter(3);
+ const Operator* op = javascript()->CreateFunctionContext(0);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Node* nested_context =
+ graph()->NewNode(op, graph()->start(), outer_context, effect, control);
+ Node* const frame_state_2 =
+ ShallowFrameStateChain(nested_context, CALL_MAINTAINS_NATIVE_CONTEXT);
+ Node* node = graph()->NewNode(
+ javascript()->CallFunction(2, STRICT, VectorSlotPair()), input0, input1,
+ context, frame_state_2, frame_state_2, effect, control);
+ Reduction const r = Reduce(node);
+ EXPECT_FALSE(r.Changed());
+ EXPECT_EQ(context, NodeProperties::GetContextInput(node));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/js-intrinsic-lowering-unittest.cc b/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
new file mode 100644
index 0000000..f38f8ea
--- /dev/null
+++ b/test/unittests/compiler/js-intrinsic-lowering-unittest.cc
@@ -0,0 +1,462 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/diamond.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-intrinsic-lowering.h"
+#include "src/compiler/js-operator.h"
+#include "src/types-inl.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+
+using testing::_;
+using testing::AllOf;
+using testing::BitEq;
+using testing::Capture;
+using testing::CaptureEq;
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSIntrinsicLoweringTest : public TypedGraphTest {
+ public:
+ JSIntrinsicLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
+ ~JSIntrinsicLoweringTest() override {}
+
+ protected:
+ Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
+ MachineOperatorBuilder::kNoFlags) {
+ MachineOperatorBuilder machine(zone(), MachineType::PointerRepresentation(),
+ flags);
+ SimplifiedOperatorBuilder simplified(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ JSIntrinsicLowering reducer(&graph_reducer, &jsgraph,
+ JSIntrinsicLowering::kDeoptimizationEnabled);
+ return reducer.Reduce(node);
+ }
+
+ Node* EmptyFrameState() {
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
+ &machine);
+ return jsgraph.EmptyFrameState();
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+};
+
+
+// -----------------------------------------------------------------------------
+// %_ConstructDouble
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineOptimizedConstructDouble) {
+ Node* const input0 = Parameter(0);
+ Node* const input1 = Parameter(1);
+ Node* const context = Parameter(2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineConstructDouble, 2), input0,
+ input1, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64InsertHighWord32(
+ IsFloat64InsertLowWord32(
+ IsNumberConstant(BitEq(0.0)), input1),
+ input0));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_DoubleLo
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleLo) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleLo, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64ExtractLowWord32(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_DoubleHi
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineOptimizedDoubleHi) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineDoubleHi, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64ExtractHighWord32(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsSmi
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsSmi) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsSmi, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsObjectIsSmi(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsArray
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsArray) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsArray, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false)),
+ effect, _),
+ IsInt32Constant(JS_ARRAY_TYPE)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsDate
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsDate) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsDate, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false)),
+ effect, _),
+ IsInt32Constant(JS_DATE_TYPE)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsTypedArray
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsTypedArray) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsTypedArray, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false)),
+ effect, _),
+ IsInt32Constant(JS_TYPED_ARRAY_TYPE)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsFunction
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsFunction) {
+ Node* const input = Parameter(Type::Any());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsFunction, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsUint32LessThanOrEqual(
+ IsInt32Constant(FIRST_FUNCTION_TYPE),
+ IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input, effect,
+ CaptureEq(&if_false)),
+ effect, _)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsRegExp
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsRegExp) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineIsRegExp, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsWord32Equal(IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false)),
+ effect, _),
+ IsInt32Constant(JS_REGEXP_TYPE)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_IsJSReceiver
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithAny) {
+ Node* const input = Parameter(Type::Any());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node *> branch, if_false;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, IsFalseConstant(),
+ IsUint32LessThanOrEqual(
+ IsInt32Constant(FIRST_JS_RECEIVER_TYPE),
+ IsLoadField(AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input, effect,
+ CaptureEq(&if_false)),
+ effect, _)),
+ IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithReceiver) {
+ Node* const input = Parameter(Type::Receiver());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTrueConstant());
+}
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineIsJSReceiverWithUndefined) {
+ Node* const input = Parameter(Type::Undefined());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineIsJSReceiver, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFalseConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// %_JSValueGetValue
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineJSValueGetValue) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(graph()->NewNode(
+ javascript()->CallRuntime(Runtime::kInlineJSValueGetValue, 1), input,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsLoadField(AccessBuilder::ForValue(), input, effect, control));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_MathFloor
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineMathFloor) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathFloor, 1),
+ input, context, effect, control),
+ MachineOperatorBuilder::kFloat64RoundDown);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64RoundDown(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_MathSqrt
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineMathSqrt) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathSqrt, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat64Sqrt(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_MathClz32
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineMathClz32) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineMathClz32, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsWord32Clz(input));
+}
+
+
+// -----------------------------------------------------------------------------
+// %_ValueOf
+
+
+TEST_F(JSIntrinsicLoweringTest, InlineValueOf) {
+ Node* const input = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CallRuntime(Runtime::kInlineValueOf, 1),
+ input, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+
+ Node* phi = r.replacement();
+ Capture<Node*> branch0, if_false0, branch1, if_true1;
+ EXPECT_THAT(
+ phi,
+ IsPhi(
+ MachineRepresentation::kTagged, input,
+ IsPhi(MachineRepresentation::kTagged,
+ IsLoadField(AccessBuilder::ForValue(), input, effect,
+ CaptureEq(&if_true1)),
+ input,
+ IsMerge(
+ AllOf(CaptureEq(&if_true1), IsIfTrue(CaptureEq(&branch1))),
+ IsIfFalse(AllOf(
+ CaptureEq(&branch1),
+ IsBranch(
+ IsWord32Equal(
+ IsLoadField(
+ AccessBuilder::ForMapInstanceType(),
+ IsLoadField(AccessBuilder::ForMap(), input,
+ effect, CaptureEq(&if_false0)),
+ effect, _),
+ IsInt32Constant(JS_VALUE_TYPE)),
+ CaptureEq(&if_false0)))))),
+ IsMerge(
+ IsIfTrue(AllOf(CaptureEq(&branch0),
+ IsBranch(IsObjectIsSmi(input), control))),
+ AllOf(CaptureEq(&if_false0), IsIfFalse(CaptureEq(&branch0))))));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/js-operator-unittest.cc b/test/unittests/compiler/js-operator-unittest.cc
index 7aa0c64..e0db771 100644
--- a/test/unittests/compiler/js-operator-unittest.cc
+++ b/test/unittests/compiler/js-operator-unittest.cc
@@ -12,9 +12,34 @@
namespace internal {
namespace compiler {
-// -----------------------------------------------------------------------------
-// Shared operators.
+namespace {
+const LanguageMode kLanguageModes[] = {SLOPPY, STRICT, STRONG};
+
+
+#if GTEST_HAS_COMBINE
+
+template <typename T>
+class JSOperatorTestWithLanguageModeAndParam
+ : public TestWithZone,
+ public ::testing::WithParamInterface<::testing::tuple<LanguageMode, T>> {
+ protected:
+ LanguageMode language_mode() const {
+ return ::testing::get<0>(B::GetParam());
+ }
+ const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+ typedef ::testing::WithParamInterface<::testing::tuple<LanguageMode, T>> B;
+};
+
+#endif // GTEST_HAS_COMBINE
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Shared operators without language mode.
namespace {
@@ -28,6 +53,36 @@
int control_input_count;
int value_output_count;
int effect_output_count;
+ int control_output_count;
+};
+
+
+const SharedOperator kSharedOperators[] = {
+#define SHARED(Name, properties, value_input_count, frame_state_input_count, \
+ effect_input_count, control_input_count, value_output_count, \
+ effect_output_count, control_output_count) \
+ { \
+ &JSOperatorBuilder::Name, IrOpcode::kJS##Name, properties, \
+ value_input_count, frame_state_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count \
+ }
+ SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(StrictEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
+ SHARED(StrictNotEqual, Operator::kNoThrow, 2, 0, 1, 1, 1, 1, 0),
+ SHARED(ToNumber, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
+ SHARED(ToString, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
+ SHARED(ToName, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
+ SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1, 2),
+ SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1, 2),
+ SHARED(Create, Operator::kEliminatable, 2, 1, 1, 0, 1, 1, 0),
+ SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(TypeOf, Operator::kEliminatable, 1, 0, 1, 0, 1, 1, 0),
+ SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1, 2),
+ SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
+ SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1, 2),
+#undef SHARED
};
@@ -35,55 +90,6 @@
return os << IrOpcode::Mnemonic(sop.opcode);
}
-
-const SharedOperator kSharedOperators[] = {
-#define SHARED(Name, properties, value_input_count, frame_state_input_count, \
- effect_input_count, control_input_count, value_output_count, \
- effect_output_count) \
- { \
- &JSOperatorBuilder::Name, IrOpcode::kJS##Name, properties, \
- value_input_count, frame_state_input_count, effect_input_count, \
- control_input_count, value_output_count, effect_output_count \
- }
- SHARED(Equal, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(NotEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(StrictEqual, Operator::kPure, 2, 0, 0, 0, 1, 0),
- SHARED(StrictNotEqual, Operator::kPure, 2, 0, 0, 0, 1, 0),
- SHARED(LessThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(GreaterThan, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(BitwiseOr, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(BitwiseXor, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(BitwiseAnd, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(ShiftLeft, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(ShiftRight, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(ShiftRightLogical, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Add, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Subtract, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Multiply, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Divide, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Modulus, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(UnaryNot, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(ToBoolean, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(ToNumber, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(ToName, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(ToObject, Operator::kNoProperties, 1, 1, 1, 1, 1, 1),
- SHARED(Yield, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(Create, Operator::kEliminatable, 0, 0, 1, 1, 1, 1),
- SHARED(HasProperty, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(TypeOf, Operator::kPure, 1, 0, 0, 0, 1, 0),
- SHARED(InstanceOf, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
- SHARED(Debugger, Operator::kNoProperties, 0, 0, 1, 1, 0, 1),
- SHARED(CreateFunctionContext, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
- SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
- SHARED(CreateBlockContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
- SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
- SHARED(CreateScriptContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1)
-#undef SHARED
-};
-
} // namespace
@@ -106,23 +112,20 @@
const Operator* op = (javascript.*sop.constructor)();
const int context_input_count = 1;
- // TODO(jarin): Get rid of this hack.
- const int frame_state_input_count =
- FLAG_turbo_deoptimization ? sop.frame_state_input_count : 0;
EXPECT_EQ(sop.value_input_count, op->ValueInputCount());
EXPECT_EQ(context_input_count, OperatorProperties::GetContextInputCount(op));
- EXPECT_EQ(frame_state_input_count,
+ EXPECT_EQ(sop.frame_state_input_count,
OperatorProperties::GetFrameStateInputCount(op));
EXPECT_EQ(sop.effect_input_count, op->EffectInputCount());
EXPECT_EQ(sop.control_input_count, op->ControlInputCount());
EXPECT_EQ(sop.value_input_count + context_input_count +
- frame_state_input_count + sop.effect_input_count +
+ sop.frame_state_input_count + sop.effect_input_count +
sop.control_input_count,
OperatorProperties::GetTotalInputCount(op));
EXPECT_EQ(sop.value_output_count, op->ValueOutputCount());
EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
- EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(sop.control_output_count, op->ControlOutputCount());
}
@@ -147,70 +150,120 @@
// -----------------------------------------------------------------------------
-// JSStoreProperty.
+// Shared operators with language mode.
-class JSStorePropertyOperatorTest
- : public TestWithZone,
- public ::testing::WithParamInterface<StrictMode> {};
+#if GTEST_HAS_COMBINE
+
+namespace {
+
+struct SharedOperatorWithLanguageMode {
+ const Operator* (JSOperatorBuilder::*constructor)(LanguageMode);
+ IrOpcode::Value opcode;
+ Operator::Properties properties;
+ int value_input_count;
+ int frame_state_input_count;
+ int effect_input_count;
+ int control_input_count;
+ int value_output_count;
+ int effect_output_count;
+ int control_output_count;
+};
-TEST_P(JSStorePropertyOperatorTest, InstancesAreGloballyShared) {
- const StrictMode mode = GetParam();
+const SharedOperatorWithLanguageMode kSharedOperatorsWithLanguageMode[] = {
+#define SHARED(Name, properties, value_input_count, frame_state_input_count, \
+ effect_input_count, control_input_count, value_output_count, \
+ effect_output_count, control_output_count) \
+ { \
+ &JSOperatorBuilder::Name, IrOpcode::kJS##Name, properties, \
+ value_input_count, frame_state_input_count, effect_input_count, \
+ control_input_count, value_output_count, effect_output_count, \
+ control_output_count \
+ }
+ SHARED(LessThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(GreaterThan, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(LessThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+ SHARED(GreaterThanOrEqual, Operator::kNoProperties, 2, 2, 1, 1, 1, 1, 2),
+#undef SHARED
+};
+
+
+std::ostream& operator<<(std::ostream& os,
+ const SharedOperatorWithLanguageMode& sop) {
+ return os << IrOpcode::Mnemonic(sop.opcode);
+}
+
+} // namespace
+
+
+class JSSharedOperatorWithLanguageModeTest
+ : public JSOperatorTestWithLanguageModeAndParam<
+ SharedOperatorWithLanguageMode> {};
+
+
+TEST_P(JSSharedOperatorWithLanguageModeTest, InstancesAreGloballyShared) {
+ const SharedOperatorWithLanguageMode& sop = GetParam();
JSOperatorBuilder javascript1(zone());
JSOperatorBuilder javascript2(zone());
- EXPECT_EQ(javascript1.StoreProperty(mode), javascript2.StoreProperty(mode));
+ EXPECT_EQ((javascript1.*sop.constructor)(language_mode()),
+ (javascript2.*sop.constructor)(language_mode()));
}
-TEST_P(JSStorePropertyOperatorTest, NumberOfInputsAndOutputs) {
+TEST_P(JSSharedOperatorWithLanguageModeTest, NumberOfInputsAndOutputs) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
- const Operator* op = javascript.StoreProperty(mode);
+ const SharedOperatorWithLanguageMode& sop = GetParam();
+ const Operator* op = (javascript.*sop.constructor)(language_mode());
- // TODO(jarin): Get rid of this hack.
- const int frame_state_input_count = FLAG_turbo_deoptimization ? 1 : 0;
- EXPECT_EQ(3, op->ValueInputCount());
- EXPECT_EQ(1, OperatorProperties::GetContextInputCount(op));
- EXPECT_EQ(frame_state_input_count,
+ const int context_input_count = 1;
+ EXPECT_EQ(sop.value_input_count, op->ValueInputCount());
+ EXPECT_EQ(context_input_count, OperatorProperties::GetContextInputCount(op));
+ EXPECT_EQ(sop.frame_state_input_count,
OperatorProperties::GetFrameStateInputCount(op));
- EXPECT_EQ(1, op->EffectInputCount());
- EXPECT_EQ(1, op->ControlInputCount());
- EXPECT_EQ(6 + frame_state_input_count,
+ EXPECT_EQ(sop.effect_input_count, op->EffectInputCount());
+ EXPECT_EQ(sop.control_input_count, op->ControlInputCount());
+ EXPECT_EQ(sop.value_input_count + context_input_count +
+ sop.frame_state_input_count + sop.effect_input_count +
+ sop.control_input_count,
OperatorProperties::GetTotalInputCount(op));
- EXPECT_EQ(0, op->ValueOutputCount());
- EXPECT_EQ(1, op->EffectOutputCount());
- EXPECT_EQ(0, op->ControlOutputCount());
+ EXPECT_EQ(sop.value_output_count, op->ValueOutputCount());
+ EXPECT_EQ(sop.effect_output_count, op->EffectOutputCount());
+ EXPECT_EQ(sop.control_output_count, op->ControlOutputCount());
}
-TEST_P(JSStorePropertyOperatorTest, OpcodeIsCorrect) {
+TEST_P(JSSharedOperatorWithLanguageModeTest, OpcodeIsCorrect) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
- const Operator* op = javascript.StoreProperty(mode);
- EXPECT_EQ(IrOpcode::kJSStoreProperty, op->opcode());
+ const SharedOperatorWithLanguageMode& sop = GetParam();
+ const Operator* op = (javascript.*sop.constructor)(language_mode());
+ EXPECT_EQ(sop.opcode, op->opcode());
}
-TEST_P(JSStorePropertyOperatorTest, OpParameter) {
+TEST_P(JSSharedOperatorWithLanguageModeTest, Parameter) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
- const Operator* op = javascript.StoreProperty(mode);
- EXPECT_EQ(mode, OpParameter<StrictMode>(op));
+ const SharedOperatorWithLanguageMode& sop = GetParam();
+ const Operator* op = (javascript.*sop.constructor)(language_mode());
+ EXPECT_EQ(language_mode(), OpParameter<LanguageMode>(op));
}
-TEST_P(JSStorePropertyOperatorTest, Properties) {
+TEST_P(JSSharedOperatorWithLanguageModeTest, Properties) {
JSOperatorBuilder javascript(zone());
- const StrictMode mode = GetParam();
- const Operator* op = javascript.StoreProperty(mode);
- EXPECT_EQ(Operator::kNoProperties, op->properties());
+ const SharedOperatorWithLanguageMode& sop = GetParam();
+ const Operator* op = (javascript.*sop.constructor)(language_mode());
+ EXPECT_EQ(sop.properties, op->properties());
}
-INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSStorePropertyOperatorTest,
- ::testing::Values(SLOPPY, STRICT));
+INSTANTIATE_TEST_CASE_P(
+ JSOperatorTest, JSSharedOperatorWithLanguageModeTest,
+ ::testing::Combine(::testing::ValuesIn(kLanguageModes),
+ ::testing::ValuesIn(kSharedOperatorsWithLanguageMode)));
+
+#endif // GTEST_HAS_COMBINE
} // namespace compiler
} // namespace internal
diff --git a/test/unittests/compiler/js-type-feedback-unittest.cc b/test/unittests/compiler/js-type-feedback-unittest.cc
new file mode 100644
index 0000000..dece25d
--- /dev/null
+++ b/test/unittests/compiler/js-type-feedback-unittest.cc
@@ -0,0 +1,336 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/js-type-feedback.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::Capture;
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSTypeFeedbackTest : public TypedGraphTest {
+ public:
+ JSTypeFeedbackTest()
+ : TypedGraphTest(3),
+ javascript_(zone()),
+ dependencies_(isolate(), zone()) {}
+ ~JSTypeFeedbackTest() override { dependencies_.Rollback(); }
+
+ protected:
+ Reduction Reduce(Node* node,
+ JSTypeFeedbackSpecializer::DeoptimizationMode mode) {
+ Handle<GlobalObject> global_object(
+ isolate()->native_context()->global_object(), isolate());
+
+ MachineOperatorBuilder machine(zone());
+ SimplifiedOperatorBuilder simplified(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ &machine);
+ JSTypeFeedbackTable table(zone());
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ JSTypeFeedbackSpecializer reducer(&graph_reducer, &jsgraph, &table, nullptr,
+ global_object, mode, &dependencies_);
+ return reducer.Reduce(node);
+ }
+
+ Node* EmptyFrameState() {
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), nullptr,
+ &machine);
+ return jsgraph.EmptyFrameState();
+ }
+
+ JSOperatorBuilder* javascript() { return &javascript_; }
+
+ void SetGlobalProperty(const char* string, int value) {
+ SetGlobalProperty(string, Handle<Smi>(Smi::FromInt(value), isolate()));
+ }
+
+ void SetGlobalProperty(const char* string, double value) {
+ SetGlobalProperty(string, isolate()->factory()->NewNumber(value));
+ }
+
+ void SetGlobalProperty(const char* string, Handle<Object> value) {
+ Handle<JSObject> global(isolate()->context()->global_object(), isolate());
+ Handle<String> name =
+ isolate()->factory()->NewStringFromAsciiChecked(string);
+ MaybeHandle<Object> result =
+ JSReceiver::SetProperty(global, name, value, SLOPPY);
+ result.Assert();
+ }
+
+ Node* ReturnLoadNamedFromGlobal(
+ const char* string, Node* effect, Node* control,
+ JSTypeFeedbackSpecializer::DeoptimizationMode mode) {
+ VectorSlotPair feedback;
+ Node* vector = UndefinedConstant();
+ Node* context = UndefinedConstant();
+
+ Handle<Name> name = isolate()->factory()->InternalizeUtf8String(string);
+ const Operator* op = javascript()->LoadGlobal(name, feedback);
+ Node* load = graph()->NewNode(op, vector, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control);
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), load);
+ return graph()->NewNode(common()->Return(), load, load, if_success);
+ }
+
+ CompilationDependencies* dependencies() { return &dependencies_; }
+
+ private:
+ JSOperatorBuilder javascript_;
+ CompilationDependencies dependencies_;
+};
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstSmi) {
+ const int kValue = 111;
+ const char* kName = "banana";
+ SetGlobalProperty(kName, kValue);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ EXPECT_FALSE(r.Changed());
+ EXPECT_TRUE(dependencies()->IsEmpty());
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstSmiWithDeoptimization) {
+ const int kValue = 111;
+ const char* kName = "banana";
+ SetGlobalProperty(kName, kValue);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+
+ // Check LoadNamed(global) => HeapConstant[kValue]
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(kValue));
+
+ EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), graph()->start(),
+ graph()->start()));
+ EXPECT_THAT(graph()->end(), IsEnd(ret));
+
+ EXPECT_FALSE(dependencies()->IsEmpty());
+ dependencies()->Rollback();
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstNumber) {
+ const double kValue = -11.25;
+ const char* kName = "kiwi";
+ SetGlobalProperty(kName, kValue);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+
+ EXPECT_FALSE(r.Changed());
+ EXPECT_TRUE(dependencies()->IsEmpty());
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstNumberWithDeoptimization) {
+ const double kValue = -11.25;
+ const char* kName = "kiwi";
+ SetGlobalProperty(kName, kValue);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+
+ // Check LoadNamed(global) => HeapConstant[kValue]
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberConstant(kValue));
+
+ EXPECT_THAT(ret, IsReturn(IsNumberConstant(kValue), graph()->start(),
+ graph()->start()));
+ EXPECT_THAT(graph()->end(), IsEnd(ret));
+
+ EXPECT_FALSE(dependencies()->IsEmpty());
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstString) {
+ Handle<HeapObject> kValue = isolate()->factory()->undefined_string();
+ const char* kName = "mango";
+ SetGlobalProperty(kName, kValue);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ ASSERT_FALSE(r.Changed());
+ EXPECT_TRUE(dependencies()->IsEmpty());
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalConstStringWithDeoptimization) {
+ Handle<HeapObject> kValue = isolate()->factory()->undefined_string();
+ const char* kName = "mango";
+ SetGlobalProperty(kName, kValue);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+
+ // Check LoadNamed(global) => HeapConstant[kValue]
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(kValue));
+
+ EXPECT_THAT(ret, IsReturn(IsHeapConstant(kValue), graph()->start(),
+ graph()->start()));
+ EXPECT_THAT(graph()->end(), IsEnd(ret));
+
+ EXPECT_FALSE(dependencies()->IsEmpty());
+ dependencies()->Rollback();
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalPropertyCellSmi) {
+ const char* kName = "melon";
+ SetGlobalProperty(kName, 123);
+ SetGlobalProperty(kName, 124);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ ASSERT_FALSE(r.Changed());
+ EXPECT_TRUE(dependencies()->IsEmpty());
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalPropertyCellSmiWithDeoptimization) {
+ const char* kName = "melon";
+ SetGlobalProperty(kName, 123);
+ SetGlobalProperty(kName, 124);
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+
+ // Check LoadNamed(global) => LoadField[PropertyCell::value](cell)
+ ASSERT_TRUE(r.Changed());
+ FieldAccess access = AccessBuilder::ForPropertyCellValue();
+ Capture<Node*> cell_capture;
+ Matcher<Node*> load_field_match = IsLoadField(
+ access, CaptureEq(&cell_capture), graph()->start(), graph()->start());
+ EXPECT_THAT(r.replacement(), load_field_match);
+
+ HeapObjectMatcher cell(cell_capture.value());
+ EXPECT_TRUE(cell.HasValue());
+ EXPECT_TRUE(cell.Value()->IsPropertyCell());
+
+ EXPECT_THAT(ret,
+ IsReturn(load_field_match, load_field_match, graph()->start()));
+ EXPECT_THAT(graph()->end(), IsEnd(ret));
+
+ EXPECT_FALSE(dependencies()->IsEmpty());
+ dependencies()->Rollback();
+}
+
+
+TEST_F(JSTypeFeedbackTest, JSLoadNamedGlobalPropertyCellString) {
+ const char* kName = "pineapple";
+ SetGlobalProperty(kName, isolate()->factory()->undefined_string());
+ SetGlobalProperty(kName, isolate()->factory()->undefined_value());
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationDisabled);
+ ASSERT_FALSE(r.Changed());
+ EXPECT_TRUE(dependencies()->IsEmpty());
+}
+
+
+TEST_F(JSTypeFeedbackTest,
+ JSLoadNamedGlobalPropertyCellStringWithDeoptimization) {
+ const char* kName = "pineapple";
+ SetGlobalProperty(kName, isolate()->factory()->undefined_string());
+ SetGlobalProperty(kName, isolate()->factory()->undefined_value());
+
+ Node* ret = ReturnLoadNamedFromGlobal(
+ kName, graph()->start(), graph()->start(),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ Reduction r = Reduce(ret->InputAt(0),
+ JSTypeFeedbackSpecializer::kDeoptimizationEnabled);
+
+ // Check LoadNamed(global) => LoadField[PropertyCell::value](cell)
+ ASSERT_TRUE(r.Changed());
+ FieldAccess access = AccessBuilder::ForPropertyCellValue();
+ Capture<Node*> cell_capture;
+ Matcher<Node*> load_field_match = IsLoadField(
+ access, CaptureEq(&cell_capture), graph()->start(), graph()->start());
+ EXPECT_THAT(r.replacement(), load_field_match);
+
+ HeapObjectMatcher cell(cell_capture.value());
+ EXPECT_TRUE(cell.HasValue());
+ EXPECT_TRUE(cell.Value()->IsPropertyCell());
+
+ EXPECT_THAT(ret,
+ IsReturn(load_field_match, load_field_match, graph()->start()));
+ EXPECT_THAT(graph()->end(), IsEnd(ret));
+
+ EXPECT_FALSE(dependencies()->IsEmpty());
+ dependencies()->Rollback();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/js-typed-lowering-unittest.cc b/test/unittests/compiler/js-typed-lowering-unittest.cc
index 97ff106..6fc89bb 100644
--- a/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -2,17 +2,21 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "src/code-factory.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
#include "src/compiler/js-operator.h"
#include "src/compiler/js-typed-lowering.h"
#include "src/compiler/machine-operator.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "src/isolate-inl.h"
#include "test/unittests/compiler/compiler-test-utils.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
+using testing::_;
using testing::BitEq;
using testing::IsNaN;
@@ -62,27 +66,47 @@
Type::Number(), Type::String(), Type::Object()};
-const StrictMode kStrictModes[] = {SLOPPY, STRICT};
+STATIC_ASSERT(LANGUAGE_END == 3);
+const LanguageMode kLanguageModes[] = {SLOPPY, STRICT, STRONG};
} // namespace
class JSTypedLoweringTest : public TypedGraphTest {
public:
- JSTypedLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
- ~JSTypedLoweringTest() OVERRIDE {}
+ JSTypedLoweringTest()
+ : TypedGraphTest(3), javascript_(zone()), deps_(isolate(), zone()) {}
+ ~JSTypedLoweringTest() override {}
protected:
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone());
- JSGraph jsgraph(graph(), common(), javascript(), &machine);
- JSTypedLowering reducer(&jsgraph, zone());
+ SimplifiedOperatorBuilder simplified(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), javascript(), &simplified,
+ &machine);
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ JSTypedLowering reducer(&graph_reducer, &deps_,
+ JSTypedLowering::kDeoptimizationEnabled, &jsgraph,
+ zone());
return reducer.Reduce(node);
}
+ Node* FrameState(Handle<SharedFunctionInfo> shared, Node* outer_frame_state) {
+ Node* state_values = graph()->NewNode(common()->StateValues(0));
+ return graph()->NewNode(
+ common()->FrameState(BailoutId::None(),
+ OutputFrameStateCombine::Ignore(),
+ common()->CreateFrameStateFunctionInfo(
+ FrameStateType::kJavaScriptFunction, 1, 0,
+ shared, CALL_MAINTAINS_NATIVE_CONTEXT)),
+ state_values, state_values, state_values, NumberConstant(0),
+ UndefinedConstant(), outer_frame_state);
+ }
+
Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
- Runtime::SetupArrayBuffer(isolate(), buffer, true, bytes, byte_length);
+ JSArrayBuffer::Setup(buffer, isolate(), true, bytes, byte_length);
return buffer;
}
@@ -95,91 +119,11 @@
private:
JSOperatorBuilder javascript_;
+ CompilationDependencies deps_;
};
// -----------------------------------------------------------------------------
-// JSUnaryNot
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithBoolean) {
- Node* input = Parameter(Type::Boolean(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsBooleanNot(input));
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithFalsish) {
- Handle<Object> zero = factory()->NewNumber(0);
- Node* input = Parameter(
- Type::Union(
- Type::MinusZero(),
- Type::Union(
- Type::NaN(),
- Type::Union(
- Type::Null(),
- Type::Union(
- Type::Undefined(),
- Type::Union(
- Type::Undetectable(),
- Type::Union(
- Type::Constant(factory()->false_value(), zone()),
- Type::Range(zero, zero, zone()), zone()),
- zone()),
- zone()),
- zone()),
- zone()),
- zone()),
- 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsTrueConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithTruish) {
- Node* input = Parameter(
- Type::Union(
- Type::Constant(factory()->true_value(), zone()),
- Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone()),
- zone()),
- 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithNonZeroPlainNumber) {
- Node* input = Parameter(
- Type::Range(factory()->NewNumber(1), factory()->NewNumber(42), zone()),
- 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsFalseConstant());
-}
-
-
-TEST_F(JSTypedLoweringTest, JSUnaryNotWithAny) {
- Node* input = Parameter(Type::Any(), 0);
- Node* context = Parameter(Type::Any(), 1);
- Reduction r =
- Reduce(graph()->NewNode(javascript()->UnaryNot(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsBooleanNot(IsAnyToBoolean(input)));
-}
-
-
-// -----------------------------------------------------------------------------
// Constant propagation
@@ -209,20 +153,18 @@
{
Reduction r = Reduce(Parameter(Type::Constant(null, zone())));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(null)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(null));
}
{
Reduction r = Reduce(Parameter(Type::Null()));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(null)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(null));
}
}
TEST_F(JSTypedLoweringTest, ParameterWithNaN) {
- const double kNaNs[] = {base::OS::nan_value(),
+ const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::quiet_NaN(),
std::numeric_limits<double>::signaling_NaN()};
TRACED_FOREACH(double, nan, kNaNs) {
@@ -253,8 +195,7 @@
EXPECT_THAT(r.replacement(), IsNumberConstant(value));
}
TRACED_FOREACH(double, value, kIntegerValues) {
- Handle<Object> constant = factory()->NewNumber(value);
- Reduction r = Reduce(Parameter(Type::Range(constant, constant, zone())));
+ Reduction r = Reduce(Parameter(Type::Range(value, value, zone())));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsNumberConstant(value));
}
@@ -266,14 +207,12 @@
{
Reduction r = Reduce(Parameter(Type::Undefined()));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(undefined)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
{
Reduction r = Reduce(Parameter(Type::Constant(undefined, zone())));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsHeapConstant(Unique<HeapObject>::CreateImmovable(undefined)));
+ EXPECT_THAT(r.replacement(), IsHeapConstant(undefined));
}
}
@@ -286,14 +225,14 @@
Node* input = Parameter(Type::Boolean(), 0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_EQ(input, r.replacement());
}
TEST_F(JSTypedLoweringTest, JSToBooleanWithFalsish) {
- Handle<Object> zero = factory()->NewNumber(0);
Node* input = Parameter(
Type::Union(
Type::MinusZero(),
@@ -307,7 +246,7 @@
Type::Undetectable(),
Type::Union(
Type::Constant(factory()->false_value(), zone()),
- Type::Range(zero, zero, zone()), zone()),
+ Type::Range(0.0, 0.0, zone()), zone()),
zone()),
zone()),
zone()),
@@ -316,7 +255,8 @@
0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
@@ -331,32 +271,58 @@
0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
TEST_F(JSTypedLoweringTest, JSToBooleanWithNonZeroPlainNumber) {
- Node* input =
- Parameter(Type::Range(factory()->NewNumber(1),
- factory()->NewNumber(V8_INFINITY), zone()),
- 0);
+ Node* input = Parameter(Type::Range(1, V8_INFINITY, zone()), 0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsTrueConstant());
}
+TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
+ Node* input = Parameter(Type::OrderedNumber(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0.0))));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
+ Node* input = Parameter(Type::String(), 0);
+ Node* context = Parameter(Type::Any(), 1);
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsNumberLessThan(IsNumberConstant(0.0),
+ IsLoadField(AccessBuilder::ForStringLength(), input,
+ graph()->start(), graph()->start())));
+}
+
+
TEST_F(JSTypedLoweringTest, JSToBooleanWithAny) {
Node* input = Parameter(Type::Any(), 0);
Node* context = Parameter(Type::Any(), 1);
Reduction r =
- Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsAnyToBoolean(input));
+ Reduce(graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
+ input, context, graph()->start()));
+ ASSERT_FALSE(r.Changed());
}
@@ -369,8 +335,9 @@
Node* const context = Parameter(Type::Any(), 1);
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ToNumber(), input,
- context, effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->ToNumber(), input, context,
+ EmptyFrameState(), effect, control));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsToNumber(input, IsNumberConstant(BitEq(0.0)),
graph()->start(), control));
@@ -378,55 +345,123 @@
// -----------------------------------------------------------------------------
+// JSToObject
+
+
+TEST_F(JSTypedLoweringTest, JSToObjectWithAny) {
+ Node* const input = Parameter(Type::Any(), 0);
+ Node* const context = Parameter(Type::Any(), 1);
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToObject(), input,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kTagged, _, _, _));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToObjectWithReceiver) {
+ Node* const input = Parameter(Type::Receiver(), 0);
+ Node* const context = Parameter(Type::Any(), 1);
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToObject(), input,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_EQ(input, r.replacement());
+}
+
+
+// -----------------------------------------------------------------------------
+// JSToString
+
+
+TEST_F(JSTypedLoweringTest, JSToStringWithBoolean) {
+ Node* const input = Parameter(Type::Boolean(), 0);
+ Node* const context = Parameter(Type::Any(), 1);
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->ToString(), input,
+ context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsSelect(MachineRepresentation::kTagged, input,
+ IsHeapConstant(factory()->true_string()),
+ IsHeapConstant(factory()->false_string())));
+}
+
+
+// -----------------------------------------------------------------------------
// JSStrictEqual
TEST_F(JSTypedLoweringTest, JSStrictEqualWithTheHole) {
Node* const the_hole = HeapConstant(factory()->the_hole_value());
Node* const context = UndefinedConstant();
- Node* const effect = graph()->start();
- Node* const control = graph()->start();
TRACED_FOREACH(Type*, type, kJSTypes) {
Node* const lhs = Parameter(type);
- Reduction r = Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs,
- the_hole, context, effect, control));
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, the_hole,
+ context, graph()->start(), graph()->start()));
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsFalseConstant());
}
}
+TEST_F(JSTypedLoweringTest, JSStrictEqualWithUnique) {
+ Node* const lhs = Parameter(Type::Unique(), 0);
+ Node* const rhs = Parameter(Type::Unique(), 1);
+ Node* const context = Parameter(Type::Any(), 2);
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs, rhs, context,
+ graph()->start(), graph()->start()));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsReferenceEqual(Type::Unique(), lhs, rhs));
+}
+
+
// -----------------------------------------------------------------------------
// JSShiftLeft
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndConstant) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ShiftLeft(), lhs,
- NumberConstant(rhs), context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsWord32Shl(lhs, IsNumberConstant(BitEq(rhs))));
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->ShiftLeft(language_mode, hints), lhs,
+ NumberConstant(rhs), context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberShiftLeft(lhs, IsNumberConstant(BitEq(rhs))));
+ }
}
}
TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(), lhs, rhs,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsWord32Shl(lhs, IsWord32And(rhs, IsInt32Constant(0x1f))));
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftLeft(language_mode, hints), lhs, rhs, context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberShiftLeft(lhs, rhs));
+ }
}
@@ -435,32 +470,39 @@
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ShiftRight(), lhs,
- NumberConstant(rhs), context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsWord32Sar(lhs, IsNumberConstant(BitEq(rhs))));
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->ShiftRight(language_mode, hints), lhs,
+ NumberConstant(rhs), context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberShiftRight(lhs, IsNumberConstant(BitEq(rhs))));
+ }
}
}
TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Signed32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(), lhs, rhs,
- context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsWord32Sar(lhs, IsWord32And(rhs, IsInt32Constant(0x1f))));
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftRight(language_mode, hints), lhs, rhs, context,
+ EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberShiftRight(lhs, rhs));
+ }
}
@@ -468,33 +510,41 @@
// JSShiftRightLogical
-TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndConstant) {
+TEST_F(JSTypedLoweringTest,
+ JSShiftRightLogicalWithUnsigned32AndConstant) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
TRACED_FORRANGE(double, rhs, 0, 31) {
- Reduction r =
- Reduce(graph()->NewNode(javascript()->ShiftRightLogical(), lhs,
- NumberConstant(rhs), context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsWord32Shr(lhs, IsNumberConstant(BitEq(rhs))));
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftRightLogical(language_mode, hints), lhs,
+ NumberConstant(rhs), context, EmptyFrameState(), EmptyFrameState(),
+ effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsNumberShiftRightLogical(lhs, IsNumberConstant(BitEq(rhs))));
+ }
}
}
TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndUnsigned32) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
Node* const lhs = Parameter(Type::Unsigned32());
Node* const rhs = Parameter(Type::Unsigned32());
Node* const context = UndefinedConstant();
Node* const effect = graph()->start();
Node* const control = graph()->start();
- Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(), lhs,
- rhs, context, effect, control));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsWord32Shr(lhs, IsWord32And(rhs, IsInt32Constant(0x1f))));
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->ShiftRightLogical(language_mode, hints), lhs, rhs,
+ context, EmptyFrameState(), EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsNumberShiftRightLogical(lhs, rhs));
+ }
}
@@ -525,7 +575,7 @@
IsLoadField(AccessBuilder::ForContextSlot(
Context::PREVIOUS_INDEX),
context, effect, graph()->start()),
- effect, graph()->start()));
+ _, graph()->start()));
}
}
}
@@ -560,7 +610,7 @@
IsLoadField(AccessBuilder::ForContextSlot(
Context::PREVIOUS_INDEX),
context, effect, graph()->start()),
- value, effect, control));
+ value, _, control));
}
}
}
@@ -575,42 +625,39 @@
double backing_store[kLength];
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
- VectorSlotPair feedback(Handle<TypeFeedbackVector>::null(),
- FeedbackVectorICSlot::Invalid());
+ VectorSlotPair feedback;
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- Handle<JSTypedArray> array =
- factory()->NewJSTypedArray(type, buffer, 0, kLength);
- int const element_size = static_cast<int>(array->element_size());
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ int const element_size = static_cast<int>(array->element_size());
- Node* key = Parameter(
- Type::Range(factory()->NewNumber(kMinInt / element_size),
- factory()->NewNumber(kMaxInt / element_size), zone()));
- Node* base = HeapConstant(array);
- Node* context = UndefinedConstant();
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->LoadProperty(feedback), base,
- key, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
+ Node* key = Parameter(
+ Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
+ Node* base = HeapConstant(array);
+ Node* vector = UndefinedConstant();
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->LoadProperty(language_mode, feedback),
+ base, key, vector, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+
+ Matcher<Node*> offset_matcher =
+ element_size == 1
+ ? key
+ : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsLoadBuffer(BufferAccess(type),
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ offset_matcher,
+ IsNumberConstant(array->byte_length()->Number()), effect,
+ control));
}
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
- Reduction r = Reduce(node);
-
- Matcher<Node*> offset_matcher =
- element_size == 1
- ? key
- : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsLoadBuffer(BufferAccess(type),
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- offset_matcher,
- IsNumberConstant(array->byte_length()->Number()), effect,
- control));
}
}
@@ -620,37 +667,34 @@
double backing_store[kLength];
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
- VectorSlotPair feedback(Handle<TypeFeedbackVector>::null(),
- FeedbackVectorICSlot::Invalid());
+ VectorSlotPair feedback;
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- Handle<JSTypedArray> array =
- factory()->NewJSTypedArray(type, buffer, 0, kLength);
- ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Handle<JSTypedArray> array =
+ factory()->NewJSTypedArray(type, buffer, 0, kLength);
+ ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
- int min = random_number_generator()->NextInt(static_cast<int>(kLength));
- int max = random_number_generator()->NextInt(static_cast<int>(kLength));
- if (min > max) std::swap(min, max);
- Node* key = Parameter(Type::Range(factory()->NewNumber(min),
- factory()->NewNumber(max), zone()));
- Node* base = HeapConstant(array);
- Node* context = UndefinedConstant();
- Node* effect = graph()->start();
- Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->LoadProperty(feedback), base,
- key, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
+ int min = random_number_generator()->NextInt(static_cast<int>(kLength));
+ int max = random_number_generator()->NextInt(static_cast<int>(kLength));
+ if (min > max) std::swap(min, max);
+ Node* key = Parameter(Type::Range(min, max, zone()));
+ Node* base = HeapConstant(array);
+ Node* vector = UndefinedConstant();
+ Node* context = UndefinedConstant();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->LoadProperty(language_mode, feedback),
+ base, key, vector, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsLoadElement(access,
+ IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+ key, effect, control));
}
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
- Reduction r = Reduce(node);
-
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(
- r.replacement(),
- IsLoadElement(access,
- IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
- key, effect, control));
}
}
@@ -665,27 +709,25 @@
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(
- Type::Range(factory()->NewNumber(kMinInt / element_size),
- factory()->NewNumber(kMaxInt / element_size), zone()));
+ Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
Node* base = HeapConstant(array);
Node* value =
Parameter(AccessBuilder::ForTypedArrayElement(type, true).type);
+ Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
- base, key, value, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
- }
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
+ VectorSlotPair feedback;
+ const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+ Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ EmptyFrameState(), EmptyFrameState(),
+ effect, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
@@ -712,26 +754,24 @@
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
int const element_size = static_cast<int>(array->element_size());
Node* key = Parameter(
- Type::Range(factory()->NewNumber(kMinInt / element_size),
- factory()->NewNumber(kMaxInt / element_size), zone()));
+ Type::Range(kMinInt / element_size, kMaxInt / element_size, zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(Type::Any());
+ Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
- base, key, value, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
- }
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
+ VectorSlotPair feedback;
+ const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+ Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ EmptyFrameState(), EmptyFrameState(),
+ effect, control);
Reduction r = Reduce(node);
Matcher<Node*> offset_matcher =
@@ -742,13 +782,6 @@
Matcher<Node*> value_matcher =
IsToNumber(value, context, effect, control);
Matcher<Node*> effect_matcher = value_matcher;
- if (AccessBuilder::ForTypedArrayElement(type, true)
- .type->Is(Type::Signed32())) {
- value_matcher = IsNumberToInt32(value_matcher);
- } else if (AccessBuilder::ForTypedArrayElement(type, true)
- .type->Is(Type::Unsigned32())) {
- value_matcher = IsNumberToUint32(value_matcher);
- }
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
@@ -769,7 +802,7 @@
Handle<JSArrayBuffer> buffer =
NewArrayBuffer(backing_store, sizeof(backing_store));
TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
- TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
Handle<JSTypedArray> array =
factory()->NewJSTypedArray(type, buffer, 0, kLength);
ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
@@ -777,20 +810,18 @@
int min = random_number_generator()->NextInt(static_cast<int>(kLength));
int max = random_number_generator()->NextInt(static_cast<int>(kLength));
if (min > max) std::swap(min, max);
- Node* key = Parameter(Type::Range(factory()->NewNumber(min),
- factory()->NewNumber(max), zone()));
+ Node* key = Parameter(Type::Range(min, max, zone()));
Node* base = HeapConstant(array);
Node* value = Parameter(access.type);
+ Node* vector = UndefinedConstant();
Node* context = UndefinedConstant();
Node* effect = graph()->start();
Node* control = graph()->start();
- Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
- base, key, value, context);
- if (FLAG_turbo_deoptimization) {
- node->AppendInput(zone(), UndefinedConstant());
- }
- node->AppendInput(zone(), effect);
- node->AppendInput(zone(), control);
+ VectorSlotPair feedback;
+ const Operator* op = javascript()->StoreProperty(language_mode, feedback);
+ Node* node = graph()->NewNode(op, base, key, value, vector, context,
+ EmptyFrameState(), EmptyFrameState(),
+ effect, control);
Reduction r = Reduce(node);
ASSERT_TRUE(r.Changed());
@@ -803,6 +834,426 @@
}
}
+
+// -----------------------------------------------------------------------------
+// JSLoadNamed
+
+
+TEST_F(JSTypedLoweringTest, JSLoadNamedStringLength) {
+ VectorSlotPair feedback;
+ Handle<Name> name = factory()->length_string();
+ Node* const receiver = Parameter(Type::String(), 0);
+ Node* const vector = Parameter(Type::Internal(), 1);
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->LoadNamed(language_mode, name, feedback),
+ receiver, vector, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsLoadField(AccessBuilder::ForStringLength(),
+ receiver, effect, control));
+ }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSLoadNamedFunctionPrototype) {
+ VectorSlotPair feedback;
+ Handle<Name> name = factory()->prototype_string();
+ Handle<JSFunction> function = isolate()->object_function();
+ Handle<JSObject> function_prototype(JSObject::cast(function->prototype()));
+ Node* const receiver = Parameter(Type::Constant(function, zone()), 0);
+ Node* const vector = Parameter(Type::Internal(), 1);
+ Node* const context = Parameter(Type::Internal(), 2);
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->LoadNamed(language_mode, name, feedback),
+ receiver, vector, context, EmptyFrameState(),
+ EmptyFrameState(), effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsHeapConstant(function_prototype));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// JSAdd
+
+
+TEST_F(JSTypedLoweringTest, JSAddWithString) {
+ BinaryOperationHints const hints = BinaryOperationHints::Any();
+ TRACED_FOREACH(LanguageMode, language_mode, kLanguageModes) {
+ Node* lhs = Parameter(Type::String(), 0);
+ Node* rhs = Parameter(Type::String(), 1);
+ Node* context = Parameter(Type::Any(), 2);
+ Node* frame_state0 = EmptyFrameState();
+ Node* frame_state1 = EmptyFrameState();
+ Node* effect = graph()->start();
+ Node* control = graph()->start();
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->Add(language_mode, hints), lhs, rhs,
+ context, frame_state0, frame_state1, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(CodeFactory::StringAdd(
+ isolate(), STRING_ADD_CHECK_NONE,
+ NOT_TENURED).code()),
+ lhs, rhs, context, frame_state0, effect, control));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreate
+
+
+TEST_F(JSTypedLoweringTest, JSCreate) {
+ Handle<JSFunction> function = isolate()->object_function();
+ Node* const target = Parameter(Type::Constant(function, graph()->zone()));
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Reduction r = Reduce(graph()->NewNode(javascript()->Create(), target, target,
+ context, EmptyFrameState(), effect));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(function->initial_map()->instance_size()),
+ IsBeginRegion(effect), _),
+ _));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateArguments
+
+
+TEST_F(JSTypedLoweringTest, JSCreateArgumentsViaStub) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state = FrameState(shared, graph()->start());
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->CreateArguments(
+ CreateArgumentsParameters::kMappedArguments, 0),
+ closure, context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(CodeFactory::ArgumentsAccess(
+ isolate(), false, false)
+ .code()),
+ closure, IsNumberConstant(0), _, effect, control));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSCreateArgumentsRestArrayViaStub) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state = FrameState(shared, graph()->start());
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsParameters::kRestArray, 0),
+ closure, context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsCall(_,
+ IsHeapConstant(CodeFactory::RestArgumentsAccess(isolate()).code()),
+ IsNumberConstant(0), _, IsNumberConstant(0), _, effect, control));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedMapped) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->CreateArguments(
+ CreateArgumentsParameters::kMappedArguments, 0),
+ closure, context, frame_state_inner, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(Heap::kSloppyArgumentsObjectSize),
+ _, control),
+ _));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedUnmapped) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
+ Reduction r = Reduce(
+ graph()->NewNode(javascript()->CreateArguments(
+ CreateArgumentsParameters::kUnmappedArguments, 0),
+ closure, context, frame_state_inner, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(Heap::kStrictArgumentsObjectSize),
+ _, control),
+ _));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSCreateArgumentsInlinedRestArray) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Node* const frame_state_outer = FrameState(shared, graph()->start());
+ Node* const frame_state_inner = FrameState(shared, frame_state_outer);
+ Reduction r = Reduce(graph()->NewNode(
+ javascript()->CreateArguments(CreateArgumentsParameters::kRestArray, 0),
+ closure, context, frame_state_inner, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(
+ IsAllocate(IsNumberConstant(JSArray::kSize), _, control), _));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateClosure
+
+
+TEST_F(JSTypedLoweringTest, JSCreateClosure) {
+ Node* const context = UndefinedConstant();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Handle<SharedFunctionInfo> shared(isolate()->object_function()->shared());
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateClosure(shared, NOT_TENURED),
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(CodeFactory::FastNewClosure(
+ isolate(), shared->language_mode(),
+ shared->kind()).code()),
+ IsHeapConstant(shared), effect, control));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateLiteralArray
+
+
+TEST_F(JSTypedLoweringTest, JSCreateLiteralArray) {
+ Handle<FixedArray> const constant_elements = factory()->NewFixedArray(12);
+ int const literal_flags = ArrayLiteral::kShallowElements;
+ int const literal_index = 1;
+ Node* const closure = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CreateLiteralArray(
+ constant_elements, literal_flags, literal_index),
+ closure, context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::FastCloneShallowArray(isolate()).code()),
+ closure, IsNumberConstant(literal_index),
+ IsHeapConstant(constant_elements), context, frame_state, effect,
+ control));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateLiteralObject
+
+
+TEST_F(JSTypedLoweringTest, JSCreateLiteralObject) {
+ Handle<FixedArray> const constant_properties =
+ factory()->NewFixedArray(6 * 2);
+ int const literal_flags = ObjectLiteral::kShallowProperties;
+ int const literal_index = 1;
+ Node* const closure = Parameter(0);
+ Node* const context = Parameter(1);
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r = Reduce(
+ graph()->NewNode(javascript()->CreateLiteralObject(
+ constant_properties, literal_flags, literal_index),
+ closure, context, frame_state, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::FastCloneShallowObject(isolate(), 6).code()),
+ closure, IsNumberConstant(literal_index),
+ IsHeapConstant(constant_properties), _, context, frame_state,
+ effect, control));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateFunctionContext
+
+
+TEST_F(JSTypedLoweringTest, JSCreateFunctionContextViaInlinedAllocation) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r =
+ Reduce(graph()->NewNode(javascript()->CreateFunctionContext(8), closure,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
+ 8 + Context::MIN_CONTEXT_SLOTS)),
+ IsBeginRegion(_), control),
+ _));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSCreateFunctionContextViaStub) {
+ Node* const closure = Parameter(Type::Any());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction const r =
+ Reduce(graph()->NewNode(javascript()->CreateFunctionContext(32), closure,
+ context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsCall(_, IsHeapConstant(
+ CodeFactory::FastNewContext(isolate(), 32).code()),
+ closure, context, effect, control));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateWithContext
+
+
+TEST_F(JSTypedLoweringTest, JSCreateWithContext) {
+ Node* const object = Parameter(Type::Receiver());
+ Node* const closure = Parameter(Type::Function());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateWithContext(), object,
+ closure, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
+ Context::MIN_CONTEXT_SLOTS)),
+ IsBeginRegion(_), control),
+ _));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSCreateCatchContext
+
+
+TEST_F(JSTypedLoweringTest, JSCreateCatchContext) {
+ Handle<String> name = factory()->length_string();
+ Node* const exception = Parameter(Type::Receiver());
+ Node* const closure = Parameter(Type::Function());
+ Node* const context = Parameter(Type::Any());
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+ Reduction r =
+ Reduce(graph()->NewNode(javascript()->CreateCatchContext(name), exception,
+ closure, context, effect, control));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFinishRegion(IsAllocate(IsNumberConstant(Context::SizeFor(
+ Context::MIN_CONTEXT_SLOTS + 1)),
+ IsBeginRegion(_), control),
+ _));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSInstanceOf
+// Test that instanceOf is reduced if and only if the right-hand side is a
+// function constant. Functional correctness is ensured elsewhere.
+
+
+TEST_F(JSTypedLoweringTest, JSInstanceOfSpecializationWithoutSmiCheck) {
+ Node* const context = Parameter(Type::Any());
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+
+ // Reduce if left-hand side is known to be an object.
+ Node* instanceOf =
+ graph()->NewNode(javascript()->InstanceOf(), Parameter(Type::Object(), 0),
+ HeapConstant(isolate()->object_function()), context,
+ frame_state, effect, control);
+ Node* dummy = graph()->NewNode(javascript()->ToObject(), instanceOf, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(instanceOf);
+ ASSERT_TRUE(r.Changed());
+ ASSERT_EQ(r.replacement(), dummy->InputAt(0));
+ ASSERT_NE(instanceOf, dummy->InputAt(0));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSInstanceOfSpecializationWithSmiCheck) {
+ Node* const context = Parameter(Type::Any());
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+
+ // Reduce if left-hand side could be a Smi.
+ Node* instanceOf =
+ graph()->NewNode(javascript()->InstanceOf(), Parameter(Type::Any(), 0),
+ HeapConstant(isolate()->object_function()), context,
+ frame_state, effect, control);
+ Node* dummy = graph()->NewNode(javascript()->ToObject(), instanceOf, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(instanceOf);
+ ASSERT_TRUE(r.Changed());
+ ASSERT_EQ(r.replacement(), dummy->InputAt(0));
+ ASSERT_NE(instanceOf, dummy->InputAt(0));
+}
+
+
+TEST_F(JSTypedLoweringTest, JSInstanceOfNoSpecialization) {
+ Node* const context = Parameter(Type::Any());
+ Node* const frame_state = EmptyFrameState();
+ Node* const effect = graph()->start();
+ Node* const control = graph()->start();
+
+ // Do not reduce if right-hand side is not a function constant.
+ Node* instanceOf = graph()->NewNode(
+ javascript()->InstanceOf(), Parameter(Type::Any(), 0),
+ Parameter(Type::Any()), context, frame_state, effect, control);
+ Node* dummy = graph()->NewNode(javascript()->ToObject(), instanceOf, context,
+ frame_state, effect, control);
+ Reduction r = Reduce(instanceOf);
+ ASSERT_FALSE(r.Changed());
+ ASSERT_EQ(instanceOf, dummy->InputAt(0));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/linkage-tail-call-unittest.cc b/test/unittests/compiler/linkage-tail-call-unittest.cc
new file mode 100644
index 0000000..597edde
--- /dev/null
+++ b/test/unittests/compiler/linkage-tail-call-unittest.cc
@@ -0,0 +1,352 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+MachineType kMachineTypes[] = {
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged(),
+ MachineType::AnyTagged(), MachineType::AnyTagged()};
+}
+
+class LinkageTailCall : public TestWithZone {
+ protected:
+ CallDescriptor* NewStandardCallDescriptor(LocationSignature* locations) {
+ DCHECK(arraysize(kMachineTypes) >=
+ locations->return_count() + locations->parameter_count());
+ MachineSignature* types = new (zone()) MachineSignature(
+ locations->return_count(), locations->parameter_count(), kMachineTypes);
+ return new (zone()) CallDescriptor(CallDescriptor::kCallCodeObject,
+ MachineType::AnyTagged(),
+ LinkageLocation::ForAnyRegister(),
+ types, // machine_sig
+ locations, // location_sig
+ 0, // js_parameter_count
+ Operator::kNoProperties, // properties
+ 0, // callee-saved
+ 0, // callee-saved fp
+ CallDescriptor::kNoFlags, // flags,
+ "");
+ }
+
+ LinkageLocation StackLocation(int loc) {
+ return LinkageLocation::ForCallerFrameSlot(-loc);
+ }
+
+ LinkageLocation RegisterLocation(int loc) {
+ return LinkageLocation::ForRegister(loc);
+ }
+};
+
+
+TEST_F(LinkageTailCall, EmptyToEmpty) {
+ LocationSignature locations(0, 0, nullptr);
+ CallDescriptor* desc = NewStandardCallDescriptor(&locations);
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
+}
+
+
+TEST_F(LinkageTailCall, SameReturn) {
+ // Caller
+ LinkageLocation location_array[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations1);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
+}
+
+
+TEST_F(LinkageTailCall, DifferingReturn) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(1)};
+ LocationSignature locations2(1, 0, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ int stack_param_delta = 0;
+ EXPECT_FALSE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterParametersCallee) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(0),
+ RegisterLocation(0)};
+ LocationSignature locations2(1, 1, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterParametersCaller) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0),
+ RegisterLocation(0)};
+ LocationSignature locations1(1, 1, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(0)};
+ LocationSignature locations2(1, 0, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(0, stack_param_delta);
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCallee) {
+ // Caller
+ LinkageLocation location_array1[] = {RegisterLocation(0)};
+ LocationSignature locations1(1, 0, location_array1);
+ CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+ // Callee
+ LinkageLocation location_array2[] = {RegisterLocation(0), RegisterLocation(0),
+ RegisterLocation(1), StackLocation(1)};
+ LocationSignature locations2(1, 3, location_array2);
+ CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+ CommonOperatorBuilder common(zone());
+ const Operator* op = common.Call(desc2);
+ Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+ int stack_param_delta = 0;
+ EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+ EXPECT_EQ(-1, stack_param_delta);
+}
+
+
+TEST_F(LinkageTailCall, MoreRegisterAndStackParametersCaller) {
+  // Caller
+  LinkageLocation location_array[] = {RegisterLocation(0), RegisterLocation(0),
+                                      RegisterLocation(1), StackLocation(1)};
+  LocationSignature locations1(1, 3, location_array);  // 1 return, 3 params
+  CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+  // Callee
+  LinkageLocation location_array2[] = {RegisterLocation(0)};  // return only
+  LocationSignature locations2(1, 0, location_array2);
+  CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+  CommonOperatorBuilder common(zone());
+  const Operator* op = common.Call(desc2);
+  Node* const node = Node::New(zone(), 1, op, 0, nullptr, false);
+  int stack_param_delta = 0;
+  EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+  EXPECT_EQ(1, stack_param_delta);  // the caller's stack slot is freed
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParameters) {
+  // Caller
+  LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+                                      StackLocation(2), StackLocation(1)};
+  LocationSignature locations1(1, 3, location_array);
+  CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+  // Callee (identical stack layout, so the tail call is free).
+  LocationSignature locations2(1, 3, location_array);
+  CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+  CommonOperatorBuilder common(zone());
+  Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+  Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+  Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+  Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+  Node* parameters[] = {p0, p1, p2, p3};  // p0 is the callee target
+  const Operator* op = common.Call(desc2);
+  Node* const node =
+      Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+  int stack_param_delta = 0;
+  EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+  EXPECT_EQ(0, stack_param_delta);
+}
+
+
+TEST_F(LinkageTailCall, NonMatchingStackParameters) {
+  // Caller
+  LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+                                      StackLocation(2), StackLocation(1)};
+  LocationSignature locations1(1, 3, location_array);
+  CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+  // Callee (same stack layout; only the argument nodes differ).
+  LocationSignature locations2(1, 3, location_array);
+  CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+  CommonOperatorBuilder common(zone());
+  Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+  Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+  Node* p2 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+  Node* p3 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+  Node* parameters[] = {p0, p1, p2, p3};  // parameters 1 and 2 are swapped
+  const Operator* op = common.Call(desc2);
+  Node* const node =
+      Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+  int stack_param_delta = 0;
+  EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+  EXPECT_EQ(0, stack_param_delta);  // layouts still line up slot for slot
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegisters) {
+  // Caller
+  LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+                                      StackLocation(2), StackLocation(1),
+                                      RegisterLocation(0), RegisterLocation(1)};
+  LocationSignature locations1(1, 5, location_array);
+  CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+  // Callee (first four locations only: same stack layout, fewer registers).
+  LocationSignature locations2(1, 3, location_array);
+  CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+  CommonOperatorBuilder common(zone());
+  Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+  Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+  Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+  Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+  Node* parameters[] = {p0, p1, p2, p3};  // p0 is the callee target
+  const Operator* op = common.Call(desc2);
+  Node* const node =
+      Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+  int stack_param_delta = 0;
+  EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+  EXPECT_EQ(0, stack_param_delta);  // stack slots match; registers don't count
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegisters) {
+  // Caller
+  LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+                                      StackLocation(2), StackLocation(1),
+                                      RegisterLocation(0), RegisterLocation(1)};
+  LocationSignature locations1(1, 3, location_array);
+  CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+  // Callee (all six locations: same stack layout, two extra registers).
+  LocationSignature locations2(1, 5, location_array);
+  CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+  CommonOperatorBuilder common(zone());
+  Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+  Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+  Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+  Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+  Node* p4 = Node::New(zone(), 0, common.Parameter(3), 0, nullptr, false);
+  Node* parameters[] = {p0, p1, p2, p3, p4};  // p0 is the callee target
+  const Operator* op = common.Call(desc2);
+  Node* const node =
+      Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+  int stack_param_delta = 0;
+  EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+  EXPECT_EQ(0, stack_param_delta);  // stack slots match; registers don't count
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCallerRegistersAndStack) {
+  // Caller
+  LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+                                      StackLocation(2), StackLocation(1),
+                                      RegisterLocation(0), StackLocation(4)};
+  LocationSignature locations1(1, 5, location_array);
+  CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+  // Callee (first four locations only)
+  LocationSignature locations2(1, 3, location_array);
+  CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+  CommonOperatorBuilder common(zone());
+  Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+  Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+  Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+  Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+  Node* p4 = Node::New(zone(), 0, common.Parameter(3), 0, nullptr, false);
+  Node* parameters[] = {p0, p1, p2, p3, p4};  // p0 is the callee target
+  const Operator* op = common.Call(desc2);
+  Node* const node =
+      Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+  int stack_param_delta = 0;
+  EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+  EXPECT_EQ(1, stack_param_delta);  // caller's extra stack slot (4) is freed
+}
+
+
+TEST_F(LinkageTailCall, MatchingStackParametersExtraCalleeRegistersAndStack) {
+  // Caller
+  LinkageLocation location_array[] = {RegisterLocation(0), StackLocation(3),
+                                      StackLocation(2), RegisterLocation(0),
+                                      RegisterLocation(1), StackLocation(4)};
+  LocationSignature locations1(1, 3, location_array);
+  CallDescriptor* desc1 = NewStandardCallDescriptor(&locations1);
+
+  // Callee (all six locations)
+  LocationSignature locations2(1, 5, location_array);
+  CallDescriptor* desc2 = NewStandardCallDescriptor(&locations2);
+
+  CommonOperatorBuilder common(zone());
+  Node* p0 = Node::New(zone(), 0, nullptr, 0, nullptr, false);
+  Node* p1 = Node::New(zone(), 0, common.Parameter(0), 0, nullptr, false);
+  Node* p2 = Node::New(zone(), 0, common.Parameter(1), 0, nullptr, false);
+  Node* p3 = Node::New(zone(), 0, common.Parameter(2), 0, nullptr, false);
+  Node* p4 = Node::New(zone(), 0, common.Parameter(3), 0, nullptr, false);
+  Node* parameters[] = {p0, p1, p2, p3, p4};  // p0 is the callee target
+  const Operator* op = common.Call(desc2);
+  Node* const node =
+      Node::New(zone(), 1, op, arraysize(parameters), parameters, false);
+  int stack_param_delta = 0;
+  EXPECT_TRUE(desc1->CanTailCall(node, &stack_param_delta));
+  EXPECT_EQ(-1, stack_param_delta);  // callee needs one more stack slot (4)
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/live-range-builder.h b/test/unittests/compiler/live-range-builder.h
new file mode 100644
index 0000000..4a5621f
--- /dev/null
+++ b/test/unittests/compiler/live-range-builder.h
@@ -0,0 +1,78 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIVE_RANGE_BUILDER_H_
+#define V8_LIVE_RANGE_BUILDER_H_
+
+#include "src/compiler/register-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Utility offering shorthand syntax for building up a range by providing its ID
+// and pairs (start, end) specifying intervals. Circumvents current incomplete
+// support for C++ features such as instantiation lists, on OS X and Android.
+class TestRangeBuilder {
+ public:
+  explicit TestRangeBuilder(Zone* zone)
+      : id_(-1), pairs_(), uses_(), zone_(zone) {}
+
+  TestRangeBuilder& Id(int id) {
+    id_ = id;
+    return *this;
+  }
+  TestRangeBuilder& Add(int start, int end) {
+    pairs_.push_back({start, end});
+    return *this;
+  }
+
+  TestRangeBuilder& AddUse(int pos) {
+    uses_.insert(pos);
+    return *this;
+  }
+
+  TopLevelLiveRange* Build(int start, int end) {
+    return Add(start, end).Build();
+  }
+
+  TopLevelLiveRange* Build() {
+    TopLevelLiveRange* range =
+        new (zone_) TopLevelLiveRange(id_, MachineRepresentation::kTagged);
+    // Traverse the provided interval specifications backwards, because that is
+    // what LiveRange expects.
+    for (int i = static_cast<int>(pairs_.size()) - 1; i >= 0; --i) {
+      Interval pair = pairs_[i];
+      LifetimePosition start = LifetimePosition::FromInt(pair.first);
+      LifetimePosition end = LifetimePosition::FromInt(pair.second);
+      CHECK(start < end);
+      range->AddUseInterval(start, end, zone_);
+    }
+    for (int pos : uses_) {
+      UsePosition* use_position =
+          new (zone_) UsePosition(LifetimePosition::FromInt(pos), nullptr,
+                                  nullptr, UsePositionHintType::kNone);
+      range->AddUsePosition(use_position);
+    }
+    uses_.clear();  // was leaked into the next Build(); reset like pairs_
+    pairs_.clear();
+    return range;
+  }
+
+ private:
+  typedef std::pair<int, int> Interval;
+  typedef std::vector<Interval> IntervalList;
+  int id_;
+  IntervalList pairs_;
+  std::set<int> uses_;
+  Zone* zone_;
+};
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_LIVE_RANGE_BUILDER_H_
diff --git a/test/unittests/compiler/live-range-unittest.cc b/test/unittests/compiler/live-range-unittest.cc
new file mode 100644
index 0000000..e4fc2ca
--- /dev/null
+++ b/test/unittests/compiler/live-range-unittest.cc
@@ -0,0 +1,464 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "test/unittests/compiler/live-range-builder.h"
+#include "test/unittests/test-utils.h"
+
+
+// TODO(mtrofin): would we want to centralize this definition?
+#ifdef DEBUG
+#define V8_ASSERT_DEBUG_DEATH(statement, regex) \
+ ASSERT_DEATH_IF_SUPPORTED(statement, regex)
+#define DISABLE_IN_RELEASE(Name) Name
+
+#else
+#define V8_ASSERT_DEBUG_DEATH(statement, regex) statement
+#define DISABLE_IN_RELEASE(Name) DISABLED_##Name
+#endif // DEBUG
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LiveRangeUnitTest : public TestWithZone {
+ public:
+  // Split helper, to avoid int->LifetimePosition conversion nuisance.
+  LiveRange* Split(LiveRange* range, int pos) {
+    return range->SplitAt(LifetimePosition::FromInt(pos), zone());
+  }
+
+
+  TopLevelLiveRange* Splinter(TopLevelLiveRange* top, int start, int end,
+                              int new_id = 0) {
+    if (top->splinter() == nullptr) {  // lazily create the splinter range
+      TopLevelLiveRange* ret = new (zone())
+          TopLevelLiveRange(new_id, MachineRepresentation::kTagged);
+      top->SetSplinter(ret);
+    }
+    top->Splinter(LifetimePosition::FromInt(start),
+                  LifetimePosition::FromInt(end), zone());
+    return top->splinter();
+  }
+
+  // Ranges first and second match structurally.
+  bool RangesMatch(LiveRange* first, LiveRange* second) {
+    if (first->Start() != second->Start() || first->End() != second->End()) {
+      return false;
+    }
+    UseInterval* i1 = first->first_interval();
+    UseInterval* i2 = second->first_interval();
+
+    while (i1 != nullptr && i2 != nullptr) {
+      if (i1->start() != i2->start() || i1->end() != i2->end()) return false;
+      i1 = i1->next();
+      i2 = i2->next();
+    }
+    if (i1 != nullptr || i2 != nullptr) return false;  // same interval count
+
+    UsePosition* p1 = first->first_pos();
+    UsePosition* p2 = second->first_pos();
+
+    while (p1 != nullptr && p2 != nullptr) {
+      if (p1->pos() != p2->pos()) return false;
+      p1 = p1->next();
+      p2 = p2->next();
+    }
+    if (p1 != nullptr || p2 != nullptr) return false;  // same use-pos count
+    return true;
+  }
+};
+
+
+TEST_F(LiveRangeUnitTest, InvalidConstruction) {
+  // Build a range manually, because the builder guards against empty cases.
+  TopLevelLiveRange* range =
+      new (zone()) TopLevelLiveRange(1, MachineRepresentation::kTagged);
+  V8_ASSERT_DEBUG_DEATH(
+      range->AddUseInterval(LifetimePosition::FromInt(0),
+                            LifetimePosition::FromInt(0), zone()),
+      ".*");  // empty interval [0, 0) must die in debug builds
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitInvalidStart) {
+  TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
+  V8_ASSERT_DEBUG_DEATH(Split(range, 0), ".*");  // splitting at Start()
+}
+
+
+TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(InvalidSplitEnd)) {
+  TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
+  ASSERT_DEATH_IF_SUPPORTED(Split(range, 1), ".*");  // splitting at End()
+}
+
+
+TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(SplitInvalidPreStart)) {
+  TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(1, 2);
+  ASSERT_DEATH_IF_SUPPORTED(Split(range, 0), ".*");  // position before Start()
+}
+
+
+TEST_F(LiveRangeUnitTest, DISABLE_IN_RELEASE(SplitInvalidPostEnd)) {
+  TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 1);
+  ASSERT_DEATH_IF_SUPPORTED(Split(range, 2), ".*");  // position past End()
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitSingleIntervalNoUsePositions) {
+  TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 2);
+  LiveRange* child = Split(range, 1);  // [0, 2) -> [0, 1) + [1, 2)
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  LiveRange* expected_top = TestRangeBuilder(zone()).Build(0, 1);
+  LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(1, 2);
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsBetween) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
+  LiveRange* child = Split(range, 3);  // split inside the lifetime hole
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  LiveRange* expected_top = TestRangeBuilder(zone()).Build(0, 2);
+  LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(4, 6);
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsFront) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
+  LiveRange* child = Split(range, 1);  // split inside the first interval
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  LiveRange* expected_top = TestRangeBuilder(zone()).Build(0, 1);
+  LiveRange* expected_bottom =
+      TestRangeBuilder(zone()).Add(1, 2).Add(4, 6).Build();
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalNoUsePositionsAfter) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).Build();
+  LiveRange* child = Split(range, 5);  // split inside the second interval
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  LiveRange* expected_top =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 5).Build();
+  LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(5, 6);
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositions) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 3).AddUse(0).AddUse(2).Build();
+
+  LiveRange* child = Split(range, 1);  // uses divide between the halves
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  LiveRange* expected_top =
+      TestRangeBuilder(zone()).Add(0, 1).AddUse(0).Build();
+  LiveRange* expected_bottom =
+      TestRangeBuilder(zone()).Add(1, 3).AddUse(2).Build();
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitSingleIntervalUsePositionsAtPos) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 3).AddUse(0).AddUse(2).Build();
+
+  LiveRange* child = Split(range, 2);  // split exactly at a use position
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  // The use at the split position stays with the top range here.
+  LiveRange* expected_top =
+      TestRangeBuilder(zone()).Add(0, 2).AddUse(0).AddUse(2).Build();
+  LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(2, 3);
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsBetween) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
+  LiveRange* child = Split(range, 3);  // split inside the lifetime hole
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  LiveRange* expected_top =
+      TestRangeBuilder(zone()).Add(0, 2).AddUse(1).Build();
+  LiveRange* expected_bottom =
+      TestRangeBuilder(zone()).Add(4, 6).AddUse(5).Build();
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAtInterval) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(4).Build();
+  LiveRange* child = Split(range, 4);  // split at the second interval's start
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  // The use at 4 (the split position) goes with the bottom range here.
+  LiveRange* expected_top =
+      TestRangeBuilder(zone()).Add(0, 2).AddUse(1).Build();
+  LiveRange* expected_bottom =
+      TestRangeBuilder(zone()).Add(4, 6).AddUse(4).Build();
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsFront) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
+  LiveRange* child = Split(range, 1);  // split exactly at the use at 1
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  // NOTE(review): here the use at the split position stays with the top.
+  LiveRange* expected_top =
+      TestRangeBuilder(zone()).Add(0, 1).AddUse(1).Build();
+  LiveRange* expected_bottom =
+      TestRangeBuilder(zone()).Add(1, 2).Add(4, 6).AddUse(5).Build();
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplitManyIntervalUsePositionsAfter) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 6).AddUse(1).AddUse(5).Build();
+  LiveRange* child = Split(range, 5);  // split at the use at 5
+
+  EXPECT_NE(nullptr, range->next());
+  EXPECT_EQ(child, range->next());
+
+  LiveRange* expected_top =
+      TestRangeBuilder(zone()).Add(0, 2).Add(4, 5).AddUse(1).AddUse(5).Build();
+  LiveRange* expected_bottom = TestRangeBuilder(zone()).Build(5, 6);
+  EXPECT_TRUE(RangesMatch(expected_top, range));
+  EXPECT_TRUE(RangesMatch(expected_bottom, child));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterSingleInterval) {
+  TopLevelLiveRange* range = TestRangeBuilder(zone()).Build(0, 6);
+  TopLevelLiveRange* splinter = Splinter(range, 3, 5);  // carve out [3, 5)
+  EXPECT_EQ(nullptr, range->next());
+  EXPECT_EQ(nullptr, splinter->next());
+  EXPECT_EQ(range, splinter->splintered_from());
+
+  TopLevelLiveRange* expected_source =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 6).Build();
+  TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(3, 5);
+  EXPECT_TRUE(RangesMatch(expected_source, range));
+  EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeSingleInterval) {
+  TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 6);
+  TopLevelLiveRange* splinter = Splinter(original, 3, 5);
+
+  original->Merge(splinter, zone());
+  // Merging back should look like the original split at 3 and at 5.
+  TopLevelLiveRange* result = TestRangeBuilder(zone()).Build(0, 6);
+  LiveRange* child_1 = Split(result, 3);
+  Split(child_1, 5);
+
+  EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsOutside) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  TopLevelLiveRange* splinter = Splinter(range, 2, 6);  // straddles the hole
+  EXPECT_EQ(nullptr, range->next());
+  EXPECT_EQ(nullptr, splinter->next());
+  EXPECT_EQ(range, splinter->splintered_from());
+
+  TopLevelLiveRange* expected_source =
+      TestRangeBuilder(zone()).Add(0, 2).Add(6, 8).Build();
+  TopLevelLiveRange* expected_splinter =
+      TestRangeBuilder(zone()).Add(2, 3).Add(5, 6).Build();
+  EXPECT_TRUE(RangesMatch(expected_source, range));
+  EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsOutside) {
+  TopLevelLiveRange* original =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  TopLevelLiveRange* splinter = Splinter(original, 2, 6);
+  original->Merge(splinter, zone());
+
+  TopLevelLiveRange* result =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  LiveRange* child_1 = Split(result, 2);  // merge == splits at 2 and 6
+  Split(child_1, 6);
+  EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsInside) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  V8_ASSERT_DEBUG_DEATH(Splinter(range, 3, 5), ".*");  // [3, 5) is the hole
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsLeft) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  TopLevelLiveRange* splinter = Splinter(range, 2, 4);  // overlaps [2, 3) only
+  EXPECT_EQ(nullptr, range->next());
+  EXPECT_EQ(nullptr, splinter->next());
+  EXPECT_EQ(range, splinter->splintered_from());
+
+  TopLevelLiveRange* expected_source =
+      TestRangeBuilder(zone()).Add(0, 2).Add(5, 8).Build();
+  TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(2, 3);
+  EXPECT_TRUE(RangesMatch(expected_source, range));
+  EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsLeft) {
+  TopLevelLiveRange* original =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  TopLevelLiveRange* splinter = Splinter(original, 2, 4);
+  original->Merge(splinter, zone());
+
+  TopLevelLiveRange* result =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  Split(result, 2);  // merge == a single split at 2
+  EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMultipleIntervalsRight) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  TopLevelLiveRange* splinter = Splinter(range, 4, 6);  // overlaps [5, 6) only
+  EXPECT_EQ(nullptr, range->next());
+  EXPECT_EQ(nullptr, splinter->next());
+  EXPECT_EQ(range, splinter->splintered_from());
+
+  TopLevelLiveRange* expected_source =
+      TestRangeBuilder(zone()).Add(0, 3).Add(6, 8).Build();
+  TopLevelLiveRange* expected_splinter = TestRangeBuilder(zone()).Build(5, 6);
+  EXPECT_TRUE(RangesMatch(expected_source, range));
+  EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, SplinterMergeMultipleTimes) {
+  TopLevelLiveRange* range =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 10).Add(12, 16).Build();
+  Splinter(range, 4, 6);   // both splinters accumulate in range->splinter()
+  Splinter(range, 8, 14);
+  TopLevelLiveRange* splinter = range->splinter();
+  EXPECT_EQ(nullptr, range->next());
+  EXPECT_EQ(nullptr, splinter->next());
+  EXPECT_EQ(range, splinter->splintered_from());
+
+  TopLevelLiveRange* expected_source =
+      TestRangeBuilder(zone()).Add(0, 3).Add(6, 8).Add(14, 16).Build();
+  TopLevelLiveRange* expected_splinter =
+      TestRangeBuilder(zone()).Add(5, 6).Add(8, 10).Add(12, 14).Build();
+  EXPECT_TRUE(RangesMatch(expected_source, range));
+  EXPECT_TRUE(RangesMatch(expected_splinter, splinter));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeMultipleIntervalsRight) {
+  TopLevelLiveRange* original =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  TopLevelLiveRange* splinter = Splinter(original, 4, 6);
+  original->Merge(splinter, zone());
+
+  TopLevelLiveRange* result =
+      TestRangeBuilder(zone()).Add(0, 3).Add(5, 8).Build();
+  LiveRange* child_1 = Split(result, 5);  // merge == splits at 5 and 6
+  Split(child_1, 6);
+
+  EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, MergeAfterSplitting) {
+  TopLevelLiveRange* original = TestRangeBuilder(zone()).Build(0, 8);
+  TopLevelLiveRange* splinter = Splinter(original, 4, 6);
+  LiveRange* original_child = Split(original, 2);  // split the source too
+  Split(original_child, 7);
+  original->Merge(splinter, zone());
+
+  // Net effect: the original split at 2, 4, 6 and 7.
+  TopLevelLiveRange* result = TestRangeBuilder(zone()).Build(0, 8);
+  LiveRange* child_1 = Split(result, 2);
+  LiveRange* child_2 = Split(child_1, 4);
+  LiveRange* child_3 = Split(child_2, 6);
+  Split(child_3, 7);
+
+  EXPECT_TRUE(RangesMatch(result, original));
+}
+
+
+TEST_F(LiveRangeUnitTest, IDGeneration) {
+  TopLevelLiveRange* vreg = TestRangeBuilder(zone()).Id(2).Build(0, 100);
+  EXPECT_EQ(2, vreg->vreg());
+  EXPECT_EQ(0, vreg->relative_id());  // top level is always relative id 0
+
+  TopLevelLiveRange* splinter =
+      new (zone()) TopLevelLiveRange(101, MachineRepresentation::kTagged);
+  vreg->SetSplinter(splinter);
+  vreg->Splinter(LifetimePosition::FromInt(4), LifetimePosition::FromInt(12),
+                 zone());
+
+  EXPECT_EQ(101, splinter->vreg());
+  EXPECT_EQ(1, splinter->relative_id());  // first child of vreg
+
+  LiveRange* child = vreg->SplitAt(LifetimePosition::FromInt(50), zone());
+
+  EXPECT_EQ(2, child->relative_id());  // ids keep counting across splits
+
+  LiveRange* splinter_child =
+      splinter->SplitAt(LifetimePosition::FromInt(8), zone());
+
+  EXPECT_EQ(1, splinter->relative_id());
+  EXPECT_EQ(3, splinter_child->relative_id());
+
+  vreg->Merge(splinter, zone());
+  EXPECT_EQ(1, splinter->relative_id());  // merging does not renumber
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/liveness-analyzer-unittest.cc b/test/unittests/compiler/liveness-analyzer-unittest.cc
new file mode 100644
index 0000000..b77830a
--- /dev/null
+++ b/test/unittests/compiler/liveness-analyzer-unittest.cc
@@ -0,0 +1,379 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/liveness-analyzer.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/state-values-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+using testing::MakeMatcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LivenessAnalysisTest : public GraphTest {
+ public:
+  explicit LivenessAnalysisTest(int locals_count = 4)
+      : locals_count_(locals_count),
+        machine_(zone(), MachineRepresentation::kWord32),
+        javascript_(zone()),
+        jsgraph_(isolate(), graph(), common(), &javascript_, nullptr,
+                 &machine_),
+        analyzer_(locals_count, zone()),
+        empty_values_(graph()->NewNode(common()->StateValues(0), 0, nullptr)),
+        next_checkpoint_id_(0),
+        current_block_(nullptr) {}
+
+
+ protected:
+  JSGraph* jsgraph() { return &jsgraph_; }
+
+  LivenessAnalyzer* analyzer() { return &analyzer_; }
+  void Run() {
+    StateValuesCache cache(jsgraph());
+    NonLiveFrameStateSlotReplacer replacer(&cache,
+                                           jsgraph()->UndefinedConstant(),
+                                           analyzer()->local_count(), zone());
+    analyzer()->Run(&replacer);
+  }
+
+  // Creates a FrameState whose locals are distinct Int32 constants derived
+  // from the checkpoint id, and registers it with the current block.
+  Node* Checkpoint() {
+    int ast_num = next_checkpoint_id_++;
+    int first_const = intconst_from_bailout_id(ast_num, locals_count_);
+
+    const Operator* locals_op = common()->StateValues(locals_count_);
+
+    ZoneVector<Node*> local_inputs(locals_count_, nullptr, zone());
+    for (int i = 0; i < locals_count_; i++) {
+      local_inputs[i] = jsgraph()->Int32Constant(i + first_const);
+    }
+    Node* locals =
+        graph()->NewNode(locals_op, locals_count_, &local_inputs.front());
+
+    const FrameStateFunctionInfo* state_info =
+        common()->CreateFrameStateFunctionInfo(
+            FrameStateType::kJavaScriptFunction, 0, locals_count_,
+            Handle<SharedFunctionInfo>(), CALL_MAINTAINS_NATIVE_CONTEXT);
+
+    const Operator* op = common()->FrameState(
+        BailoutId(ast_num), OutputFrameStateCombine::Ignore(), state_info);
+    Node* result =
+        graph()->NewNode(op, empty_values_, locals, empty_values_,
+                         jsgraph()->UndefinedConstant(),
+                         jsgraph()->UndefinedConstant(), graph()->start());
+
+    current_block_->Checkpoint(result);
+    return result;
+  }
+
+  void Bind(int var) { current_block()->Bind(var); }
+  void Lookup(int var) { current_block()->Lookup(var); }
+
+  class CheckpointMatcher : public MatcherInterface<Node*> {
+   public:
+    explicit CheckpointMatcher(const char* liveness, Node* empty_values,
+                               int locals_count, Node* replacement)
+        : liveness_(liveness),
+          empty_values_(empty_values),
+          locals_count_(locals_count),
+          replacement_(replacement) {}
+
+    void DescribeTo(std::ostream* os) const override {
+      *os << "is a frame state with '" << liveness_
+          << "' liveness, empty "
+             "parameters and empty expression stack";
+    }
+
+    bool MatchAndExplain(Node* frame_state,
+                         MatchResultListener* listener) const override {
+      if (frame_state == nullptr) {  // nullptr for file-wide consistency
+        *listener << "which is NULL";
+        return false;
+      }
+      DCHECK(frame_state->opcode() == IrOpcode::kFrameState);
+
+      FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+      int ast_num = state_info.bailout_id().ToInt();
+      int first_const = intconst_from_bailout_id(ast_num, locals_count_);
+
+      if (empty_values_ != frame_state->InputAt(0)) {
+        *listener << "whose parameters are " << frame_state->InputAt(0)
+                  << " but should have been " << empty_values_ << " (empty)";
+        return false;
+      }
+      if (empty_values_ != frame_state->InputAt(2)) {
+        *listener << "whose expression stack is " << frame_state->InputAt(2)
+                  << " but should have been " << empty_values_ << " (empty)";
+        return false;
+      }
+      StateValuesAccess locals(frame_state->InputAt(1));
+      if (locals_count_ != static_cast<int>(locals.size())) {
+        *listener << "whose number of locals is " << locals.size()
+                  << " but should have been " << locals_count_;
+        return false;
+      }
+      int i = 0;
+      for (StateValuesAccess::TypedNode value : locals) {
+        if (liveness_[i] == 'L') {  // 'L': local must still hold its constant
+          StringMatchResultListener value_listener;
+          if (value.node == replacement_) {
+            *listener << "whose local #" << i << " was " << value.node->opcode()
+                      << " but should have been 'undefined'";
+            return false;
+          } else if (!IsInt32Constant(first_const + i)
+                          .MatchAndExplain(value.node, &value_listener)) {
+            *listener << "whose local #" << i << " does not match";
+            if (value_listener.str() != "") {
+              *listener << ", " << value_listener.str();
+            }
+            return false;
+          }
+        } else if (liveness_[i] == '.') {  // '.': local must be replaced
+          if (value.node != replacement_) {
+            *listener << "whose local #" << i << " is " << value.node
+                      << " but should have been " << replacement_
+                      << " (undefined)";
+            return false;
+          }
+        } else {
+          UNREACHABLE();
+        }
+        i++;
+      }
+      return true;
+    }
+
+   private:
+    const char* liveness_;
+    Node* empty_values_;
+    int locals_count_;
+    Node* replacement_;
+  };
+
+  Matcher<Node*> IsCheckpointModuloLiveness(const char* liveness) {
+    return MakeMatcher(new CheckpointMatcher(liveness, empty_values_,
+                                             locals_count_,
+                                             jsgraph()->UndefinedConstant()));
+  }
+
+  LivenessAnalyzerBlock* current_block() { return current_block_; }
+  void set_current_block(LivenessAnalyzerBlock* block) {
+    current_block_ = block;
+  }
+
+ private:
+  // Base constant for checkpoint ast_num, chosen so checkpoints don't overlap.
+  static int intconst_from_bailout_id(int ast_num, int locals_count) {
+    return (locals_count + 1) * ast_num + 1;
+  }
+
+  int locals_count_;
+  MachineOperatorBuilder machine_;
+  JSOperatorBuilder javascript_;
+  JSGraph jsgraph_;
+  LivenessAnalyzer analyzer_;
+  Node* empty_values_;
+  int next_checkpoint_id_;
+  LivenessAnalyzerBlock* current_block_;
+};
+
+
+TEST_F(LivenessAnalysisTest, EmptyBlock) {
+  set_current_block(analyzer()->NewBlock());
+
+  Node* c1 = Checkpoint();
+
+  Run();
+
+  // Nothing is live.
+  EXPECT_THAT(c1, IsCheckpointModuloLiveness("...."));
+}
+
+
+TEST_F(LivenessAnalysisTest, SimpleLookup) {
+  set_current_block(analyzer()->NewBlock());
+
+  Node* c1 = Checkpoint();
+  Lookup(1);
+  Node* c2 = Checkpoint();
+
+  Run();
+
+  EXPECT_THAT(c1, IsCheckpointModuloLiveness(".L.."));  // live before the read
+  EXPECT_THAT(c2, IsCheckpointModuloLiveness("...."));  // dead afterwards
+}
+
+
+TEST_F(LivenessAnalysisTest, DiamondLookups) {
+  // Start block.
+  LivenessAnalyzerBlock* start = analyzer()->NewBlock();
+  set_current_block(start);
+  Node* c1_start = Checkpoint();
+
+  // First branch.
+  LivenessAnalyzerBlock* b1 = analyzer()->NewBlock(start);
+  set_current_block(b1);
+
+  Node* c1_b1 = Checkpoint();
+  Lookup(1);
+  Node* c2_b1 = Checkpoint();
+  Lookup(3);
+  Node* c3_b1 = Checkpoint();
+
+  // Second branch.
+  LivenessAnalyzerBlock* b2 = analyzer()->NewBlock(start);
+  set_current_block(b2);
+
+  Node* c1_b2 = Checkpoint();
+  Lookup(3);
+  Node* c2_b2 = Checkpoint();
+  Lookup(2);
+  Node* c3_b2 = Checkpoint();
+
+  // Merge block.
+  LivenessAnalyzerBlock* m = analyzer()->NewBlock(b1);
+  m->AddPredecessor(b2);
+  set_current_block(m);
+  Node* c1_m = Checkpoint();
+  Lookup(0);
+  Node* c2_m = Checkpoint();
+
+  Run();
+
+  // The start block sees the union of both branches' reads plus the merge's.
+  EXPECT_THAT(c1_start, IsCheckpointModuloLiveness("LLLL"));
+
+  EXPECT_THAT(c1_b1, IsCheckpointModuloLiveness("LL.L"));
+  EXPECT_THAT(c2_b1, IsCheckpointModuloLiveness("L..L"));
+  EXPECT_THAT(c3_b1, IsCheckpointModuloLiveness("L..."));
+
+  EXPECT_THAT(c1_b2, IsCheckpointModuloLiveness("L.LL"));
+  EXPECT_THAT(c2_b2, IsCheckpointModuloLiveness("L.L."));
+  EXPECT_THAT(c3_b2, IsCheckpointModuloLiveness("L..."));
+
+  EXPECT_THAT(c1_m, IsCheckpointModuloLiveness("L..."));  // only 0 still read
+  EXPECT_THAT(c2_m, IsCheckpointModuloLiveness("...."));
+}
+
+
+TEST_F(LivenessAnalysisTest, DiamondLookupsAndBinds) {
+ // Start block.
+ LivenessAnalyzerBlock* start = analyzer()->NewBlock();
+ set_current_block(start);
+ Node* c1_start = Checkpoint();
+ Bind(0);
+ Node* c2_start = Checkpoint();
+
+ // First branch.
+ LivenessAnalyzerBlock* b1 = analyzer()->NewBlock(start);
+ set_current_block(b1);
+
+ Node* c1_b1 = Checkpoint();
+ Bind(2);
+ Bind(1);
+ Node* c2_b1 = Checkpoint();
+ Bind(3);
+ Node* c3_b1 = Checkpoint();
+
+ // Second branch.
+ LivenessAnalyzerBlock* b2 = analyzer()->NewBlock(start);
+ set_current_block(b2);
+
+ Node* c1_b2 = Checkpoint();
+ Lookup(2);
+ Node* c2_b2 = Checkpoint();
+ Bind(2);
+ Bind(3);
+ Node* c3_b2 = Checkpoint();
+
+ // Merge block.
+ LivenessAnalyzerBlock* m = analyzer()->NewBlock(b1);
+ m->AddPredecessor(b2);
+ set_current_block(m);
+ Node* c1_m = Checkpoint();
+ Lookup(0);
+ Lookup(1);
+ Lookup(2);
+ Lookup(3);
+ Node* c2_m = Checkpoint();
+
+ Run();
+
+ EXPECT_THAT(c1_start, IsCheckpointModuloLiveness(".LL."));
+ EXPECT_THAT(c2_start, IsCheckpointModuloLiveness("LLL."));
+
+ EXPECT_THAT(c1_b1, IsCheckpointModuloLiveness("L..."));
+ EXPECT_THAT(c2_b1, IsCheckpointModuloLiveness("LLL."));
+ EXPECT_THAT(c3_b1, IsCheckpointModuloLiveness("LLLL"));
+
+ EXPECT_THAT(c1_b2, IsCheckpointModuloLiveness("LLL."));
+ EXPECT_THAT(c2_b2, IsCheckpointModuloLiveness("LL.."));
+ EXPECT_THAT(c3_b2, IsCheckpointModuloLiveness("LLLL"));
+
+ EXPECT_THAT(c1_m, IsCheckpointModuloLiveness("LLLL"));
+ EXPECT_THAT(c2_m, IsCheckpointModuloLiveness("...."));
+}
+
+
+TEST_F(LivenessAnalysisTest, SimpleLoop) {
+ // Start block.
+ LivenessAnalyzerBlock* start = analyzer()->NewBlock();
+ set_current_block(start);
+ Node* c1_start = Checkpoint();
+ Bind(0);
+ Bind(1);
+ Bind(2);
+ Bind(3);
+ Node* c2_start = Checkpoint();
+
+ // Loop header block.
+ LivenessAnalyzerBlock* header = analyzer()->NewBlock(start);
+ set_current_block(header);
+ Node* c1_header = Checkpoint();
+ Lookup(0);
+ Bind(2);
+ Node* c2_header = Checkpoint();
+
+ // Inside-loop block.
+ LivenessAnalyzerBlock* in_loop = analyzer()->NewBlock(header);
+ set_current_block(in_loop);
+ Node* c1_in_loop = Checkpoint();
+ Bind(0);
+ Lookup(3);
+ Node* c2_in_loop = Checkpoint();
+
+ // Add back edge.
+ header->AddPredecessor(in_loop);
+
+ // After-loop block.
+ LivenessAnalyzerBlock* end = analyzer()->NewBlock(header);
+ set_current_block(end);
+ Node* c1_end = Checkpoint();
+ Lookup(1);
+ Lookup(2);
+ Node* c2_end = Checkpoint();
+
+ Run();
+
+ EXPECT_THAT(c1_start, IsCheckpointModuloLiveness("...."));
+ EXPECT_THAT(c2_start, IsCheckpointModuloLiveness("LL.L"));
+
+ EXPECT_THAT(c1_header, IsCheckpointModuloLiveness("LL.L"));
+ EXPECT_THAT(c2_header, IsCheckpointModuloLiveness(".LLL"));
+
+ EXPECT_THAT(c1_in_loop, IsCheckpointModuloLiveness(".L.L"));
+ EXPECT_THAT(c2_in_loop, IsCheckpointModuloLiveness("LL.L"));
+
+ EXPECT_THAT(c1_end, IsCheckpointModuloLiveness(".LL."));
+ EXPECT_THAT(c2_end, IsCheckpointModuloLiveness("...."));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/load-elimination-unittest.cc b/test/unittests/compiler/load-elimination-unittest.cc
index f0cd60e..3ad11cf 100644
--- a/test/unittests/compiler/load-elimination-unittest.cc
+++ b/test/unittests/compiler/load-elimination-unittest.cc
@@ -15,11 +15,13 @@
class LoadEliminationTest : public GraphTest {
public:
LoadEliminationTest() : GraphTest(3), simplified_(zone()) {}
- ~LoadEliminationTest() OVERRIDE {}
+ ~LoadEliminationTest() override {}
protected:
Reduction Reduce(Node* node) {
- LoadElimination reducer;
+ // TODO(titzer): mock the GraphReducer here for better unit testing.
+ GraphReducer graph_reducer(zone(), graph());
+ LoadElimination reducer(&graph_reducer);
return reducer.Reduce(node);
}
diff --git a/test/unittests/compiler/loop-peeling-unittest.cc b/test/unittests/compiler/loop-peeling-unittest.cc
new file mode 100644
index 0000000..9dcec85
--- /dev/null
+++ b/test/unittests/compiler/loop-peeling-unittest.cc
@@ -0,0 +1,500 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/loop-peeling.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::AllOf;
+using testing::BitEq;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct While {
+ Node* loop;
+ Node* branch;
+ Node* if_true;
+ Node* exit;
+};
+
+
+// A helper for building branches.
+struct Branch {
+ Node* branch;
+ Node* if_true;
+ Node* if_false;
+};
+
+
+// A helper for building counters attached to loops.
+struct Counter {
+ Node* base;
+ Node* inc;
+ Node* phi;
+ Node* add;
+};
+
+
+class LoopPeelingTest : public GraphTest {
+ public:
+ LoopPeelingTest() : GraphTest(1), machine_(zone()) {}
+ ~LoopPeelingTest() override {}
+
+ protected:
+ MachineOperatorBuilder machine_;
+
+ MachineOperatorBuilder* machine() { return &machine_; }
+
+ LoopTree* GetLoopTree() {
+ if (FLAG_trace_turbo_graph) {
+ OFStream os(stdout);
+ os << AsRPO(*graph());
+ }
+ Zone zone;
+ return LoopFinder::BuildLoopTree(graph(), &zone);
+ }
+
+
+ PeeledIteration* PeelOne() {
+ LoopTree* loop_tree = GetLoopTree();
+ LoopTree::Loop* loop = loop_tree->outer_loops()[0];
+ EXPECT_TRUE(LoopPeeler::CanPeel(loop_tree, loop));
+ return Peel(loop_tree, loop);
+ }
+
+ PeeledIteration* Peel(LoopTree* loop_tree, LoopTree::Loop* loop) {
+ EXPECT_TRUE(LoopPeeler::CanPeel(loop_tree, loop));
+ PeeledIteration* peeled =
+ LoopPeeler::Peel(graph(), common(), loop_tree, loop, zone());
+ if (FLAG_trace_turbo_graph) {
+ OFStream os(stdout);
+ os << AsRPO(*graph());
+ }
+ return peeled;
+ }
+
+ Node* InsertReturn(Node* val, Node* effect, Node* control) {
+ Node* r = graph()->NewNode(common()->Return(), val, effect, control);
+ graph()->SetEnd(r);
+ return r;
+ }
+
+ Node* ExpectPeeled(Node* node, PeeledIteration* iter) {
+ Node* p = iter->map(node);
+ EXPECT_NE(node, p);
+ return p;
+ }
+
+ void ExpectNotPeeled(Node* node, PeeledIteration* iter) {
+ EXPECT_EQ(node, iter->map(node));
+ }
+
+ While NewWhile(Node* cond, Node* control = nullptr) {
+ if (control == nullptr) control = start();
+ Node* loop = graph()->NewNode(common()->Loop(2), control, control);
+ Node* branch = graph()->NewNode(common()->Branch(), cond, loop);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* exit = graph()->NewNode(common()->IfFalse(), branch);
+ loop->ReplaceInput(1, if_true);
+ return {loop, branch, if_true, exit};
+ }
+
+ void Chain(While* a, Node* control) { a->loop->ReplaceInput(0, control); }
+ void Nest(While* a, While* b) {
+ b->loop->ReplaceInput(1, a->exit);
+ a->loop->ReplaceInput(0, b->if_true);
+ }
+ Node* NewPhi(While* w, Node* a, Node* b) {
+ return graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), a,
+ b, w->loop);
+ }
+
+ Branch NewBranch(Node* cond, Node* control = nullptr) {
+ if (control == nullptr) control = start();
+ Node* branch = graph()->NewNode(common()->Branch(), cond, control);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ return {branch, if_true, if_false};
+ }
+
+ Counter NewCounter(While* w, int32_t b, int32_t k) {
+ Node* base = Int32Constant(b);
+ Node* inc = Int32Constant(k);
+ Node* phi = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), base, base, w->loop);
+ Node* add = graph()->NewNode(machine()->Int32Add(), phi, inc);
+ phi->ReplaceInput(1, add);
+ return {base, inc, phi, add};
+ }
+};
+
+
+TEST_F(LoopPeelingTest, SimpleLoop) {
+ Node* p0 = Parameter(0);
+ While w = NewWhile(p0);
+ Node* r = InsertReturn(p0, start(), w.exit);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* br1 = ExpectPeeled(w.branch, peeled);
+ Node* if_true1 = ExpectPeeled(w.if_true, peeled);
+ Node* if_false1 = ExpectPeeled(w.exit, peeled);
+
+ EXPECT_THAT(br1, IsBranch(p0, start()));
+ EXPECT_THAT(if_true1, IsIfTrue(br1));
+ EXPECT_THAT(if_false1, IsIfFalse(br1));
+
+ EXPECT_THAT(w.loop, IsLoop(if_true1, w.if_true));
+ EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(w.exit, if_false1)));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleLoopWithCounter) {
+ Node* p0 = Parameter(0);
+ While w = NewWhile(p0);
+ Counter c = NewCounter(&w, 0, 1);
+ Node* r = InsertReturn(c.phi, start(), w.exit);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* br1 = ExpectPeeled(w.branch, peeled);
+ Node* if_true1 = ExpectPeeled(w.if_true, peeled);
+ Node* if_false1 = ExpectPeeled(w.exit, peeled);
+
+ EXPECT_THAT(br1, IsBranch(p0, start()));
+ EXPECT_THAT(if_true1, IsIfTrue(br1));
+ EXPECT_THAT(if_false1, IsIfFalse(br1));
+ EXPECT_THAT(w.loop, IsLoop(if_true1, w.if_true));
+
+ EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
+ AllOf(CaptureEq(&merge), IsMerge(w.exit, if_false1))),
+ start(), CaptureEq(&merge)));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_outer) {
+ Node* p0 = Parameter(0);
+ While outer = NewWhile(p0);
+ While inner = NewWhile(p0);
+ Nest(&inner, &outer);
+
+ Counter c = NewCounter(&outer, 0, 1);
+ Node* r = InsertReturn(c.phi, start(), outer.exit);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* bro = ExpectPeeled(outer.branch, peeled);
+ Node* if_trueo = ExpectPeeled(outer.if_true, peeled);
+ Node* if_falseo = ExpectPeeled(outer.exit, peeled);
+
+ EXPECT_THAT(bro, IsBranch(p0, start()));
+ EXPECT_THAT(if_trueo, IsIfTrue(bro));
+ EXPECT_THAT(if_falseo, IsIfFalse(bro));
+
+ Node* bri = ExpectPeeled(inner.branch, peeled);
+ Node* if_truei = ExpectPeeled(inner.if_true, peeled);
+ Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+
+ EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
+ EXPECT_THAT(if_truei, IsIfTrue(bri));
+ EXPECT_THAT(if_falsei, IsIfFalse(bri));
+
+ EXPECT_THAT(outer.loop, IsLoop(if_falsei, inner.exit));
+ EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r,
+ IsReturn(IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
+ AllOf(CaptureEq(&merge), IsMerge(outer.exit, if_falseo))),
+ start(), CaptureEq(&merge)));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleNestedLoopWithCounter_peel_inner) {
+ Node* p0 = Parameter(0);
+ While outer = NewWhile(p0);
+ While inner = NewWhile(p0);
+ Nest(&inner, &outer);
+
+ Counter c = NewCounter(&outer, 0, 1);
+ Node* r = InsertReturn(c.phi, start(), outer.exit);
+
+ LoopTree* loop_tree = GetLoopTree();
+ LoopTree::Loop* loop = loop_tree->ContainingLoop(inner.loop);
+ EXPECT_NE(nullptr, loop);
+ EXPECT_EQ(1u, loop->depth());
+
+ PeeledIteration* peeled = Peel(loop_tree, loop);
+
+ ExpectNotPeeled(outer.loop, peeled);
+ ExpectNotPeeled(outer.branch, peeled);
+ ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.exit, peeled);
+
+ Node* bri = ExpectPeeled(inner.branch, peeled);
+ Node* if_truei = ExpectPeeled(inner.if_true, peeled);
+ Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+
+ EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
+ EXPECT_THAT(if_truei, IsIfTrue(bri));
+ EXPECT_THAT(if_falsei, IsIfFalse(bri));
+
+ EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ ExpectNotPeeled(c.add, peeled);
+
+ EXPECT_THAT(r, IsReturn(c.phi, start(), outer.exit));
+}
+
+
+TEST_F(LoopPeelingTest, SimpleInnerCounter_peel_inner) {
+ Node* p0 = Parameter(0);
+ While outer = NewWhile(p0);
+ While inner = NewWhile(p0);
+ Nest(&inner, &outer);
+ Counter c = NewCounter(&inner, 0, 1);
+ Node* phi = NewPhi(&outer, Int32Constant(11), c.phi);
+
+ Node* r = InsertReturn(phi, start(), outer.exit);
+
+ LoopTree* loop_tree = GetLoopTree();
+ LoopTree::Loop* loop = loop_tree->ContainingLoop(inner.loop);
+ EXPECT_NE(nullptr, loop);
+ EXPECT_EQ(1u, loop->depth());
+
+ PeeledIteration* peeled = Peel(loop_tree, loop);
+
+ ExpectNotPeeled(outer.loop, peeled);
+ ExpectNotPeeled(outer.branch, peeled);
+ ExpectNotPeeled(outer.if_true, peeled);
+ ExpectNotPeeled(outer.exit, peeled);
+
+ Node* bri = ExpectPeeled(inner.branch, peeled);
+ Node* if_truei = ExpectPeeled(inner.if_true, peeled);
+ Node* if_falsei = ExpectPeeled(inner.exit, peeled);
+
+ EXPECT_THAT(bri, IsBranch(p0, ExpectPeeled(inner.loop, peeled)));
+ EXPECT_THAT(if_truei, IsIfTrue(bri));
+ EXPECT_THAT(if_falsei, IsIfFalse(bri));
+
+ EXPECT_THAT(outer.loop, IsLoop(start(), IsMerge(inner.exit, if_falsei)));
+ EXPECT_THAT(peeled->map(c.add), IsInt32Add(c.base, c.inc));
+
+ Node* back = phi->InputAt(1);
+ EXPECT_THAT(back, IsPhi(MachineRepresentation::kTagged, c.phi, c.base,
+ IsMerge(inner.exit, if_falsei)));
+
+ EXPECT_THAT(phi, IsPhi(MachineRepresentation::kTagged, IsInt32Constant(11),
+ back, outer.loop));
+
+ EXPECT_THAT(r, IsReturn(phi, start(), outer.exit));
+}
+
+
+TEST_F(LoopPeelingTest, TwoBackedgeLoop) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+ Branch b2 = NewBranch(p0, b1.if_true);
+
+ loop->ReplaceInput(1, b2.if_true);
+ loop->ReplaceInput(2, b2.if_false);
+
+ Node* r = InsertReturn(p0, start(), b1.if_false);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1b = ExpectPeeled(b1.branch, peeled);
+ Node* b1t = ExpectPeeled(b1.if_true, peeled);
+ Node* b1f = ExpectPeeled(b1.if_false, peeled);
+
+ EXPECT_THAT(b1b, IsBranch(p0, start()));
+ EXPECT_THAT(ExpectPeeled(b1.if_true, peeled), IsIfTrue(b1b));
+ EXPECT_THAT(b1f, IsIfFalse(b1b));
+
+ Node* b2b = ExpectPeeled(b2.branch, peeled);
+ Node* b2t = ExpectPeeled(b2.if_true, peeled);
+ Node* b2f = ExpectPeeled(b2.if_false, peeled);
+
+ EXPECT_THAT(b2b, IsBranch(p0, b1t));
+ EXPECT_THAT(b2t, IsIfTrue(b2b));
+ EXPECT_THAT(b2f, IsIfFalse(b2b));
+
+ EXPECT_THAT(loop, IsLoop(IsMerge(b2t, b2f), b2.if_true, b2.if_false));
+ EXPECT_THAT(r, IsReturn(p0, start(), IsMerge(b1.if_false, b1f)));
+}
+
+
+TEST_F(LoopPeelingTest, TwoBackedgeLoopWithPhi) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+ Branch b2 = NewBranch(p0, b1.if_true);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 3),
+ Int32Constant(0), Int32Constant(1),
+ Int32Constant(2), loop);
+
+ loop->ReplaceInput(1, b2.if_true);
+ loop->ReplaceInput(2, b2.if_false);
+
+ Node* r = InsertReturn(phi, start(), b1.if_false);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1b = ExpectPeeled(b1.branch, peeled);
+ Node* b1t = ExpectPeeled(b1.if_true, peeled);
+ Node* b1f = ExpectPeeled(b1.if_false, peeled);
+
+ EXPECT_THAT(b1b, IsBranch(p0, start()));
+ EXPECT_THAT(ExpectPeeled(b1.if_true, peeled), IsIfTrue(b1b));
+ EXPECT_THAT(b1f, IsIfFalse(b1b));
+
+ Node* b2b = ExpectPeeled(b2.branch, peeled);
+ Node* b2t = ExpectPeeled(b2.if_true, peeled);
+ Node* b2f = ExpectPeeled(b2.if_false, peeled);
+
+ EXPECT_THAT(b2b, IsBranch(p0, b1t));
+ EXPECT_THAT(b2t, IsIfTrue(b2b));
+ EXPECT_THAT(b2f, IsIfFalse(b2b));
+
+ EXPECT_THAT(loop, IsLoop(IsMerge(b2t, b2f), b2.if_true, b2.if_false));
+
+ EXPECT_THAT(phi,
+ IsPhi(MachineRepresentation::kTagged,
+ IsPhi(MachineRepresentation::kTagged, IsInt32Constant(1),
+ IsInt32Constant(2), IsMerge(b2t, b2f)),
+ IsInt32Constant(1), IsInt32Constant(2), loop));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
+ AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
+ start(), CaptureEq(&merge)));
+}
+
+
+TEST_F(LoopPeelingTest, TwoBackedgeLoopWithCounter) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(3), start(), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+ Branch b2 = NewBranch(p0, b1.if_true);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 3),
+ Int32Constant(0), Int32Constant(1),
+ Int32Constant(2), loop);
+
+ phi->ReplaceInput(
+ 1, graph()->NewNode(machine()->Int32Add(), phi, Int32Constant(1)));
+ phi->ReplaceInput(
+ 2, graph()->NewNode(machine()->Int32Add(), phi, Int32Constant(2)));
+
+ loop->ReplaceInput(1, b2.if_true);
+ loop->ReplaceInput(2, b2.if_false);
+
+ Node* r = InsertReturn(phi, start(), b1.if_false);
+
+ PeeledIteration* peeled = PeelOne();
+
+ Node* b1b = ExpectPeeled(b1.branch, peeled);
+ Node* b1t = ExpectPeeled(b1.if_true, peeled);
+ Node* b1f = ExpectPeeled(b1.if_false, peeled);
+
+ EXPECT_THAT(b1b, IsBranch(p0, start()));
+ EXPECT_THAT(ExpectPeeled(b1.if_true, peeled), IsIfTrue(b1b));
+ EXPECT_THAT(b1f, IsIfFalse(b1b));
+
+ Node* b2b = ExpectPeeled(b2.branch, peeled);
+ Node* b2t = ExpectPeeled(b2.if_true, peeled);
+ Node* b2f = ExpectPeeled(b2.if_false, peeled);
+
+ EXPECT_THAT(b2b, IsBranch(p0, b1t));
+ EXPECT_THAT(b2t, IsIfTrue(b2b));
+ EXPECT_THAT(b2f, IsIfFalse(b2b));
+
+ Capture<Node*> entry;
+ EXPECT_THAT(loop, IsLoop(AllOf(CaptureEq(&entry), IsMerge(b2t, b2f)),
+ b2.if_true, b2.if_false));
+
+ Node* eval = phi->InputAt(0);
+
+ EXPECT_THAT(eval, IsPhi(MachineRepresentation::kTagged,
+ IsInt32Add(IsInt32Constant(0), IsInt32Constant(1)),
+ IsInt32Add(IsInt32Constant(0), IsInt32Constant(2)),
+ CaptureEq(&entry)));
+
+ EXPECT_THAT(phi, IsPhi(MachineRepresentation::kTagged, eval,
+ IsInt32Add(phi, IsInt32Constant(1)),
+ IsInt32Add(phi, IsInt32Constant(2)), loop));
+
+ Capture<Node*> merge;
+ EXPECT_THAT(
+ r, IsReturn(IsPhi(MachineRepresentation::kTagged, phi, IsInt32Constant(0),
+ AllOf(CaptureEq(&merge), IsMerge(b1.if_false, b1f))),
+ start(), CaptureEq(&merge)));
+}
+
+
+TEST_F(LoopPeelingTest, TwoExitLoop_nope) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(2), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+ Branch b2 = NewBranch(p0, b1.if_true);
+
+ loop->ReplaceInput(1, b2.if_true);
+ Node* merge = graph()->NewNode(common()->Merge(2), b1.if_false, b2.if_false);
+ InsertReturn(p0, start(), merge);
+
+ {
+ LoopTree* loop_tree = GetLoopTree();
+ LoopTree::Loop* loop = loop_tree->outer_loops()[0];
+ EXPECT_FALSE(LoopPeeler::CanPeel(loop_tree, loop));
+ }
+}
+
+
+const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
+ 0, 0, 1, 1, 1, 2);
+
+
+TEST_F(LoopPeelingTest, TwoExitLoopWithCall_nope) {
+ Node* p0 = Parameter(0);
+ Node* loop = graph()->NewNode(common()->Loop(2), start(), start());
+ Branch b1 = NewBranch(p0, loop);
+
+ Node* call = graph()->NewNode(&kMockCall, b1.if_true);
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* if_exception = graph()->NewNode(
+ common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+
+ loop->ReplaceInput(1, if_success);
+ Node* merge = graph()->NewNode(common()->Merge(2), b1.if_false, if_exception);
+ InsertReturn(p0, start(), merge);
+
+ {
+ LoopTree* loop_tree = GetLoopTree();
+ LoopTree::Loop* loop = loop_tree->outer_loops()[0];
+ EXPECT_FALSE(LoopPeeler::CanPeel(loop_tree, loop));
+ }
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index 6fdba35..2feba2e 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -7,6 +7,7 @@
#include "src/compiler/js-graph.h"
#include "src/compiler/machine-operator-reducer.h"
#include "src/compiler/typer.h"
+#include "src/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
#include "testing/gmock-support.h"
@@ -28,7 +29,8 @@
protected:
Reduction Reduce(Node* node) {
JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(graph(), common(), &javascript, &machine_);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
+ &machine_);
MachineOperatorReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
@@ -67,7 +69,7 @@
public:
explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
: MachineOperatorReducerTest(num_parameters) {}
- ~MachineOperatorReducerTestWithParam() OVERRIDE {}
+ ~MachineOperatorReducerTestWithParam() override {}
};
@@ -233,56 +235,34 @@
0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
-} // namespace
+
+const TruncationMode kTruncationModes[] = {TruncationMode::kJavaScript,
+ TruncationMode::kRoundToZero};
-// -----------------------------------------------------------------------------
-// Unary operators
-
-
-namespace {
-
-struct UnaryOperator {
+struct ComparisonBinaryOperator {
const Operator* (MachineOperatorBuilder::*constructor)();
const char* constructor_name;
};
-std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
- return os << unop.constructor_name;
+std::ostream& operator<<(std::ostream& os,
+ ComparisonBinaryOperator const& cbop) {
+ return os << cbop.constructor_name;
}
-static const UnaryOperator kUnaryOperators[] = {
- {&MachineOperatorBuilder::ChangeInt32ToFloat64, "ChangeInt32ToFloat64"},
- {&MachineOperatorBuilder::ChangeUint32ToFloat64, "ChangeUint32ToFloat64"},
- {&MachineOperatorBuilder::ChangeFloat64ToInt32, "ChangeFloat64ToInt32"},
- {&MachineOperatorBuilder::ChangeFloat64ToUint32, "ChangeFloat64ToUint32"},
- {&MachineOperatorBuilder::ChangeInt32ToInt64, "ChangeInt32ToInt64"},
- {&MachineOperatorBuilder::ChangeUint32ToUint64, "ChangeUint32ToUint64"},
- {&MachineOperatorBuilder::TruncateFloat64ToInt32, "TruncateFloat64ToInt32"},
- {&MachineOperatorBuilder::TruncateInt64ToInt32, "TruncateInt64ToInt32"}};
+const ComparisonBinaryOperator kComparisonBinaryOperators[] = {
+#define OPCODE(Opcode) \
+ { &MachineOperatorBuilder::Opcode, #Opcode } \
+ ,
+ MACHINE_COMPARE_BINOP_LIST(OPCODE)
+#undef OPCODE
+};
} // namespace
-typedef MachineOperatorReducerTestWithParam<UnaryOperator>
- MachineUnaryOperatorReducerTest;
-
-
-TEST_P(MachineUnaryOperatorReducerTest, Parameter) {
- const UnaryOperator unop = GetParam();
- Reduction reduction =
- Reduce(graph()->NewNode((machine()->*unop.constructor)(), Parameter(0)));
- EXPECT_FALSE(reduction.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
- MachineUnaryOperatorReducerTest,
- ::testing::ValuesIn(kUnaryOperators));
-
-
// -----------------------------------------------------------------------------
// ChangeFloat64ToFloat32
@@ -438,19 +418,22 @@
TEST_F(MachineOperatorReducerTest,
TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
- Node* value = Parameter(0);
- Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(),
- graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_EQ(value, reduction.replacement());
+ TRACED_FOREACH(TruncationMode, mode, kTruncationModes) {
+ Node* value = Parameter(0);
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(mode),
+ graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_EQ(value, reduction.replacement());
+ }
}
TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
TRACED_FOREACH(double, x, kFloat64Values) {
Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(), Float64Constant(x)));
+ machine()->TruncateFloat64ToInt32(TruncationMode::kJavaScript),
+ Float64Constant(x)));
ASSERT_TRUE(reduction.Changed());
EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
}
@@ -461,13 +444,17 @@
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
Node* const merge = graph()->start();
- Reduction reduction = Reduce(graph()->NewNode(
- machine()->TruncateFloat64ToInt32(),
- graph()->NewNode(common()->Phi(kMachFloat64, 2), p0, p1, merge)));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsPhi(kMachInt32, IsTruncateFloat64ToInt32(p0),
- IsTruncateFloat64ToInt32(p1), merge));
+ TRACED_FOREACH(TruncationMode, mode, kTruncationModes) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ machine()->TruncateFloat64ToInt32(mode),
+ graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), p0,
+ p1, merge)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(
+ reduction.replacement(),
+ IsPhi(MachineRepresentation::kWord32, IsTruncateFloat64ToInt32(p0),
+ IsTruncateFloat64ToInt32(p1), merge));
+ }
}
@@ -501,6 +488,30 @@
// Word32And
+TEST_F(MachineOperatorReducerTest, Word32AndWithWord32ShlWithConstant) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FORRANGE(int32_t, l, 1, 31) {
+ TRACED_FORRANGE(int32_t, k, 1, l) {
+ // (x << L) & (-1 << K) => x << L
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(l)),
+ Int32Constant(-1 << k)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsWord32Shl(p0, IsInt32Constant(l)));
+
+ // (-1 << K) & (x << L) => x << L
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Word32And(), Int32Constant(-1 << k),
+ graph()->NewNode(machine()->Word32Shl(), p0, Int32Constant(l))));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsWord32Shl(p0, IsInt32Constant(l)));
+ }
+ }
+}
+
+
TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
Node* const p0 = Parameter(0);
@@ -571,6 +582,33 @@
}
+TEST_F(MachineOperatorReducerTest, Word32AndWithInt32MulAndConstant) {
+ Node* const p0 = Parameter(0);
+
+ TRACED_FORRANGE(int32_t, l, 1, 31) {
+ TRACED_FOREACH(int32_t, k, kInt32Values) {
+ if ((k << l) == 0) continue;
+
+ // (x * (K << L)) & (-1 << L) => x * (K << L)
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Int32Mul(), p0, Int32Constant(k << l)),
+ Int32Constant(-1 << l)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsInt32Mul(p0, IsInt32Constant(k << l)));
+
+ // ((K << L) * x) & (-1 << L) => x * (K << L)
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Word32And(),
+ graph()->NewNode(machine()->Int32Mul(), Int32Constant(k << l), p0),
+ Int32Constant(-1 << l)));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsInt32Mul(p0, IsInt32Constant(k << l)));
+ }
+ }
+}
+
+
TEST_F(MachineOperatorReducerTest,
Word32AndWithInt32AddAndInt32MulAndConstant) {
Node* const p0 = Parameter(0);
@@ -608,6 +646,27 @@
}
+TEST_F(MachineOperatorReducerTest, Word32AndWithComparisonAndConstantOne) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ TRACED_FOREACH(ComparisonBinaryOperator, cbop, kComparisonBinaryOperators) {
+ Node* cmp = graph()->NewNode((machine()->*cbop.constructor)(), p0, p1);
+
+ // cmp & 1 => cmp
+ Reduction const r1 =
+ Reduce(graph()->NewNode(machine()->Word32And(), cmp, Int32Constant(1)));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_EQ(cmp, r1.replacement());
+
+ // 1 & cmp => cmp
+ Reduction const r2 =
+ Reduce(graph()->NewNode(machine()->Word32And(), Int32Constant(1), cmp));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_EQ(cmp, r2.replacement());
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Word32Xor
@@ -749,12 +808,30 @@
// Word32Sar
+TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndComparison) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+
+ TRACED_FOREACH(ComparisonBinaryOperator, cbop, kComparisonBinaryOperators) {
+ Node* cmp = graph()->NewNode((machine()->*cbop.constructor)(), p0, p1);
+
+ // cmp << 31 >> 31 => 0 - cmp
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Word32Sar(),
+ graph()->NewNode(machine()->Word32Shl(), cmp, Int32Constant(31)),
+ Int32Constant(31)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsInt32Sub(IsInt32Constant(0), cmp));
+ }
+}
+
+
TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
Node* const p0 = Parameter(0);
Node* const p1 = Parameter(1);
{
- Node* const l = graph()->NewNode(machine()->Load(kMachInt8), p0, p1,
- graph()->start(), graph()->start());
+ Node* const l = graph()->NewNode(machine()->Load(MachineType::Int8()), p0,
+ p1, graph()->start(), graph()->start());
Reduction const r = Reduce(graph()->NewNode(
machine()->Word32Sar(),
graph()->NewNode(machine()->Word32Shl(), l, Int32Constant(24)),
@@ -763,8 +840,8 @@
EXPECT_EQ(l, r.replacement());
}
{
- Node* const l = graph()->NewNode(machine()->Load(kMachInt16), p0, p1,
- graph()->start(), graph()->start());
+ Node* const l = graph()->NewNode(machine()->Load(MachineType::Int16()), p0,
+ p1, graph()->start(), graph()->start());
Reduction const r = Reduce(graph()->NewNode(
machine()->Word32Sar(),
graph()->NewNode(machine()->Word32Shl(), l, Int32Constant(16)),
@@ -842,6 +919,25 @@
// -----------------------------------------------------------------------------
+// Int32Sub
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithConstant) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(int32_t, k, kInt32Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Int32Sub(), p0, Int32Constant(k)));
+ ASSERT_TRUE(r.Changed());
+ if (k == 0) {
+ EXPECT_EQ(p0, r.replacement());
+ } else {
+ EXPECT_THAT(r.replacement(), IsInt32Add(p0, IsInt32Constant(-k)));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
// Int32Div
@@ -1048,7 +1144,8 @@
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsSelect(kMachInt32, IsInt32LessThan(p0, IsInt32Constant(0)),
+ IsSelect(MachineRepresentation::kWord32,
+ IsInt32LessThan(p0, IsInt32Constant(0)),
IsInt32Sub(IsInt32Constant(0),
IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
IsInt32Constant(mask))),
@@ -1063,7 +1160,8 @@
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
- IsSelect(kMachInt32, IsInt32LessThan(p0, IsInt32Constant(0)),
+ IsSelect(MachineRepresentation::kWord32,
+ IsInt32LessThan(p0, IsInt32Constant(0)),
IsInt32Sub(IsInt32Constant(0),
IsWord32And(IsInt32Sub(IsInt32Constant(0), p0),
IsInt32Constant(mask))),
@@ -1147,6 +1245,28 @@
// -----------------------------------------------------------------------------
+// Int32Add
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithInt32SubWithConstantZero) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+
+ Reduction const r1 = Reduce(graph()->NewNode(
+ machine()->Int32Add(),
+ graph()->NewNode(machine()->Int32Sub(), Int32Constant(0), p0), p1));
+ ASSERT_TRUE(r1.Changed());
+ EXPECT_THAT(r1.replacement(), IsInt32Sub(p1, p0));
+
+ Reduction const r2 = Reduce(graph()->NewNode(
+ machine()->Int32Add(), p0,
+ graph()->NewNode(machine()->Int32Sub(), Int32Constant(0), p1)));
+ ASSERT_TRUE(r2.Changed());
+ EXPECT_THAT(r2.replacement(), IsInt32Sub(p0, p1));
+}
+
+
+// -----------------------------------------------------------------------------
// Int32AddWithOverflow
@@ -1284,11 +1404,163 @@
// -----------------------------------------------------------------------------
+// Float64InsertLowWord32
+
+
+TEST_F(MachineOperatorReducerTest, Float64InsertLowWord32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(uint32_t, y, kUint32Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64InsertLowWord32(),
+ Float64Constant(x), Uint32Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(
+ r.replacement(),
+ IsFloat64Constant(BitEq(bit_cast<double>(
+ (bit_cast<uint64_t>(x) & V8_UINT64_C(0xFFFFFFFF00000000)) | y))));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64InsertHighWord32
+
+
+TEST_F(MachineOperatorReducerTest, Float64InsertHighWord32WithConstant) {
+ TRACED_FOREACH(double, x, kFloat64Values) {
+ TRACED_FOREACH(uint32_t, y, kUint32Values) {
+ Reduction const r =
+ Reduce(graph()->NewNode(machine()->Float64InsertHighWord32(),
+ Float64Constant(x), Uint32Constant(y)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat64Constant(BitEq(bit_cast<double>(
+ (bit_cast<uint64_t>(x) & V8_UINT64_C(0xFFFFFFFF)) |
+ (static_cast<uint64_t>(y) << 32)))));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64Equal
+
+
+TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Conversions) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64Equal(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Equal(p0, p1));
+}
+
+
+TEST_F(MachineOperatorReducerTest, Float64EqualWithFloat32Constant) {
+ Node* const p0 = Parameter(0);
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64Equal(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32Equal(p0, IsFloat32Constant(x)));
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64LessThan
+
+
+TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Conversions) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64LessThan(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThan(p0, p1));
+}
+
+
+TEST_F(MachineOperatorReducerTest, Float64LessThanWithFloat32Constant) {
+ Node* const p0 = Parameter(0);
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThan(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThan(p0, IsFloat32Constant(x)));
+ }
+ }
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThan(), Float64Constant(x),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThan(IsFloat32Constant(x), p0));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Float64LessThanOrEqual
+
+
+TEST_F(MachineOperatorReducerTest,
+ Float64LessThanOrEqualWithFloat32Conversions) {
+ Node* const p0 = Parameter(0);
+ Node* const p1 = Parameter(1);
+ Reduction const r = Reduce(graph()->NewNode(
+ machine()->Float64LessThanOrEqual(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p1)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsFloat32LessThanOrEqual(p0, p1));
+}
+
+
+TEST_F(MachineOperatorReducerTest, Float64LessThanOrEqualWithFloat32Constant) {
+ Node* const p0 = Parameter(0);
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThanOrEqual(),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0),
+ Float64Constant(x)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat32LessThanOrEqual(p0, IsFloat32Constant(x)));
+ }
+ }
+ {
+ TRACED_FOREACH(float, x, kFloat32Values) {
+ Reduction r = Reduce(graph()->NewNode(
+ machine()->Float64LessThanOrEqual(), Float64Constant(x),
+ graph()->NewNode(machine()->ChangeFloat32ToFloat64(), p0)));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsFloat32LessThanOrEqual(IsFloat32Constant(x), p0));
+ }
+ }
+}
+
+
+// -----------------------------------------------------------------------------
// Store
TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32And) {
- const StoreRepresentation rep(kRepWord8, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord8, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1310,7 +1582,7 @@
TEST_F(MachineOperatorReducerTest, StoreRepWord8WithWord32SarAndWord32Shl) {
- const StoreRepresentation rep(kRepWord8, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord8, kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1334,7 +1606,8 @@
TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32And) {
- const StoreRepresentation rep(kRepWord16, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord16,
+ kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
@@ -1356,7 +1629,8 @@
TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32SarAndWord32Shl) {
- const StoreRepresentation rep(kRepWord16, kNoWriteBarrier);
+ const StoreRepresentation rep(MachineRepresentation::kWord16,
+ kNoWriteBarrier);
Node* const base = Parameter(0);
Node* const index = Parameter(1);
Node* const value = Parameter(2);
diff --git a/test/unittests/compiler/machine-operator-unittest.cc b/test/unittests/compiler/machine-operator-unittest.cc
index 6e0df2a..59eb484 100644
--- a/test/unittests/compiler/machine-operator-unittest.cc
+++ b/test/unittests/compiler/machine-operator-unittest.cc
@@ -17,26 +17,38 @@
template <typename T>
class MachineOperatorTestWithParam
: public TestWithZone,
- public ::testing::WithParamInterface< ::testing::tuple<MachineType, T> > {
+ public ::testing::WithParamInterface<
+ ::testing::tuple<MachineRepresentation, T> > {
protected:
- MachineType type() const { return ::testing::get<0>(B::GetParam()); }
+ MachineRepresentation representation() const {
+ return ::testing::get<0>(B::GetParam());
+ }
const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
private:
- typedef ::testing::WithParamInterface< ::testing::tuple<MachineType, T> > B;
+ typedef ::testing::WithParamInterface<
+ ::testing::tuple<MachineRepresentation, T> > B;
};
namespace {
-const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
+const MachineRepresentation kMachineReps[] = {MachineRepresentation::kWord32,
+ MachineRepresentation::kWord64};
-const MachineType kMachineTypes[] = {
- kMachFloat32, kMachFloat64, kMachInt8, kMachUint8, kMachInt16,
- kMachUint16, kMachInt32, kMachUint32, kMachInt64, kMachUint64,
- kMachPtr, kMachAnyTagged, kRepBit, kRepWord8, kRepWord16,
- kRepWord32, kRepWord64, kRepFloat32, kRepFloat64, kRepTagged};
+const MachineType kMachineTypesForAccess[] = {
+ MachineType::Float32(), MachineType::Float64(), MachineType::Int8(),
+ MachineType::Uint8(), MachineType::Int16(), MachineType::Uint16(),
+ MachineType::Int32(), MachineType::Uint32(), MachineType::Int64(),
+ MachineType::Uint64(), MachineType::AnyTagged()};
+
+
+const MachineRepresentation kRepresentationsForStore[] = {
+ MachineRepresentation::kFloat32, MachineRepresentation::kFloat64,
+ MachineRepresentation::kWord8, MachineRepresentation::kWord16,
+ MachineRepresentation::kWord32, MachineRepresentation::kWord64,
+ MachineRepresentation::kTagged};
} // namespace
@@ -50,14 +62,14 @@
TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
- MachineOperatorBuilder machine1(zone(), type());
- MachineOperatorBuilder machine2(zone(), type());
+ MachineOperatorBuilder machine1(zone(), representation());
+ MachineOperatorBuilder machine2(zone(), representation());
EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
}
TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
const Operator* op = machine.Load(GetParam());
EXPECT_EQ(2, op->ValueInputCount());
@@ -72,21 +84,22 @@
TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
}
TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(GetParam(),
OpParameter<LoadRepresentation>(machine.Load(GetParam())));
}
-INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineLoadOperatorTest,
- ::testing::Combine(::testing::ValuesIn(kMachineReps),
- ::testing::ValuesIn(kMachineTypes)));
+INSTANTIATE_TEST_CASE_P(
+ MachineOperatorTest, MachineLoadOperatorTest,
+ ::testing::Combine(::testing::ValuesIn(kMachineReps),
+ ::testing::ValuesIn(kMachineTypesForAccess)));
// -----------------------------------------------------------------------------
@@ -95,27 +108,29 @@
class MachineStoreOperatorTest
: public MachineOperatorTestWithParam<
- ::testing::tuple<MachineType, WriteBarrierKind> > {
+ ::testing::tuple<MachineRepresentation, WriteBarrierKind> > {
protected:
StoreRepresentation GetParam() const {
return StoreRepresentation(
- ::testing::get<0>(MachineOperatorTestWithParam<
- ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()),
- ::testing::get<1>(MachineOperatorTestWithParam<
- ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()));
+ ::testing::get<0>(
+ MachineOperatorTestWithParam< ::testing::tuple<
+ MachineRepresentation, WriteBarrierKind> >::GetParam()),
+ ::testing::get<1>(
+ MachineOperatorTestWithParam< ::testing::tuple<
+ MachineRepresentation, WriteBarrierKind> >::GetParam()));
}
};
TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
- MachineOperatorBuilder machine1(zone(), type());
- MachineOperatorBuilder machine2(zone(), type());
+ MachineOperatorBuilder machine1(zone(), representation());
+ MachineOperatorBuilder machine2(zone(), representation());
EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
}
TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
const Operator* op = machine.Store(GetParam());
EXPECT_EQ(3, op->ValueInputCount());
@@ -130,13 +145,13 @@
TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
}
TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
+ MachineOperatorBuilder machine(zone(), representation());
EXPECT_EQ(GetParam(),
OpParameter<StoreRepresentation>(machine.Store(GetParam())));
}
@@ -146,125 +161,208 @@
MachineOperatorTest, MachineStoreOperatorTest,
::testing::Combine(
::testing::ValuesIn(kMachineReps),
- ::testing::Combine(::testing::ValuesIn(kMachineTypes),
+ ::testing::Combine(::testing::ValuesIn(kRepresentationsForStore),
::testing::Values(kNoWriteBarrier,
kFullWriteBarrier))));
-
+#endif
// -----------------------------------------------------------------------------
// Pure operators.
-
namespace {
struct PureOperator {
const Operator* (MachineOperatorBuilder::*constructor)();
- IrOpcode::Value opcode;
+ char const* const constructor_name;
int value_input_count;
int control_input_count;
int value_output_count;
};
-std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
- return os << IrOpcode::Mnemonic(pop.opcode);
+std::ostream& operator<<(std::ostream& os, PureOperator const& pop) {
+ return os << pop.constructor_name;
}
-
const PureOperator kPureOperators[] = {
#define PURE(Name, value_input_count, control_input_count, value_output_count) \
{ \
- &MachineOperatorBuilder::Name, IrOpcode::k##Name, value_input_count, \
+ &MachineOperatorBuilder::Name, #Name, value_input_count, \
control_input_count, value_output_count \
}
- PURE(Word32And, 2, 0, 1), PURE(Word32Or, 2, 0, 1), PURE(Word32Xor, 2, 0, 1),
- PURE(Word32Shl, 2, 0, 1), PURE(Word32Shr, 2, 0, 1),
- PURE(Word32Sar, 2, 0, 1), PURE(Word32Ror, 2, 0, 1),
- PURE(Word32Equal, 2, 0, 1), PURE(Word64And, 2, 0, 1),
- PURE(Word64Or, 2, 0, 1), PURE(Word64Xor, 2, 0, 1), PURE(Word64Shl, 2, 0, 1),
- PURE(Word64Shr, 2, 0, 1), PURE(Word64Sar, 2, 0, 1),
- PURE(Word64Ror, 2, 0, 1), PURE(Word64Equal, 2, 0, 1),
- PURE(Int32Add, 2, 0, 1), PURE(Int32AddWithOverflow, 2, 0, 2),
- PURE(Int32Sub, 2, 0, 1), PURE(Int32SubWithOverflow, 2, 0, 2),
- PURE(Int32Mul, 2, 0, 1), PURE(Int32MulHigh, 2, 0, 1),
- PURE(Int32Div, 2, 1, 1), PURE(Uint32Div, 2, 1, 1), PURE(Int32Mod, 2, 1, 1),
- PURE(Uint32Mod, 2, 1, 1), PURE(Int32LessThan, 2, 0, 1),
- PURE(Int32LessThanOrEqual, 2, 0, 1), PURE(Uint32LessThan, 2, 0, 1),
- PURE(Uint32LessThanOrEqual, 2, 0, 1), PURE(Int64Add, 2, 0, 1),
- PURE(Int64Sub, 2, 0, 1), PURE(Int64Mul, 2, 0, 1), PURE(Int64Div, 2, 0, 1),
- PURE(Uint64Div, 2, 0, 1), PURE(Int64Mod, 2, 0, 1), PURE(Uint64Mod, 2, 0, 1),
- PURE(Int64LessThan, 2, 0, 1), PURE(Int64LessThanOrEqual, 2, 0, 1),
- PURE(Uint64LessThan, 2, 0, 1), PURE(ChangeFloat32ToFloat64, 1, 0, 1),
- PURE(ChangeFloat64ToInt32, 1, 0, 1), PURE(ChangeFloat64ToUint32, 1, 0, 1),
- PURE(ChangeInt32ToInt64, 1, 0, 1), PURE(ChangeUint32ToFloat64, 1, 0, 1),
- PURE(ChangeUint32ToUint64, 1, 0, 1),
- PURE(TruncateFloat64ToFloat32, 1, 0, 1),
- PURE(TruncateFloat64ToInt32, 1, 0, 1), PURE(TruncateInt64ToInt32, 1, 0, 1),
- PURE(Float64Add, 2, 0, 1), PURE(Float64Sub, 2, 0, 1),
- PURE(Float64Mul, 2, 0, 1), PURE(Float64Div, 2, 0, 1),
- PURE(Float64Mod, 2, 0, 1), PURE(Float64Sqrt, 1, 0, 1),
- PURE(Float64Equal, 2, 0, 1), PURE(Float64LessThan, 2, 0, 1),
- PURE(Float64LessThanOrEqual, 2, 0, 1), PURE(LoadStackPointer, 0, 0, 1),
- PURE(Float64Floor, 1, 0, 1), PURE(Float64Ceil, 1, 0, 1),
- PURE(Float64RoundTruncate, 1, 0, 1), PURE(Float64RoundTiesAway, 1, 0, 1)
+ PURE(Word32And, 2, 0, 1), // --
+ PURE(Word32Or, 2, 0, 1), // --
+ PURE(Word32Xor, 2, 0, 1), // --
+ PURE(Word32Shl, 2, 0, 1), // --
+ PURE(Word32Shr, 2, 0, 1), // --
+ PURE(Word32Sar, 2, 0, 1), // --
+ PURE(Word32Ror, 2, 0, 1), // --
+ PURE(Word32Equal, 2, 0, 1), // --
+ PURE(Word32Clz, 1, 0, 1), // --
+ PURE(Word64And, 2, 0, 1), // --
+ PURE(Word64Or, 2, 0, 1), // --
+ PURE(Word64Xor, 2, 0, 1), // --
+ PURE(Word64Shl, 2, 0, 1), // --
+ PURE(Word64Shr, 2, 0, 1), // --
+ PURE(Word64Sar, 2, 0, 1), // --
+ PURE(Word64Ror, 2, 0, 1), // --
+ PURE(Word64Equal, 2, 0, 1), // --
+ PURE(Int32Add, 2, 0, 1), // --
+ PURE(Int32AddWithOverflow, 2, 0, 2), // --
+ PURE(Int32Sub, 2, 0, 1), // --
+ PURE(Int32SubWithOverflow, 2, 0, 2), // --
+ PURE(Int32Mul, 2, 0, 1), // --
+ PURE(Int32MulHigh, 2, 0, 1), // --
+ PURE(Int32Div, 2, 1, 1), // --
+ PURE(Uint32Div, 2, 1, 1), // --
+ PURE(Int32Mod, 2, 1, 1), // --
+ PURE(Uint32Mod, 2, 1, 1), // --
+ PURE(Int32LessThan, 2, 0, 1), // --
+ PURE(Int32LessThanOrEqual, 2, 0, 1), // --
+ PURE(Uint32LessThan, 2, 0, 1), // --
+ PURE(Uint32LessThanOrEqual, 2, 0, 1), // --
+ PURE(Int64Add, 2, 0, 1), // --
+ PURE(Int64Sub, 2, 0, 1), // --
+ PURE(Int64Mul, 2, 0, 1), // --
+ PURE(Int64Div, 2, 1, 1), // --
+ PURE(Uint64Div, 2, 1, 1), // --
+ PURE(Int64Mod, 2, 1, 1), // --
+ PURE(Uint64Mod, 2, 1, 1), // --
+ PURE(Int64LessThan, 2, 0, 1), // --
+ PURE(Int64LessThanOrEqual, 2, 0, 1), // --
+ PURE(Uint64LessThan, 2, 0, 1), // --
+ PURE(Uint64LessThanOrEqual, 2, 0, 1), // --
+ PURE(ChangeFloat32ToFloat64, 1, 0, 1), // --
+ PURE(ChangeFloat64ToInt32, 1, 0, 1), // --
+ PURE(ChangeFloat64ToUint32, 1, 0, 1), // --
+ PURE(ChangeInt32ToInt64, 1, 0, 1), // --
+ PURE(ChangeUint32ToFloat64, 1, 0, 1), // --
+ PURE(ChangeUint32ToUint64, 1, 0, 1), // --
+ PURE(TruncateFloat64ToFloat32, 1, 0, 1), // --
+ PURE(TruncateInt64ToInt32, 1, 0, 1), // --
+ PURE(Float32Abs, 1, 0, 1), // --
+ PURE(Float32Add, 2, 0, 1), // --
+ PURE(Float32Sub, 2, 0, 1), // --
+ PURE(Float32Mul, 2, 0, 1), // --
+ PURE(Float32Div, 2, 0, 1), // --
+ PURE(Float32Sqrt, 1, 0, 1), // --
+ PURE(Float32Equal, 2, 0, 1), // --
+ PURE(Float32LessThan, 2, 0, 1), // --
+ PURE(Float32LessThanOrEqual, 2, 0, 1), // --
+ PURE(Float64Abs, 1, 0, 1), // --
+ PURE(Float64Add, 2, 0, 1), // --
+ PURE(Float64Sub, 2, 0, 1), // --
+ PURE(Float64Mul, 2, 0, 1), // --
+ PURE(Float64Div, 2, 0, 1), // --
+ PURE(Float64Mod, 2, 0, 1), // --
+ PURE(Float64Sqrt, 1, 0, 1), // --
+ PURE(Float64Equal, 2, 0, 1), // --
+ PURE(Float64LessThan, 2, 0, 1), // --
+ PURE(Float64LessThanOrEqual, 2, 0, 1), // --
+ PURE(LoadStackPointer, 0, 0, 1), // --
+ PURE(Float64ExtractLowWord32, 1, 0, 1), // --
+ PURE(Float64ExtractHighWord32, 1, 0, 1), // --
+ PURE(Float64InsertLowWord32, 2, 0, 1), // --
+ PURE(Float64InsertHighWord32, 2, 0, 1), // --
#undef PURE
};
+} // namespace
-typedef MachineOperatorTestWithParam<PureOperator> MachinePureOperatorTest;
+class MachinePureOperatorTest : public TestWithZone {
+ protected:
+ MachineRepresentation word_type() {
+ return MachineType::PointerRepresentation();
+ }
+};
+
+TEST_F(MachinePureOperatorTest, PureOperators) {
+ TRACED_FOREACH(MachineRepresentation, machine_rep1, kMachineReps) {
+ MachineOperatorBuilder machine1(zone(), machine_rep1);
+ TRACED_FOREACH(MachineRepresentation, machine_rep2, kMachineReps) {
+ MachineOperatorBuilder machine2(zone(), machine_rep2);
+ TRACED_FOREACH(PureOperator, pop, kPureOperators) {
+ const Operator* op1 = (machine1.*pop.constructor)();
+ const Operator* op2 = (machine2.*pop.constructor)();
+ EXPECT_EQ(op1, op2);
+ EXPECT_EQ(pop.value_input_count, op1->ValueInputCount());
+ EXPECT_EQ(pop.control_input_count, op1->ControlInputCount());
+ EXPECT_EQ(pop.value_output_count, op1->ValueOutputCount());
+ }
+ }
+ }
+}
+
+
+// Optional operators.
+
+namespace {
+
+struct OptionalOperatorEntry {
+ const OptionalOperator (MachineOperatorBuilder::*constructor)();
+ MachineOperatorBuilder::Flag enabling_flag;
+ char const* const constructor_name;
+ int value_input_count;
+ int control_input_count;
+ int value_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, OptionalOperatorEntry const& pop) {
+ return os << pop.constructor_name;
+}
+
+const OptionalOperatorEntry kOptionalOperators[] = {
+#define OPTIONAL_ENTRY(Name, value_input_count, control_input_count, \
+ value_output_count) \
+ { \
+ &MachineOperatorBuilder::Name, MachineOperatorBuilder::k##Name, #Name, \
+ value_input_count, control_input_count, value_output_count \
+ }
+ OPTIONAL_ENTRY(Float32Max, 2, 0, 1), // --
+ OPTIONAL_ENTRY(Float32Min, 2, 0, 1), // --
+ OPTIONAL_ENTRY(Float64Max, 2, 0, 1), // --
+ OPTIONAL_ENTRY(Float64Min, 2, 0, 1), // --
+ OPTIONAL_ENTRY(Float64RoundDown, 1, 0, 1), // --
+ OPTIONAL_ENTRY(Float64RoundTruncate, 1, 0, 1), // --
+ OPTIONAL_ENTRY(Float64RoundTiesAway, 1, 0, 1), // --
+#undef OPTIONAL_ENTRY
+};
} // namespace
-TEST_P(MachinePureOperatorTest, InstancesAreGloballyShared) {
- const PureOperator& pop = GetParam();
- MachineOperatorBuilder machine1(zone(), type());
- MachineOperatorBuilder machine2(zone(), type());
- EXPECT_EQ((machine1.*pop.constructor)(), (machine2.*pop.constructor)());
+class MachineOptionalOperatorTest : public TestWithZone {
+ protected:
+ MachineRepresentation word_rep() {
+ return MachineType::PointerRepresentation();
+ }
+};
+
+
+TEST_F(MachineOptionalOperatorTest, OptionalOperators) {
+ TRACED_FOREACH(OptionalOperatorEntry, pop, kOptionalOperators) {
+ TRACED_FOREACH(MachineRepresentation, machine_rep1, kMachineReps) {
+ MachineOperatorBuilder machine1(zone(), machine_rep1, pop.enabling_flag);
+ TRACED_FOREACH(MachineRepresentation, machine_rep2, kMachineReps) {
+ MachineOperatorBuilder machine2(zone(), machine_rep2,
+ pop.enabling_flag);
+ const Operator* op1 = (machine1.*pop.constructor)().op();
+ const Operator* op2 = (machine2.*pop.constructor)().op();
+ EXPECT_EQ(op1, op2);
+ EXPECT_EQ(pop.value_input_count, op1->ValueInputCount());
+ EXPECT_EQ(pop.control_input_count, op1->ControlInputCount());
+ EXPECT_EQ(pop.value_output_count, op1->ValueOutputCount());
+
+ MachineOperatorBuilder machine3(zone(), word_rep());
+ EXPECT_TRUE((machine1.*pop.constructor)().IsSupported());
+ EXPECT_FALSE((machine3.*pop.constructor)().IsSupported());
+ }
+ }
+ }
}
-TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
- MachineOperatorBuilder machine(zone(), type());
- const PureOperator& pop = GetParam();
- const Operator* op = (machine.*pop.constructor)();
-
- EXPECT_EQ(pop.value_input_count, op->ValueInputCount());
- EXPECT_EQ(0, op->EffectInputCount());
- EXPECT_EQ(pop.control_input_count, op->ControlInputCount());
- EXPECT_EQ(pop.value_input_count + pop.control_input_count,
- OperatorProperties::GetTotalInputCount(op));
-
- EXPECT_EQ(pop.value_output_count, op->ValueOutputCount());
- EXPECT_EQ(0, op->EffectOutputCount());
- EXPECT_EQ(0, op->ControlOutputCount());
-}
-
-
-TEST_P(MachinePureOperatorTest, MarkedAsPure) {
- MachineOperatorBuilder machine(zone(), type());
- const PureOperator& pop = GetParam();
- const Operator* op = (machine.*pop.constructor)();
- EXPECT_TRUE(op->HasProperty(Operator::kPure));
-}
-
-
-TEST_P(MachinePureOperatorTest, OpcodeIsCorrect) {
- MachineOperatorBuilder machine(zone(), type());
- const PureOperator& pop = GetParam();
- const Operator* op = (machine.*pop.constructor)();
- EXPECT_EQ(pop.opcode, op->opcode());
-}
-
-
-INSTANTIATE_TEST_CASE_P(
- MachineOperatorTest, MachinePureOperatorTest,
- ::testing::Combine(::testing::ValuesIn(kMachineReps),
- ::testing::ValuesIn(kPureOperators)));
-
-#endif // GTEST_HAS_COMBINE
-
-
// -----------------------------------------------------------------------------
// Pseudo operators.
@@ -277,7 +375,7 @@
TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
- MachineOperatorBuilder machine(zone(), kRepWord32);
+ MachineOperatorBuilder machine(zone(), MachineRepresentation::kWord32);
EXPECT_EQ(machine.Word32And(), machine.WordAnd());
EXPECT_EQ(machine.Word32Or(), machine.WordOr());
EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
@@ -299,7 +397,7 @@
TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
- MachineOperatorBuilder machine(zone(), kRepWord64);
+ MachineOperatorBuilder machine(zone(), MachineRepresentation::kWord64);
EXPECT_EQ(machine.Word64And(), machine.WordAnd());
EXPECT_EQ(machine.Word64Or(), machine.WordOr());
EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
diff --git a/test/unittests/compiler/mips/OWNERS b/test/unittests/compiler/mips/OWNERS
index 5508ba6..89455a4 100644
--- a/test/unittests/compiler/mips/OWNERS
+++ b/test/unittests/compiler/mips/OWNERS
@@ -3,3 +3,4 @@
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
index 0b3a0f5..122c398 100644
--- a/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
+++ b/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc
@@ -41,20 +41,20 @@
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD,
- kMachFloat64},
- kUnorderedEqual},
+ MachineType::Float64()},
+ kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD,
- kMachFloat64},
- kUnorderedLessThan},
+ MachineType::Float64()},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
- kMipsCmpD, kMachFloat64},
- kUnorderedLessThanOrEqual},
+ kMipsCmpD, MachineType::Float64()},
+ kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD,
- kMachFloat64},
- kUnorderedLessThan},
+ MachineType::Float64()},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
- "Float64GreaterThanOrEqual", kMipsCmpD, kMachFloat64},
- kUnorderedLessThanOrEqual}};
+ "Float64GreaterThanOrEqual", kMipsCmpD, MachineType::Float64()},
+ kUnsignedLessThanOrEqual}};
struct Conversion {
// The machine_type field in MachInst1 represents the destination type.
@@ -69,12 +69,14 @@
const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, kMachInt16},
- {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, kMachInt16},
- {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, kMachInt16},
- {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd, kMachInt32},
- {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, kMachInt32},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor, kMachInt32}};
+ {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, MachineType::Int16()},
+ {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, MachineType::Int16()},
+ {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, MachineType::Int16()},
+ {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, MachineType::Int32()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor,
+ MachineType::Int32()}};
// ----------------------------------------------------------------------------
@@ -83,14 +85,18 @@
const MachInst2 kShiftInstructions[] = {
- {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, kMachInt16},
- {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, kMachInt16},
- {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, kMachInt16},
- {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, kMachInt16},
- {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl, kMachInt32},
- {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr, kMachInt32},
- {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar, kMachInt32},
- {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor, kMachInt32}};
+ {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, MachineType::Int16()},
+ {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, MachineType::Int16()},
+ {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, MachineType::Int16()},
+ {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, MachineType::Int16()},
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor,
+ MachineType::Int32()}};
// ----------------------------------------------------------------------------
@@ -99,11 +105,16 @@
const MachInst2 kMulDivInstructions[] = {
- {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul, kMachInt32},
- {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv, kMachInt32},
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU, kMachUint32},
- {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD, kMachFloat64},
- {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD, kMachFloat64}};
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD,
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -112,9 +123,12 @@
const MachInst2 kModInstructions[] = {
- {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod, kMachInt32},
- {&RawMachineAssembler::Uint32Mod, "Int32UMod", kMipsModU, kMachInt32},
- {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD, kMachFloat64}};
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Mod, "Int32UMod", kMipsModU,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD,
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -123,8 +137,10 @@
const MachInst2 kFPArithInstructions[] = {
- {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD, kMachFloat64},
- {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD, kMachFloat64}};
+ {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD,
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -133,12 +149,14 @@
const MachInst2 kAddSubInstructions[] = {
- {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd, kMachInt32},
- {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub, kMachInt32},
+ {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub,
+ MachineType::Int32()},
{&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
- kMipsAddOvf, kMachInt32},
+ kMipsAddOvf, MachineType::Int32()},
{&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
- kMipsSubOvf, kMachInt32}};
+ kMipsSubOvf, MachineType::Int32()}};
// ----------------------------------------------------------------------------
@@ -147,9 +165,11 @@
const MachInst1 kAddSubOneInstructions[] = {
- {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub, kMachInt32},
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub,
+ MachineType::Int32()},
// TODO(dusmil): check this ...
- // {&RawMachineAssembler::WordEqual , "WordEqual" , kMipsTst, kMachInt32}
+ // {&RawMachineAssembler::WordEqual , "WordEqual" , kMipsTst,
+ // MachineType::Int32()}
};
@@ -159,31 +179,35 @@
const IntCmp kCmpInstructions[] = {
- {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp, kMachInt16}, 1U},
- {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp, kMachInt16},
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp,
+ MachineType::Int16()},
1U},
- {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp, kMachInt32},
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp,
+ MachineType::Int16()},
+ 1U},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp,
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMipsCmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMipsCmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kMipsCmp, kMachInt32},
+ kMipsCmp, MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMipsCmp,
- kMachInt32},
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
- kMipsCmp, kMachInt32},
+ kMipsCmp, MachineType::Int32()},
1U},
{{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMipsCmp,
- kMachUint32},
+ MachineType::Uint32()},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kMipsCmp, kMachUint32},
+ kMipsCmp, MachineType::Uint32()},
1U}};
@@ -200,23 +224,51 @@
// integers.
// mips instruction: cvt_d_w
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
- kMipsCvtDW, kMachFloat64},
- kMachInt32},
+ kMipsCvtDW, MachineType::Float64()},
+ MachineType::Int32()},
// mips instruction: cvt_d_uw
{{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
- kMipsCvtDUw, kMachFloat64},
- kMachInt32},
+ kMipsCvtDUw, MachineType::Float64()},
+ MachineType::Int32()},
// mips instruction: trunc_w_d
{{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
- kMipsTruncWD, kMachFloat64},
- kMachInt32},
+ kMipsTruncWD, MachineType::Float64()},
+ MachineType::Int32()},
// mips instruction: trunc_uw_d
{{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
- kMipsTruncUwD, kMachFloat64},
- kMachInt32}};
+ kMipsTruncUwD, MachineType::Float64()},
+ MachineType::Int32()}};
+
+const Conversion kFloat64RoundInstructions[] = {
+ {{&RawMachineAssembler::Float64RoundUp, "Float64RoundUp", kMipsCeilWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundDown, "Float64RoundDown", kMipsFloorWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTiesEven, "Float64RoundTiesEven",
+ kMipsRoundWD, MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTruncate, "Float64RoundTruncate",
+ kMipsTruncWD, MachineType::Int32()},
+ MachineType::Float64()}};
+
+const Conversion kFloat32RoundInstructions[] = {
+ {{&RawMachineAssembler::Float32RoundUp, "Float32RoundUp", kMipsCeilWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundDown, "Float32RoundDown", kMipsFloorWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTiesEven, "Float32RoundTiesEven",
+ kMipsRoundWS, MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTruncate, "Float32RoundTruncate",
+ kMipsTruncWS, MachineType::Int32()},
+ MachineType::Float32()}};
} // namespace
@@ -226,7 +278,8 @@
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -278,7 +331,8 @@
TEST_P(InstructionSelectorShiftTest, Immediate) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
- TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
StreamBuilder m(this, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
@@ -296,6 +350,65 @@
::testing::ValuesIn(kShiftInstructions));
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsShl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Logical instructions.
// ----------------------------------------------------------------------------
@@ -322,6 +435,117 @@
::testing::ValuesIn(kLogicalInstructions));
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Word32Or(m.Parameter(0), m.Parameter(0)),
+ m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1),
+ m.Word32Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsNor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsExt, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsIns, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Int32Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsIns, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
@@ -478,6 +702,81 @@
::testing::ValuesIn(kConversionInstructions));
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat64ToInt32WithRoundFloat64;
+
+TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32((m.*conv.mi.constructor)(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat64ToInt32WithRoundFloat64,
+ ::testing::ValuesIn(kFloat64RoundInstructions));
+
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat32ToInt32WithRoundFloat32;
+
+TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32(
+ m.ChangeFloat32ToFloat64((m.*conv.mi.constructor)(m.Parameter(0)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat32ToInt32WithRoundFloat32,
+ ::testing::ValuesIn(kFloat32RoundInstructions));
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return(m.ChangeFloat64ToInt32(m.ChangeFloat32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsTruncWS, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest,
+ TruncateFloat64ToFloat32OfChangeInt32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Int32());
+ m.Return(
+ m.TruncateFloat64ToFloat32(m.ChangeInt32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsCvtSW, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Loads and stores.
// ----------------------------------------------------------------------------
@@ -492,13 +791,13 @@
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kMipsLb, kMipsSb},
- {kMachUint8, kMipsLbu, kMipsSb},
- {kMachInt16, kMipsLh, kMipsSh},
- {kMachUint16, kMipsLhu, kMipsSh},
- {kMachInt32, kMipsLw, kMipsSw},
- {kRepFloat32, kMipsLwc1, kMipsSwc1},
- {kRepFloat64, kMipsLdc1, kMipsSdc1}};
+ {MachineType::Int8(), kMipsLb, kMipsSb},
+ {MachineType::Uint8(), kMipsLbu, kMipsSb},
+ {MachineType::Int16(), kMipsLh, kMipsSh},
+ {MachineType::Uint16(), kMipsLhu, kMipsSh},
+ {MachineType::Int32(), kMipsLw, kMipsSw},
+ {MachineType::Float32(), kMipsLwc1, kMipsSwc1},
+ {MachineType::Float64(), kMipsLdc1, kMipsSdc1}};
struct MemoryAccessImm {
@@ -537,49 +836,49 @@
const MemoryAccessImm kMemoryAccessesImm[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMipsLb,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kMipsLbu,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kMipsLh,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint16,
+ {MachineType::Uint16(),
kMipsLhu,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt32,
+ {MachineType::Int32(),
kMipsLw,
kMipsSw,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMipsLwc1,
kMipsSwc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMipsLdc1,
kMipsSdc1,
&InstructionSelectorTest::Stream::IsDouble,
@@ -589,37 +888,37 @@
const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMipsLb,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt8,
+ {MachineType::Int8(),
kMipsLbu,
kMipsSb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMipsLh,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMipsLhu,
kMipsSh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt32,
+ {MachineType::Int32(),
kMipsLw,
kMipsSw,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMipsLwc1,
kMipsSwc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMipsLdc1,
kMipsSdc1,
&InstructionSelectorTest::Stream::IsDouble,
@@ -634,7 +933,8 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -645,8 +945,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -672,7 +974,7 @@
TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -695,9 +997,10 @@
TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
- m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -729,7 +1032,7 @@
LoadWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -747,9 +1050,10 @@
StoreWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
- m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -775,7 +1079,7 @@
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -787,7 +1091,7 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -800,6 +1104,120 @@
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsClz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsAbsS, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsAbsD, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Max) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float32Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float32Max is `(b < a) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsFloat32Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Min) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float32Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float32Min is `(a < b) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsFloat32Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float64Max is `(b < a) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsFloat64Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float64Min is `(a < b) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMipsFloat64Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/mips64/OWNERS b/test/unittests/compiler/mips64/OWNERS
index 5508ba6..89455a4 100644
--- a/test/unittests/compiler/mips64/OWNERS
+++ b/test/unittests/compiler/mips64/OWNERS
@@ -3,3 +3,4 @@
akos.palfi@imgtec.com
balazs.kilvady@imgtec.com
dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
index a39ae75..d9cd96f 100644
--- a/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
+++ b/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
@@ -41,20 +41,20 @@
const FPCmp kFPCmpInstructions[] = {
{{&RawMachineAssembler::Float64Equal, "Float64Equal", kMips64CmpD,
- kMachFloat64},
- kUnorderedEqual},
+ MachineType::Float64()},
+ kEqual},
{{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMips64CmpD,
- kMachFloat64},
- kUnorderedLessThan},
+ MachineType::Float64()},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
- kMips64CmpD, kMachFloat64},
- kUnorderedLessThanOrEqual},
+ kMips64CmpD, MachineType::Float64()},
+ kUnsignedLessThanOrEqual},
{{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
- kMips64CmpD, kMachFloat64},
- kUnorderedLessThan},
+ kMips64CmpD, MachineType::Float64()},
+ kUnsignedLessThan},
{{&RawMachineAssembler::Float64GreaterThanOrEqual,
- "Float64GreaterThanOrEqual", kMips64CmpD, kMachFloat64},
- kUnorderedLessThanOrEqual}};
+ "Float64GreaterThanOrEqual", kMips64CmpD, MachineType::Float64()},
+ kUnsignedLessThanOrEqual}};
struct Conversion {
// The machine_type field in MachInst1 represents the destination type.
@@ -69,12 +69,18 @@
const MachInst2 kLogicalInstructions[] = {
- {&RawMachineAssembler::Word32And, "Word32And", kMips64And, kMachInt32},
- {&RawMachineAssembler::Word64And, "Word64And", kMips64And, kMachInt64},
- {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or, kMachInt32},
- {&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or, kMachInt64},
- {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor, kMachInt32},
- {&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor, kMachInt64}};
+ {&RawMachineAssembler::Word32And, "Word32And", kMips64And,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kMips64And,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -83,14 +89,22 @@
const MachInst2 kShiftInstructions[] = {
- {&RawMachineAssembler::Word32Shl, "Word32Shl", kMips64Shl, kMachInt32},
- {&RawMachineAssembler::Word64Shl, "Word64Shl", kMips64Dshl, kMachInt64},
- {&RawMachineAssembler::Word32Shr, "Word32Shr", kMips64Shr, kMachInt32},
- {&RawMachineAssembler::Word64Shr, "Word64Shr", kMips64Dshr, kMachInt64},
- {&RawMachineAssembler::Word32Sar, "Word32Sar", kMips64Sar, kMachInt32},
- {&RawMachineAssembler::Word64Sar, "Word64Sar", kMips64Dsar, kMachInt64},
- {&RawMachineAssembler::Word32Ror, "Word32Ror", kMips64Ror, kMachInt32},
- {&RawMachineAssembler::Word64Ror, "Word64Ror", kMips64Dror, kMachInt64}};
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kMips64Shl,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shl, "Word64Shl", kMips64Dshl,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kMips64Shr,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shr, "Word64Shr", kMips64Dshr,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kMips64Sar,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Sar, "Word64Sar", kMips64Dsar,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kMips64Ror,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Ror, "Word64Ror", kMips64Dror,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -99,15 +113,22 @@
const MachInst2 kMulDivInstructions[] = {
- {&RawMachineAssembler::Int32Mul, "Int32Mul", kMips64Mul, kMachInt32},
- {&RawMachineAssembler::Int32Div, "Int32Div", kMips64Div, kMachInt32},
- {&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU, kMachUint32},
- {&RawMachineAssembler::Int64Mul, "Int64Mul", kMips64Dmul, kMachInt64},
- {&RawMachineAssembler::Int64Div, "Int64Div", kMips64Ddiv, kMachInt64},
- {&RawMachineAssembler::Uint64Div, "Uint64Div", kMips64DdivU, kMachUint64},
- {&RawMachineAssembler::Float64Mul, "Float64Mul", kMips64MulD, kMachFloat64},
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kMips64Mul,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kMips64Div,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kMips64Dmul,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kMips64Ddiv,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Uint64Div, "Uint64Div", kMips64DdivU,
+ MachineType::Uint64()},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kMips64MulD,
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Div, "Float64Div", kMips64DivD,
- kMachFloat64}};
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -116,10 +137,12 @@
const MachInst2 kModInstructions[] = {
- {&RawMachineAssembler::Int32Mod, "Int32Mod", kMips64Mod, kMachInt32},
- {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU, kMachInt32},
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kMips64Mod,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU,
+ MachineType::Int32()},
{&RawMachineAssembler::Float64Mod, "Float64Mod", kMips64ModD,
- kMachFloat64}};
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -128,9 +151,10 @@
const MachInst2 kFPArithInstructions[] = {
- {&RawMachineAssembler::Float64Add, "Float64Add", kMips64AddD, kMachFloat64},
+ {&RawMachineAssembler::Float64Add, "Float64Add", kMips64AddD,
+ MachineType::Float64()},
{&RawMachineAssembler::Float64Sub, "Float64Sub", kMips64SubD,
- kMachFloat64}};
+ MachineType::Float64()}};
// ----------------------------------------------------------------------------
@@ -139,10 +163,14 @@
const MachInst2 kAddSubInstructions[] = {
- {&RawMachineAssembler::Int32Add, "Int32Add", kMips64Add, kMachInt32},
- {&RawMachineAssembler::Int64Add, "Int64Add", kMips64Dadd, kMachInt64},
- {&RawMachineAssembler::Int32Sub, "Int32Sub", kMips64Sub, kMachInt32},
- {&RawMachineAssembler::Int64Sub, "Int64Sub", kMips64Dsub, kMachInt64}};
+ {&RawMachineAssembler::Int32Add, "Int32Add", kMips64Add,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Add, "Int64Add", kMips64Dadd,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kMips64Sub,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Sub, "Int64Sub", kMips64Dsub,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -151,8 +179,10 @@
const MachInst1 kAddSubOneInstructions[] = {
- {&RawMachineAssembler::Int32Neg, "Int32Neg", kMips64Sub, kMachInt32},
- {&RawMachineAssembler::Int64Neg, "Int64Neg", kMips64Dsub, kMachInt64}};
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kMips64Sub,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Neg, "Int64Neg", kMips64Dsub,
+ MachineType::Int64()}};
// ----------------------------------------------------------------------------
@@ -161,34 +191,35 @@
const IntCmp kCmpInstructions[] = {
- {{&RawMachineAssembler::WordEqual, "WordEqual", kMips64Cmp, kMachInt64},
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kMips64Cmp,
+ MachineType::Int64()},
1U},
{{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMips64Cmp,
- kMachInt64},
+ MachineType::Int64()},
1U},
- {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp32,
- kMachInt32},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp,
+ MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMips64Cmp32,
- kMachInt32},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMips64Cmp,
+ MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp32,
- kMachInt32},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp,
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
- kMips64Cmp32, kMachInt32},
+ kMips64Cmp, MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMips64Cmp32,
- kMachInt32},
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMips64Cmp,
+ MachineType::Int32()},
1U},
{{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
- kMips64Cmp32, kMachInt32},
+ kMips64Cmp, MachineType::Int32()},
1U},
- {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp32,
- kMachUint32},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp,
+ MachineType::Uint32()},
1U},
{{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
- kMips64Cmp32, kMachUint32},
+ kMips64Cmp, MachineType::Uint32()},
1U}};
@@ -206,28 +237,56 @@
// mips instructions:
// mtc1, cvt.d.w
{{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
- kMips64CvtDW, kMachFloat64},
- kMachInt32},
+ kMips64CvtDW, MachineType::Float64()},
+ MachineType::Int32()},
// mips instructions:
// cvt.d.uw
{{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
- kMips64CvtDUw, kMachFloat64},
- kMachInt32},
+ kMips64CvtDUw, MachineType::Float64()},
+ MachineType::Int32()},
// mips instructions:
// mfc1, trunc double to word, for more details look at mips macro
// asm and mips asm file
{{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
- kMips64TruncWD, kMachFloat64},
- kMachInt32},
+ kMips64TruncWD, MachineType::Float64()},
+ MachineType::Int32()},
// mips instructions:
// trunc double to unsigned word, for more details look at mips macro
// asm and mips asm file
{{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
- kMips64TruncUwD, kMachFloat64},
- kMachInt32}};
+ kMips64TruncUwD, MachineType::Float64()},
+ MachineType::Int32()}};
+
+const Conversion kFloat64RoundInstructions[] = {
+ {{&RawMachineAssembler::Float64RoundUp, "Float64RoundUp", kMips64CeilWD,
+ MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundDown, "Float64RoundDown",
+ kMips64FloorWD, MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTiesEven, "Float64RoundTiesEven",
+ kMips64RoundWD, MachineType::Int32()},
+ MachineType::Float64()},
+ {{&RawMachineAssembler::Float64RoundTruncate, "Float64RoundTruncate",
+ kMips64TruncWD, MachineType::Int32()},
+ MachineType::Float64()}};
+
+const Conversion kFloat32RoundInstructions[] = {
+ {{&RawMachineAssembler::Float32RoundUp, "Float32RoundUp", kMips64CeilWS,
+ MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundDown, "Float32RoundDown",
+ kMips64FloorWS, MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTiesEven, "Float32RoundTiesEven",
+ kMips64RoundWS, MachineType::Int32()},
+ MachineType::Float32()},
+ {{&RawMachineAssembler::Float32RoundTruncate, "Float32RoundTruncate",
+ kMips64TruncWS, MachineType::Int32()},
+ MachineType::Float32()}};
} // namespace
@@ -236,7 +295,8 @@
TEST_P(InstructionSelectorFPCmpTest, Parameter) {
const FPCmp cmp = GetParam();
- StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -280,7 +340,8 @@
TEST_P(InstructionSelectorShiftTest, Immediate) {
const MachInst2 dpi = GetParam();
const MachineType type = dpi.machine_type;
- TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
StreamBuilder m(this, type, type);
m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
Stream s = m.Build();
@@ -296,6 +357,140 @@
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
::testing::ValuesIn(kShiftInstructions));
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
+ m.Int64Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
+ uint64_t msk =
+ ((V8_UINT64_C(0xffffffffffffffff) >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
+ m.Int64Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Int32Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int64_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Parameter(0), m.Int64Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int64_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Int64Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dins, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// Logical instructions.
// ----------------------------------------------------------------------------
@@ -318,6 +513,215 @@
INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
::testing::ValuesIn(kLogicalInstructions));
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithWord64Or) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Word64Or(m.Parameter(0), m.Parameter(0)),
+ m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1),
+ m.Word64Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Word32Or(m.Parameter(0), m.Parameter(0)),
+ m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1),
+ m.Word32Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1f;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64AndWithImmediateWithWord64Shr) {
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int64_t, shift, -64, 127) {
+ int64_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int64_t, width, 1, 63) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Word64Shr(m.Parameter(0), m.Int64Constant(shift)),
+ m.Int64Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ int64_t actual_width = (lsb + width > 64) ? (64 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int64_t, shift, -64, 127) {
+ int64_t lsb = shift & 0x3f;
+ TRACED_FORRANGE(int64_t, width, 1, 63) {
+ uint64_t msk = (V8_UINT64_C(1) << width) - 1;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(
+ m.Word64And(m.Int64Constant(msk),
+ m.Word64Shr(m.Parameter(0), m.Int64Constant(shift))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dext, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ int64_t actual_width = (lsb + width > 64) ? (64 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Shl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShlWithWord64And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word64Shl(m.Word64And(p0, m.Int64Constant((1L << (63 - shift)) - 1)),
+ m.Int64Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dshl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
// ----------------------------------------------------------------------------
// MUL/DIV instructions.
// ----------------------------------------------------------------------------
@@ -445,6 +849,145 @@
InstructionSelectorConversionTest,
::testing::ValuesIn(kConversionInstructions));
+TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.TruncateInt64ToInt32(
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dsar, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word64Shl(m.ChangeInt32ToInt64(m.Parameter(0)), m.Int32Constant(32)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dshl, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat64ToInt32WithRoundFloat64;
+
+TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32((m.*conv.mi.constructor)(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat64ToInt32WithRoundFloat64,
+ ::testing::ValuesIn(kFloat64RoundInstructions));
+
+typedef InstructionSelectorTestWithParam<Conversion>
+ CombineChangeFloat32ToInt32WithRoundFloat32;
+
+TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) {
+ {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return(m.ChangeFloat64ToInt32(
+ m.ChangeFloat32ToFloat64((m.*conv.mi.constructor)(m.Parameter(0)))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+ CombineChangeFloat32ToInt32WithRoundFloat32,
+ ::testing::ValuesIn(kFloat32RoundInstructions));
+
+
+TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return(m.ChangeFloat64ToInt32(m.ChangeFloat32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64TruncWS, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest,
+ TruncateFloat64ToFloat32OfChangeInt32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Int32());
+ m.Return(
+ m.TruncateFloat64ToFloat32(m.ChangeInt32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64CvtSW, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithMul) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mul(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64DMulHigh, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithDivMod) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Div(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Ddiv, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mod(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dmod, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
// ----------------------------------------------------------------------------
// Loads and stores.
@@ -460,14 +1003,14 @@
};
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kMips64Lb, kMips64Sb},
- {kMachUint8, kMips64Lbu, kMips64Sb},
- {kMachInt16, kMips64Lh, kMips64Sh},
- {kMachUint16, kMips64Lhu, kMips64Sh},
- {kMachInt32, kMips64Lw, kMips64Sw},
- {kRepFloat32, kMips64Lwc1, kMips64Swc1},
- {kRepFloat64, kMips64Ldc1, kMips64Sdc1},
- {kMachInt64, kMips64Ld, kMips64Sd}};
+ {MachineType::Int8(), kMips64Lb, kMips64Sb},
+ {MachineType::Uint8(), kMips64Lbu, kMips64Sb},
+ {MachineType::Int16(), kMips64Lh, kMips64Sh},
+ {MachineType::Uint16(), kMips64Lhu, kMips64Sh},
+ {MachineType::Int32(), kMips64Lw, kMips64Sw},
+ {MachineType::Float32(), kMips64Lwc1, kMips64Swc1},
+ {MachineType::Float64(), kMips64Ldc1, kMips64Sdc1},
+ {MachineType::Int64(), kMips64Ld, kMips64Sd}};
struct MemoryAccessImm {
@@ -506,56 +1049,56 @@
const MemoryAccessImm kMemoryAccessesImm[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMips64Lb,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint8,
+ {MachineType::Uint8(),
kMips64Lbu,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt16,
+ {MachineType::Int16(),
kMips64Lh,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachUint16,
+ {MachineType::Uint16(),
kMips64Lhu,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt32,
+ {MachineType::Int32(),
kMips64Lw,
kMips64Sw,
&InstructionSelectorTest::Stream::IsInteger,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMips64Lwc1,
kMips64Swc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMips64Ldc1,
kMips64Sdc1,
&InstructionSelectorTest::Stream::IsDouble,
{-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
-87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
- {kMachInt64,
+ {MachineType::Int64(),
kMips64Ld,
kMips64Sd,
&InstructionSelectorTest::Stream::IsInteger,
@@ -565,42 +1108,42 @@
const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
- {kMachInt8,
+ {MachineType::Int8(),
kMips64Lb,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt8,
+ {MachineType::Int8(),
kMips64Lbu,
kMips64Sb,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMips64Lh,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt16,
+ {MachineType::Int16(),
kMips64Lhu,
kMips64Sh,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt32,
+ {MachineType::Int32(),
kMips64Lw,
kMips64Sw,
&InstructionSelectorTest::Stream::IsInteger,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat32,
+ {MachineType::Float32(),
kMips64Lwc1,
kMips64Swc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachFloat64,
+ {MachineType::Float64(),
kMips64Ldc1,
kMips64Sdc1,
&InstructionSelectorTest::Stream::IsDouble,
{-65000, -55000, 32777, 55000, 65000}},
- {kMachInt64,
+ {MachineType::Int64(),
kMips64Ld,
kMips64Sd,
&InstructionSelectorTest::Stream::IsInteger,
@@ -614,7 +1157,8 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -625,8 +1169,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -650,7 +1196,7 @@
TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -673,9 +1219,10 @@
TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
const MemoryAccessImm memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
- m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -705,7 +1252,7 @@
LoadWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, memacc.type, kMachPtr);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -722,9 +1269,10 @@
StoreWithImmediateIndex) {
const MemoryAccessImm1 memacc = GetParam();
TRACED_FOREACH(int32_t, index, memacc.immediates) {
- StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
- m.Parameter(1));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(2U, s.size());
@@ -749,11 +1297,11 @@
TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -761,11 +1309,11 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
+ EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
EXPECT_EQ(kMode_None, s[0]->addressing_mode());
ASSERT_EQ(2U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -777,7 +1325,7 @@
TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -789,7 +1337,7 @@
EXPECT_EQ(kEqual, s[0]->flags_condition());
}
{
- StreamBuilder m(this, kMachInt64, kMachInt64);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
m.Return(m.Word64Equal(m.Int32Constant(0), m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -802,6 +1350,134 @@
}
}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Clz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Word64Clz) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Dclz, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64AbsS, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64AbsD, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Max) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float32Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float32Max is `(b < a) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Float32Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float32Min) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(),
+ MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float32Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float32Min is `(a < b) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Float32Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float64Max is `(b < a) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Float64Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ // Float64Min is `(a < b) ? a : b`.
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kMips64Float64Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/move-optimizer-unittest.cc b/test/unittests/compiler/move-optimizer-unittest.cc
index 5b956f0..413c58b 100644
--- a/test/unittests/compiler/move-optimizer-unittest.cc
+++ b/test/unittests/compiler/move-optimizer-unittest.cc
@@ -11,37 +11,29 @@
class MoveOptimizerTest : public InstructionSequenceTest {
public:
- GapInstruction* LastGap() {
- auto instruction = sequence()->instructions().back();
- if (!instruction->IsGapMoves()) {
- instruction = *(sequence()->instructions().rbegin() + 1);
- }
- return GapInstruction::cast(instruction);
+ Instruction* LastInstruction() { return sequence()->instructions().back(); }
+
+ void AddMove(Instruction* instr, TestOperand from, TestOperand to,
+ Instruction::GapPosition pos = Instruction::START) {
+ auto parallel_move = instr->GetOrCreateParallelMove(pos, zone());
+ parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to));
}
- void AddMove(GapInstruction* gap, TestOperand from, TestOperand to,
- GapInstruction::InnerPosition pos = GapInstruction::START) {
- auto parallel_move = gap->GetOrCreateParallelMove(pos, zone());
- parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to), zone());
- }
-
- int NonRedundantSize(ParallelMove* move) {
+ int NonRedundantSize(ParallelMove* moves) {
int i = 0;
- auto ops = move->move_operands();
- for (auto op = ops->begin(); op != ops->end(); ++op) {
- if (op->IsRedundant()) continue;
+ for (auto move : *moves) {
+ if (move->IsRedundant()) continue;
i++;
}
return i;
}
- bool Contains(ParallelMove* move, TestOperand from_op, TestOperand to_op) {
+ bool Contains(ParallelMove* moves, TestOperand from_op, TestOperand to_op) {
auto from = ConvertMoveArg(from_op);
auto to = ConvertMoveArg(to_op);
- auto ops = move->move_operands();
- for (auto op = ops->begin(); op != ops->end(); ++op) {
- if (op->IsRedundant()) continue;
- if (op->source()->Equals(from) && op->destination()->Equals(to)) {
+ for (auto move : *moves) {
+ if (move->IsRedundant()) continue;
+ if (move->source().Equals(from) && move->destination().Equals(to)) {
return true;
}
}
@@ -68,48 +60,78 @@
}
private:
- InstructionOperand* ConvertMoveArg(TestOperand op) {
+ InstructionOperand ConvertMoveArg(TestOperand op) {
CHECK_EQ(kNoValue, op.vreg_.value_);
CHECK_NE(kNoValue, op.value_);
switch (op.type_) {
case kConstant:
- return ConstantOperand::Create(op.value_, zone());
+ return ConstantOperand(op.value_);
case kFixedSlot:
- return StackSlotOperand::Create(op.value_, zone());
+ return AllocatedOperand(LocationOperand::STACK_SLOT,
+ MachineRepresentation::kWord32, op.value_);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
- return RegisterOperand::Create(op.value_, zone());
+ return AllocatedOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, op.value_);
+ case kExplicit:
+ CHECK(0 <= op.value_ && op.value_ < num_general_registers());
+ return ExplicitOperand(LocationOperand::REGISTER,
+ MachineRepresentation::kWord32, op.value_);
default:
break;
}
CHECK(false);
- return nullptr;
+ return InstructionOperand();
}
};
TEST_F(MoveOptimizerTest, RemovesRedundant) {
StartBlock();
- AddMove(LastGap(), Reg(0), Reg(1));
- EmitNop();
- AddMove(LastGap(), Reg(1), Reg(0));
- EmitNop();
+ auto first_instr = EmitNop();
+ AddMove(first_instr, Reg(0), Reg(1));
+ auto last_instr = EmitNop();
+ AddMove(last_instr, Reg(1), Reg(0));
EndBlock(Last());
Optimize();
- auto gap = LastGap();
- auto move = gap->parallel_moves()[0];
+ CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0]));
+ auto move = last_instr->parallel_moves()[0];
CHECK_EQ(1, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
}
+TEST_F(MoveOptimizerTest, RemovesRedundantExplicit) {
+ int first_reg_index =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableGeneralCode(0);
+ int second_reg_index =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN)
+ ->GetAllocatableGeneralCode(1);
+
+ StartBlock();
+ auto first_instr = EmitNop();
+ AddMove(first_instr, Reg(first_reg_index), ExplicitReg(second_reg_index));
+ auto last_instr = EmitNop();
+ AddMove(last_instr, Reg(second_reg_index), Reg(first_reg_index));
+ EndBlock(Last());
+
+ Optimize();
+
+ CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0]));
+ auto move = last_instr->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(first_reg_index), ExplicitReg(second_reg_index)));
+}
+
+
TEST_F(MoveOptimizerTest, SplitsConstants) {
StartBlock();
EndBlock(Last());
- auto gap = LastGap();
+ auto gap = LastInstruction();
AddMove(gap, Const(1), Slot(0));
AddMove(gap, Const(1), Slot(1));
AddMove(gap, Const(1), Reg(0));
@@ -128,6 +150,102 @@
CHECK(Contains(move, Reg(0), Slot(2)));
}
+
+TEST_F(MoveOptimizerTest, SimpleMerge) {
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+ AddMove(LastInstruction(), Reg(0), Reg(1));
+
+ StartBlock();
+ EndBlock(Jump(1));
+ AddMove(LastInstruction(), Reg(0), Reg(1));
+
+ StartBlock();
+ EndBlock(Last());
+
+ auto last = LastInstruction();
+
+ Optimize();
+
+ auto move = last->parallel_moves()[0];
+ CHECK_EQ(1, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(0), Reg(1)));
+}
+
+
+TEST_F(MoveOptimizerTest, SimpleMergeCycle) {
+ StartBlock();
+ EndBlock(Branch(Imm(), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+ auto gap_0 = LastInstruction();
+ AddMove(gap_0, Reg(0), Reg(1));
+ AddMove(LastInstruction(), Reg(1), Reg(0));
+
+ StartBlock();
+ EndBlock(Jump(1));
+ auto gap_1 = LastInstruction();
+ AddMove(gap_1, Reg(0), Reg(1));
+ AddMove(gap_1, Reg(1), Reg(0));
+
+ StartBlock();
+ EndBlock(Last());
+
+ auto last = LastInstruction();
+
+ Optimize();
+
+ CHECK(gap_0->AreMovesRedundant());
+ CHECK(gap_1->AreMovesRedundant());
+ auto move = last->parallel_moves()[0];
+ CHECK_EQ(2, NonRedundantSize(move));
+ CHECK(Contains(move, Reg(0), Reg(1)));
+ CHECK(Contains(move, Reg(1), Reg(0)));
+}
+
+
+TEST_F(MoveOptimizerTest, GapsCanMoveOverInstruction) {
+ StartBlock();
+ int const_index = 1;
+ DefineConstant(const_index);
+ Instruction* ctant_def = LastInstruction();
+ AddMove(ctant_def, Reg(1), Reg(0));
+
+ Instruction* last = EmitNop();
+ AddMove(last, Const(const_index), Reg(0));
+ AddMove(last, Reg(0), Reg(1));
+ EndBlock(Last());
+ Optimize();
+
+ ParallelMove* inst1_start =
+ ctant_def->GetParallelMove(Instruction::GapPosition::START);
+ ParallelMove* inst1_end =
+ ctant_def->GetParallelMove(Instruction::GapPosition::END);
+ ParallelMove* last_start =
+ last->GetParallelMove(Instruction::GapPosition::START);
+ CHECK(inst1_start == nullptr || inst1_start->size() == 0);
+ CHECK(inst1_end == nullptr || inst1_end->size() == 0);
+ CHECK(last_start->size() == 2);
+ int redundants = 0;
+ int assignment = 0;
+ for (MoveOperands* move : *last_start) {
+ if (move->IsRedundant()) {
+ ++redundants;
+ } else {
+ ++assignment;
+ CHECK(move->destination().IsRegister());
+ CHECK(move->source().IsConstant());
+ }
+ }
+ CHECK_EQ(1, redundants);
+ CHECK_EQ(1, assignment);
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/node-cache-unittest.cc b/test/unittests/compiler/node-cache-unittest.cc
new file mode 100644
index 0000000..3c92876
--- /dev/null
+++ b/test/unittests/compiler/node-cache-unittest.cc
@@ -0,0 +1,159 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-cache.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::Contains;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef GraphTest NodeCacheTest;
+
+TEST_F(NodeCacheTest, Int32Constant_back_to_back) {
+ Int32NodeCache cache;
+
+ for (int i = -2000000000; i < 2000000000; i += 3315177) {
+ Node** pos = cache.Find(zone(), i);
+ ASSERT_TRUE(pos != nullptr);
+ for (int j = 0; j < 3; j++) {
+ Node** npos = cache.Find(zone(), i);
+ EXPECT_EQ(pos, npos);
+ }
+ }
+}
+
+
+TEST_F(NodeCacheTest, Int32Constant_five) {
+ Int32NodeCache cache;
+ int32_t constants[] = {static_cast<int32_t>(0x80000000), -77, 0, 1, -1};
+ Node* nodes[arraysize(constants)];
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ int32_t k = constants[i];
+ Node* node = graph()->NewNode(common()->Int32Constant(k));
+ *cache.Find(zone(), k) = nodes[i] = node;
+ }
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ int32_t k = constants[i];
+ EXPECT_EQ(nodes[i], *cache.Find(zone(), k));
+ }
+}
+
+
+TEST_F(NodeCacheTest, Int32Constant_hits) {
+ Int32NodeCache cache;
+ const int32_t kSize = 1500;
+ Node** nodes = zone()->NewArray<Node*>(kSize);
+
+ for (int i = 0; i < kSize; i++) {
+ int32_t v = i * -55;
+ nodes[i] = graph()->NewNode(common()->Int32Constant(v));
+ *cache.Find(zone(), v) = nodes[i];
+ }
+
+ int hits = 0;
+ for (int i = 0; i < kSize; i++) {
+ int32_t v = i * -55;
+ Node** pos = cache.Find(zone(), v);
+ if (*pos != NULL) {
+ EXPECT_EQ(nodes[i], *pos);
+ hits++;
+ }
+ }
+ EXPECT_LT(4, hits);
+}
+
+
+TEST_F(NodeCacheTest, Int64Constant_back_to_back) {
+ Int64NodeCache cache;
+
+ for (int64_t i = -2000000000; i < 2000000000; i += 3315177) {
+ Node** pos = cache.Find(zone(), i);
+ ASSERT_TRUE(pos != nullptr);
+ for (int j = 0; j < 3; j++) {
+ Node** npos = cache.Find(zone(), i);
+ EXPECT_EQ(pos, npos);
+ }
+ }
+}
+
+
+TEST_F(NodeCacheTest, Int64Constant_hits) {
+ Int64NodeCache cache;
+ const int32_t kSize = 1500;
+ Node** nodes = zone()->NewArray<Node*>(kSize);
+
+ for (int i = 0; i < kSize; i++) {
+ int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+ nodes[i] = graph()->NewNode(common()->Int32Constant(i));
+ *cache.Find(zone(), v) = nodes[i];
+ }
+
+ int hits = 0;
+ for (int i = 0; i < kSize; i++) {
+ int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+ Node** pos = cache.Find(zone(), v);
+ if (*pos != NULL) {
+ EXPECT_EQ(nodes[i], *pos);
+ hits++;
+ }
+ }
+ EXPECT_LT(4, hits);
+}
+
+
+TEST_F(NodeCacheTest, GetCachedNodes_int32) {
+ Int32NodeCache cache;
+ int32_t constants[] = {0, 311, 12, 13, 14, 555, -555, -44, -33, -22, -11,
+ 0, 311, 311, 412, 412, 11, 11, -33, -33, -22, -11};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ int32_t k = constants[i];
+ Node** pos = cache.Find(zone(), k);
+ if (*pos != NULL) {
+ ZoneVector<Node*> nodes(zone());
+ cache.GetCachedNodes(&nodes);
+ EXPECT_THAT(nodes, Contains(*pos));
+ } else {
+ ZoneVector<Node*> nodes(zone());
+ Node* n = graph()->NewNode(common()->Int32Constant(k));
+ *pos = n;
+ cache.GetCachedNodes(&nodes);
+ EXPECT_THAT(nodes, Contains(n));
+ }
+ }
+}
+
+
+TEST_F(NodeCacheTest, GetCachedNodes_int64) {
+ Int64NodeCache cache;
+ int64_t constants[] = {0, 311, 12, 13, 14, 555, -555, -44, -33, -22, -11,
+ 0, 311, 311, 412, 412, 11, 11, -33, -33, -22, -11};
+
+ for (size_t i = 0; i < arraysize(constants); i++) {
+ int64_t k = constants[i];
+ Node** pos = cache.Find(zone(), k);
+ if (*pos != NULL) {
+ ZoneVector<Node*> nodes(zone());
+ cache.GetCachedNodes(&nodes);
+ EXPECT_THAT(nodes, Contains(*pos));
+ } else {
+ ZoneVector<Node*> nodes(zone());
+ Node* n = graph()->NewNode(common()->Int64Constant(k));
+ *pos = n;
+ cache.GetCachedNodes(&nodes);
+ EXPECT_THAT(nodes, Contains(n));
+ }
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/node-matchers-unittest.cc b/test/unittests/compiler/node-matchers-unittest.cc
index 85db9db..f0cc407 100644
--- a/test/unittests/compiler/node-matchers-unittest.cc
+++ b/test/unittests/compiler/node-matchers-unittest.cc
@@ -19,7 +19,7 @@
class NodeMatcherTest : public GraphTest {
public:
NodeMatcherTest() : machine_(zone()) {}
- ~NodeMatcherTest() OVERRIDE {}
+ ~NodeMatcherTest() override {}
MachineOperatorBuilder* machine() { return &machine_; }
@@ -38,7 +38,8 @@
EXPECT_EQ(base, matcher->base());
EXPECT_EQ(displacement, matcher->displacement());
}
-};
+
+} // namespace
TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
@@ -728,6 +729,224 @@
}
+TEST_F(NodeMatcherTest, BranchMatcher_match) {
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ BranchMatcher matcher(branch);
+ EXPECT_TRUE(matcher.Matched());
+ EXPECT_EQ(branch, matcher.Branch());
+ EXPECT_EQ(if_true, matcher.IfTrue());
+ EXPECT_EQ(if_false, matcher.IfFalse());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ BranchMatcher matcher(branch);
+ EXPECT_TRUE(matcher.Matched());
+ EXPECT_EQ(branch, matcher.Branch());
+ EXPECT_EQ(if_true, matcher.IfTrue());
+ EXPECT_EQ(if_false, matcher.IfFalse());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* other = graph()->NewNode(common()->IfValue(33), branch);
+ BranchMatcher matcher(branch);
+ EXPECT_TRUE(matcher.Matched());
+ EXPECT_EQ(branch, matcher.Branch());
+ EXPECT_EQ(if_true, matcher.IfTrue());
+ EXPECT_EQ(if_false, matcher.IfFalse());
+ USE(other);
+ }
+}
+
+
+TEST_F(NodeMatcherTest, BranchMatcher_fail) {
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ BranchMatcher matcher(branch);
+ EXPECT_FALSE(matcher.Matched());
+ USE(if_true);
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ BranchMatcher matcher(branch);
+ EXPECT_FALSE(matcher.Matched());
+ USE(if_false);
+ }
+
+ {
+ BranchMatcher matcher(zero);
+ EXPECT_FALSE(matcher.Matched());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ EXPECT_TRUE(BranchMatcher(branch).Matched());
+ EXPECT_FALSE(BranchMatcher(if_true).Matched());
+ EXPECT_FALSE(BranchMatcher(if_false).Matched());
+ }
+
+ {
+ Node* sw = graph()->NewNode(common()->Switch(5), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), sw);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), sw);
+ EXPECT_FALSE(BranchMatcher(sw).Matched());
+ EXPECT_FALSE(BranchMatcher(if_true).Matched());
+ EXPECT_FALSE(BranchMatcher(if_false).Matched());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_value = graph()->NewNode(common()->IfValue(2), branch);
+ BranchMatcher matcher(branch);
+ EXPECT_FALSE(matcher.Matched());
+ EXPECT_FALSE(BranchMatcher(if_true).Matched());
+ EXPECT_FALSE(BranchMatcher(if_value).Matched());
+ }
+}
+
+
+TEST_F(NodeMatcherTest, DiamondMatcher_match) {
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ DiamondMatcher matcher(merge);
+ EXPECT_TRUE(matcher.Matched());
+ EXPECT_EQ(branch, matcher.Branch());
+ EXPECT_EQ(if_true, matcher.IfTrue());
+ EXPECT_EQ(if_false, matcher.IfFalse());
+ EXPECT_EQ(merge, matcher.Merge());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ DiamondMatcher matcher(merge);
+ EXPECT_TRUE(matcher.Matched());
+ EXPECT_EQ(branch, matcher.Branch());
+ EXPECT_EQ(if_true, matcher.IfTrue());
+ EXPECT_EQ(if_false, matcher.IfFalse());
+ EXPECT_EQ(merge, matcher.Merge());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_false, if_true);
+ DiamondMatcher matcher(merge);
+ EXPECT_TRUE(matcher.Matched());
+ EXPECT_EQ(branch, matcher.Branch());
+ EXPECT_EQ(if_true, matcher.IfTrue());
+ EXPECT_EQ(if_false, matcher.IfFalse());
+ EXPECT_EQ(merge, matcher.Merge());
+ }
+}
+
+
+TEST_F(NodeMatcherTest, DiamondMatcher_fail) {
+ Node* zero = graph()->NewNode(common()->Int32Constant(0));
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_value = graph()->NewNode(common()->IfValue(1), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_value);
+ DiamondMatcher matcher(merge);
+ EXPECT_FALSE(matcher.Matched());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* if_value = graph()->NewNode(common()->IfValue(1), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_false, if_value);
+ DiamondMatcher matcher(merge);
+ EXPECT_FALSE(matcher.Matched());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ DiamondMatcher matcher(merge);
+ EXPECT_TRUE(matcher.Matched());
+ EXPECT_EQ(branch, matcher.Branch());
+ EXPECT_EQ(if_true, matcher.IfTrue());
+ EXPECT_EQ(if_false, matcher.IfFalse());
+ EXPECT_EQ(merge, matcher.Merge());
+
+ EXPECT_FALSE(DiamondMatcher(branch).Matched()); // Must be the merge.
+ EXPECT_FALSE(DiamondMatcher(if_true).Matched());
+ EXPECT_FALSE(DiamondMatcher(if_false).Matched());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(3), if_true, if_false,
+ graph()->start());
+ DiamondMatcher matcher(merge);
+ EXPECT_FALSE(matcher.Matched()); // Too many inputs to merge.
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+ Node* if_false = graph()->start();
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ DiamondMatcher matcher(merge);
+ EXPECT_FALSE(matcher.Matched());
+ }
+
+ {
+ Node* branch = graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->start();
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ DiamondMatcher matcher(merge);
+ EXPECT_FALSE(matcher.Matched());
+ }
+
+ {
+ Node* branch1 =
+ graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* branch2 =
+ graph()->NewNode(common()->Branch(), zero, graph()->start());
+ Node* if_true = graph()->NewNode(common()->IfTrue(), branch1);
+ Node* if_false = graph()->NewNode(common()->IfFalse(), branch2);
+ Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+ DiamondMatcher matcher(merge);
+ EXPECT_FALSE(matcher.Matched());
+ }
+}
+
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/node-properties-unittest.cc b/test/unittests/compiler/node-properties-unittest.cc
new file mode 100644
index 0000000..463948d
--- /dev/null
+++ b/test/unittests/compiler/node-properties-unittest.cc
@@ -0,0 +1,125 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-properties.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::AnyOf;
+using testing::ElementsAre;
+using testing::IsNull;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodePropertiesTest : public TestWithZone {
+ public:
+ Node* NewMockNode(const Operator* op) {
+ return Node::New(zone(), 0, op, 0, nullptr, false);
+ }
+ Node* NewMockNode(const Operator* op, Node* n1) {
+ Node* nodes[] = {n1};
+ return Node::New(zone(), 0, op, arraysize(nodes), nodes, false);
+ }
+ Node* NewMockNode(const Operator* op, Node* n1, Node* n2) {
+ Node* nodes[] = {n1, n2};
+ return Node::New(zone(), 0, op, arraysize(nodes), nodes, false);
+ }
+};
+
+namespace {
+
+const Operator kMockOperator(IrOpcode::kDead, Operator::kNoProperties,
+ "MockOperator", 0, 0, 0, 1, 1, 2);
+const Operator kMockCallOperator(IrOpcode::kCall, Operator::kNoProperties,
+ "MockCallOperator", 0, 0, 0, 0, 0, 2);
+
+const IfExceptionHint kNoHint = IfExceptionHint::kLocallyCaught;
+
+} // namespace
+
+
+TEST_F(NodePropertiesTest, ReplaceUses) {
+ CommonOperatorBuilder common(zone());
+ Node* node = NewMockNode(&kMockOperator);
+ Node* effect = NewMockNode(&kMockOperator);
+ Node* use_value = NewMockNode(common.Return(), node);
+ Node* use_effect = NewMockNode(common.EffectPhi(1), node);
+ Node* use_success = NewMockNode(common.IfSuccess(), node);
+ Node* use_exception = NewMockNode(common.IfException(kNoHint), effect, node);
+ Node* r_value = NewMockNode(&kMockOperator);
+ Node* r_effect = NewMockNode(&kMockOperator);
+ Node* r_success = NewMockNode(&kMockOperator);
+ Node* r_exception = NewMockNode(&kMockOperator);
+ NodeProperties::ReplaceUses(node, r_value, r_effect, r_success, r_exception);
+ EXPECT_EQ(r_value, use_value->InputAt(0));
+ EXPECT_EQ(r_effect, use_effect->InputAt(0));
+ EXPECT_EQ(r_success, use_success->InputAt(0));
+ EXPECT_EQ(r_exception, use_exception->InputAt(1));
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_EQ(1, r_value->UseCount());
+ EXPECT_EQ(1, r_effect->UseCount());
+ EXPECT_EQ(1, r_success->UseCount());
+ EXPECT_EQ(1, r_exception->UseCount());
+ EXPECT_THAT(r_value->uses(), ElementsAre(use_value));
+ EXPECT_THAT(r_effect->uses(), ElementsAre(use_effect));
+ EXPECT_THAT(r_success->uses(), ElementsAre(use_success));
+ EXPECT_THAT(r_exception->uses(), ElementsAre(use_exception));
+}
+
+
+TEST_F(NodePropertiesTest, FindProjection) {
+ CommonOperatorBuilder common(zone());
+ Node* start = NewMockNode(common.Start(1));
+ Node* proj0 = NewMockNode(common.Projection(0), start);
+ Node* proj1 = NewMockNode(common.Projection(1), start);
+ EXPECT_EQ(proj0, NodeProperties::FindProjection(start, 0));
+ EXPECT_EQ(proj1, NodeProperties::FindProjection(start, 1));
+ EXPECT_THAT(NodeProperties::FindProjection(start, 2), IsNull());
+ EXPECT_THAT(NodeProperties::FindProjection(start, 1234567890), IsNull());
+}
+
+
+TEST_F(NodePropertiesTest, CollectControlProjections_Branch) {
+ Node* result[2];
+ CommonOperatorBuilder common(zone());
+ Node* branch = NewMockNode(common.Branch());
+ Node* if_false = NewMockNode(common.IfFalse(), branch);
+ Node* if_true = NewMockNode(common.IfTrue(), branch);
+ NodeProperties::CollectControlProjections(branch, result, arraysize(result));
+ EXPECT_EQ(if_true, result[0]);
+ EXPECT_EQ(if_false, result[1]);
+}
+
+
+TEST_F(NodePropertiesTest, CollectControlProjections_Call) {
+ Node* result[2];
+ CommonOperatorBuilder common(zone());
+ Node* call = NewMockNode(&kMockCallOperator);
+ Node* if_ex = NewMockNode(common.IfException(kNoHint), call, call);
+ Node* if_ok = NewMockNode(common.IfSuccess(), call);
+ NodeProperties::CollectControlProjections(call, result, arraysize(result));
+ EXPECT_EQ(if_ok, result[0]);
+ EXPECT_EQ(if_ex, result[1]);
+}
+
+
+TEST_F(NodePropertiesTest, CollectControlProjections_Switch) {
+ Node* result[3];
+ CommonOperatorBuilder common(zone());
+ Node* sw = NewMockNode(common.Switch(3));
+ Node* if_default = NewMockNode(common.IfDefault(), sw);
+ Node* if_value1 = NewMockNode(common.IfValue(1), sw);
+ Node* if_value2 = NewMockNode(common.IfValue(2), sw);
+ NodeProperties::CollectControlProjections(sw, result, arraysize(result));
+ EXPECT_THAT(result[0], AnyOf(if_value1, if_value2));
+ EXPECT_THAT(result[1], AnyOf(if_value1, if_value2));
+ EXPECT_EQ(if_default, result[2]);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/node-test-utils.cc b/test/unittests/compiler/node-test-utils.cc
index 74afda9..54168ee 100644
--- a/test/unittests/compiler/node-test-utils.cc
+++ b/test/unittests/compiler/node-test-utils.cc
@@ -4,9 +4,15 @@
#include "test/unittests/compiler/node-test-utils.h"
+#include <vector>
+
#include "src/assembler.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
+#include "src/handles-inl.h"
+#include "src/objects.h"
using testing::_;
using testing::MakeMatcher;
@@ -16,12 +22,17 @@
namespace v8 {
namespace internal {
+
+bool operator==(Handle<HeapObject> const& lhs, Handle<HeapObject> const& rhs) {
+ return lhs.is_identical_to(rhs);
+}
+
namespace compiler {
namespace {
template <typename T>
-bool PrintMatchAndExplain(const T& value, const char* value_name,
+bool PrintMatchAndExplain(const T& value, const std::string& value_name,
const Matcher<T>& value_matcher,
MatchResultListener* listener) {
StringMatchResultListener value_listener;
@@ -40,12 +51,12 @@
public:
explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
- void DescribeTo(std::ostream* os) const OVERRIDE {
+ void DescribeTo(std::ostream* os) const override {
*os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
}
bool MatchAndExplain(Node* node,
- MatchResultListener* listener) const OVERRIDE {
+ MatchResultListener* listener) const override {
if (node == NULL) {
*listener << "which is NULL";
return false;
@@ -63,7 +74,7 @@
};
-class IsBranchMatcher FINAL : public NodeMatcher {
+class IsBranchMatcher final : public NodeMatcher {
public:
IsBranchMatcher(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher)
@@ -71,7 +82,7 @@
value_matcher_(value_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
@@ -80,7 +91,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
@@ -94,15 +105,102 @@
};
-class IsMergeMatcher FINAL : public NodeMatcher {
+class IsSwitchMatcher final : public NodeMatcher {
public:
- IsMergeMatcher(const Matcher<Node*>& control0_matcher,
- const Matcher<Node*>& control1_matcher)
- : NodeMatcher(IrOpcode::kMerge),
+ IsSwitchMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kSwitch),
+ value_matcher_(value_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsIfValueMatcher final : public NodeMatcher {
+ public:
+ IsIfValueMatcher(const Matcher<int32_t>& value_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kIfValue),
+ value_matcher_(value_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<int32_t>(node->op()), "value",
+ value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<int32_t> value_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsControl1Matcher final : public NodeMatcher {
+ public:
+ IsControl1Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsControl2Matcher final : public NodeMatcher {
+ public:
+ IsControl2Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher)
+ : NodeMatcher(opcode),
control0_matcher_(control0_matcher),
control1_matcher_(control1_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose control0 (";
control0_matcher_.DescribeTo(os);
@@ -111,7 +209,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
"control0", control0_matcher_, listener) &&
@@ -125,39 +223,77 @@
};
-class IsControl1Matcher FINAL : public NodeMatcher {
+class IsControl3Matcher final : public NodeMatcher {
public:
- IsControl1Matcher(IrOpcode::Value opcode,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+ IsControl3Matcher(IrOpcode::Value opcode,
+ const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher)
+ : NodeMatcher(opcode),
+ control0_matcher_(control0_matcher),
+ control1_matcher_(control1_matcher),
+ control2_matcher_(control2_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose control (";
- control_matcher_.DescribeTo(os);
+ *os << " whose control0 (";
+ control0_matcher_.DescribeTo(os);
+ *os << ") and control1 (";
+ control1_matcher_.DescribeTo(os);
+ *os << ") and control2 (";
+ control2_matcher_.DescribeTo(os);
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
+ "control0", control0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
+ "control1", control1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node, 2),
+ "control2", control2_matcher_, listener));
}
private:
- const Matcher<Node*> control_matcher_;
+ const Matcher<Node*> control0_matcher_;
+ const Matcher<Node*> control1_matcher_;
+ const Matcher<Node*> control2_matcher_;
};
-class IsFinishMatcher FINAL : public NodeMatcher {
+class IsBeginRegionMatcher final : public NodeMatcher {
public:
- IsFinishMatcher(const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& effect_matcher)
- : NodeMatcher(IrOpcode::kFinish),
+ explicit IsBeginRegionMatcher(const Matcher<Node*>& effect_matcher)
+ : NodeMatcher(IrOpcode::kBeginRegion), effect_matcher_(effect_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> effect_matcher_;
+};
+
+
+class IsFinishRegionMatcher final : public NodeMatcher {
+ public:
+ IsFinishRegionMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher)
+ : NodeMatcher(IrOpcode::kFinishRegion),
value_matcher_(value_matcher),
effect_matcher_(effect_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
@@ -166,7 +302,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value", value_matcher_, listener) &&
@@ -180,20 +316,89 @@
};
+class IsReturnMatcher final : public NodeMatcher {
+ public:
+ IsReturnMatcher(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kReturn),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose value (";
+ value_matcher_.DescribeTo(os);
+ *os << ") and effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsTerminateMatcher final : public NodeMatcher {
+ public:
+ IsTerminateMatcher(const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kTerminate),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+ effect_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
template <typename T>
-class IsConstantMatcher FINAL : public NodeMatcher {
+class IsConstantMatcher final : public NodeMatcher {
public:
IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
: NodeMatcher(opcode), value_matcher_(value_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose value (";
value_matcher_.DescribeTo(os);
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
listener));
@@ -204,9 +409,9 @@
};
-class IsSelectMatcher FINAL : public NodeMatcher {
+class IsSelectMatcher final : public NodeMatcher {
public:
- IsSelectMatcher(const Matcher<MachineType>& type_matcher,
+ IsSelectMatcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher)
@@ -216,9 +421,9 @@
value1_matcher_(value1_matcher),
value2_matcher_(value2_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose type (";
+ *os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
value0_matcher_.DescribeTo(os);
@@ -229,29 +434,30 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
- type_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
- "value0", value0_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "value1", value1_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "value2", value2_matcher_, listener));
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (
+ NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(SelectParametersOf(node->op()).representation(),
+ "representation", type_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "value0",
+ value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "value1",
+ value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2), "value2",
+ value2_matcher_, listener));
}
private:
- const Matcher<MachineType> type_matcher_;
+ const Matcher<MachineRepresentation> type_matcher_;
const Matcher<Node*> value0_matcher_;
const Matcher<Node*> value1_matcher_;
const Matcher<Node*> value2_matcher_;
};
-class IsPhiMatcher FINAL : public NodeMatcher {
+class IsPhiMatcher final : public NodeMatcher {
public:
- IsPhiMatcher(const Matcher<MachineType>& type_matcher,
+ IsPhiMatcher(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& control_matcher)
@@ -261,9 +467,9 @@
value1_matcher_(value1_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose type (";
+ *os << " whose representation (";
type_matcher_.DescribeTo(os);
*os << "), value0 (";
value0_matcher_.DescribeTo(os);
@@ -274,10 +480,10 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
- type_matcher_, listener) &&
+ PrintMatchAndExplain(PhiRepresentationOf(node->op()),
+ "representation", type_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"value0", value0_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
@@ -287,14 +493,66 @@
}
private:
- const Matcher<MachineType> type_matcher_;
+ const Matcher<MachineRepresentation> type_matcher_;
const Matcher<Node*> value0_matcher_;
const Matcher<Node*> value1_matcher_;
const Matcher<Node*> control_matcher_;
};
-class IsEffectPhiMatcher FINAL : public NodeMatcher {
+class IsPhi2Matcher final : public NodeMatcher {
+ public:
+ IsPhi2Matcher(const Matcher<MachineRepresentation>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kPhi),
+ type_matcher_(type_matcher),
+ value0_matcher_(value0_matcher),
+ value1_matcher_(value1_matcher),
+ value2_matcher_(value2_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose representation (";
+ type_matcher_.DescribeTo(os);
+ *os << "), value0 (";
+ value0_matcher_.DescribeTo(os);
+ *os << "), value1 (";
+ value1_matcher_.DescribeTo(os);
+ *os << "), value2 (";
+ value2_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(PhiRepresentationOf(node->op()),
+ "representation", type_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+ "value0", value0_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "value1", value1_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "value2", value2_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+ "control", control_matcher_, listener));
+ }
+
+ private:
+ const Matcher<MachineRepresentation> type_matcher_;
+ const Matcher<Node*> value0_matcher_;
+ const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> value2_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsEffectPhiMatcher final : public NodeMatcher {
public:
IsEffectPhiMatcher(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
@@ -304,7 +562,7 @@
effect1_matcher_(effect1_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << "), effect0 (";
effect0_matcher_.DescribeTo(os);
@@ -315,7 +573,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 0),
"effect0", effect0_matcher_, listener) &&
@@ -332,7 +590,50 @@
};
-class IsProjectionMatcher FINAL : public NodeMatcher {
+class IsEffectSetMatcher final : public NodeMatcher {
+ public:
+ IsEffectSetMatcher(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher)
+ : NodeMatcher(IrOpcode::kEffectSet),
+ effect0_matcher_(effect0_matcher),
+ effect1_matcher_(effect1_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << "), effect0 (";
+ effect0_matcher_.DescribeTo(os);
+ *os << ") and effect1 (";
+ effect1_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ if (!NodeMatcher::MatchAndExplain(node, listener)) return false;
+
+ Node* effect0 = NodeProperties::GetEffectInput(node, 0);
+ Node* effect1 = NodeProperties::GetEffectInput(node, 1);
+
+ {
+ // Try matching in the reverse order first.
+ StringMatchResultListener value_listener;
+ if (effect0_matcher_.MatchAndExplain(effect1, &value_listener) &&
+ effect1_matcher_.MatchAndExplain(effect0, &value_listener)) {
+ return true;
+ }
+ }
+
+ return PrintMatchAndExplain(effect0, "effect0", effect0_matcher_,
+ listener) &&
+ PrintMatchAndExplain(effect1, "effect1", effect1_matcher_, listener);
+ }
+
+ private:
+ const Matcher<Node*> effect0_matcher_;
+ const Matcher<Node*> effect1_matcher_;
+};
+
+
+class IsProjectionMatcher final : public NodeMatcher {
public:
IsProjectionMatcher(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher)
@@ -340,7 +641,7 @@
index_matcher_(index_matcher),
base_matcher_(base_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose index (";
index_matcher_.DescribeTo(os);
@@ -349,7 +650,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<size_t>(node), "index",
index_matcher_, listener) &&
@@ -363,41 +664,178 @@
};
-class IsCall2Matcher FINAL : public NodeMatcher {
+class IsCallMatcher final : public NodeMatcher {
public:
- IsCall2Matcher(const Matcher<CallDescriptor*>& descriptor_matcher,
- const Matcher<Node*>& value0_matcher,
- const Matcher<Node*>& value1_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
+ IsCallMatcher(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const std::vector<Matcher<Node*>>& value_matchers,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
: NodeMatcher(IrOpcode::kCall),
descriptor_matcher_(descriptor_matcher),
- value0_matcher_(value0_matcher),
- value1_matcher_(value1_matcher),
+ value_matchers_(value_matchers),
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose value0 (";
- value0_matcher_.DescribeTo(os);
- *os << ") and value1 (";
- value1_matcher_.DescribeTo(os);
- *os << ") and effect (";
+ for (size_t i = 0; i < value_matchers_.size(); ++i) {
+ if (i == 0) {
+ *os << " whose value0 (";
+ } else {
+ *os << "), value" << i << " (";
+ }
+ value_matchers_[i].DescribeTo(os);
+ }
+ *os << "), effect (";
effect_matcher_.DescribeTo(os);
*os << ") and control (";
control_matcher_.DescribeTo(os);
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ if (!NodeMatcher::MatchAndExplain(node, listener) ||
+ !PrintMatchAndExplain(OpParameter<const CallDescriptor*>(node),
+ "descriptor", descriptor_matcher_, listener)) {
+ return false;
+ }
+ for (size_t i = 0; i < value_matchers_.size(); ++i) {
+ std::ostringstream ost;
+ ost << "value" << i;
+ if (!PrintMatchAndExplain(
+ NodeProperties::GetValueInput(node, static_cast<int>(i)),
+ ost.str(), value_matchers_[i], listener)) {
+ return false;
+ }
+ }
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
+ return (PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
+ }
+
+ private:
+ const Matcher<const CallDescriptor*> descriptor_matcher_;
+ const std::vector<Matcher<Node*>> value_matchers_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsTailCallMatcher final : public NodeMatcher {
+ public:
+ IsTailCallMatcher(const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const std::vector<Matcher<Node*>>& value_matchers,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kTailCall),
+ descriptor_matcher_(descriptor_matcher),
+ value_matchers_(value_matchers),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ for (size_t i = 0; i < value_matchers_.size(); ++i) {
+ if (i == 0) {
+ *os << " whose value0 (";
+ } else {
+ *os << "), value" << i << " (";
+ }
+ value_matchers_[i].DescribeTo(os);
+ }
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ if (!NodeMatcher::MatchAndExplain(node, listener) ||
+ !PrintMatchAndExplain(OpParameter<CallDescriptor const*>(node),
+ "descriptor", descriptor_matcher_, listener)) {
+ return false;
+ }
+ for (size_t i = 0; i < value_matchers_.size(); ++i) {
+ std::ostringstream ost;
+ ost << "value" << i;
+ if (!PrintMatchAndExplain(
+ NodeProperties::GetValueInput(node, static_cast<int>(i)),
+ ost.str(), value_matchers_[i], listener)) {
+ return false;
+ }
+ }
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
+ return (PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
+ }
+
+ private:
+ const Matcher<CallDescriptor const*> descriptor_matcher_;
+ const std::vector<Matcher<Node*>> value_matchers_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsReferenceEqualMatcher final : public NodeMatcher {
+ public:
+ IsReferenceEqualMatcher(const Matcher<Type*>& type_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher)
+ : NodeMatcher(IrOpcode::kReferenceEqual),
+ type_matcher_(type_matcher),
+ lhs_matcher_(lhs_matcher),
+ rhs_matcher_(rhs_matcher) {}
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
- "descriptor", descriptor_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
- "value0", value0_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "value1", value1_matcher_, listener) &&
+ // TODO(bmeurer): The type parameter is currently ignored.
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+ lhs_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
+ rhs_matcher_, listener));
+ }
+
+ private:
+ const Matcher<Type*> type_matcher_;
+ const Matcher<Node*> lhs_matcher_;
+ const Matcher<Node*> rhs_matcher_;
+};
+
+
+class IsAllocateMatcher final : public NodeMatcher {
+ public:
+ IsAllocateMatcher(const Matcher<Node*>& size_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kAllocate),
+ size_matcher_(size_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "size",
+ size_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
effect_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetControlInput(node),
@@ -405,79 +843,13 @@
}
private:
- const Matcher<CallDescriptor*> descriptor_matcher_;
- const Matcher<Node*> value0_matcher_;
- const Matcher<Node*> value1_matcher_;
+ const Matcher<Node*> size_matcher_;
const Matcher<Node*> effect_matcher_;
const Matcher<Node*> control_matcher_;
};
-class IsCall4Matcher FINAL : public NodeMatcher {
- public:
- IsCall4Matcher(const Matcher<CallDescriptor*>& descriptor_matcher,
- const Matcher<Node*>& value0_matcher,
- const Matcher<Node*>& value1_matcher,
- const Matcher<Node*>& value2_matcher,
- const Matcher<Node*>& value3_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kCall),
- descriptor_matcher_(descriptor_matcher),
- value0_matcher_(value0_matcher),
- value1_matcher_(value1_matcher),
- value2_matcher_(value2_matcher),
- value3_matcher_(value3_matcher),
- effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
-
- void DescribeTo(std::ostream* os) const FINAL {
- NodeMatcher::DescribeTo(os);
- *os << " whose value0 (";
- value0_matcher_.DescribeTo(os);
- *os << ") and value1 (";
- value1_matcher_.DescribeTo(os);
- *os << ") and value2 (";
- value2_matcher_.DescribeTo(os);
- *os << ") and value3 (";
- value3_matcher_.DescribeTo(os);
- *os << ") and effect (";
- effect_matcher_.DescribeTo(os);
- *os << ") and control (";
- control_matcher_.DescribeTo(os);
- *os << ")";
- }
-
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
- return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
- "descriptor", descriptor_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
- "value0", value0_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "value1", value1_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "value2", value2_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
- "value3", value3_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
- }
-
- private:
- const Matcher<CallDescriptor*> descriptor_matcher_;
- const Matcher<Node*> value0_matcher_;
- const Matcher<Node*> value1_matcher_;
- const Matcher<Node*> value2_matcher_;
- const Matcher<Node*> value3_matcher_;
- const Matcher<Node*> effect_matcher_;
- const Matcher<Node*> control_matcher_;
-};
-
-
-class IsLoadFieldMatcher FINAL : public NodeMatcher {
+class IsLoadFieldMatcher final : public NodeMatcher {
public:
IsLoadFieldMatcher(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
@@ -489,7 +861,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
@@ -502,7 +874,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
access_matcher_, listener) &&
@@ -522,7 +894,7 @@
};
-class IsStoreFieldMatcher FINAL : public NodeMatcher {
+class IsStoreFieldMatcher final : public NodeMatcher {
public:
IsStoreFieldMatcher(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
@@ -536,7 +908,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
@@ -551,7 +923,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
access_matcher_, listener) &&
@@ -574,7 +946,7 @@
};
-class IsLoadBufferMatcher FINAL : public NodeMatcher {
+class IsLoadBufferMatcher final : public NodeMatcher {
public:
IsLoadBufferMatcher(const Matcher<BufferAccess>& access_matcher,
const Matcher<Node*>& buffer_matcher,
@@ -590,7 +962,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
@@ -607,7 +979,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(BufferAccessOf(node->op()), "access",
access_matcher_, listener) &&
@@ -633,7 +1005,7 @@
};
-class IsStoreBufferMatcher FINAL : public NodeMatcher {
+class IsStoreBufferMatcher final : public NodeMatcher {
public:
IsStoreBufferMatcher(const Matcher<BufferAccess>& access_matcher,
const Matcher<Node*>& buffer_matcher,
@@ -651,7 +1023,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
@@ -670,7 +1042,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(BufferAccessOf(node->op()), "access",
access_matcher_, listener) &&
@@ -699,7 +1071,7 @@
};
-class IsLoadElementMatcher FINAL : public NodeMatcher {
+class IsLoadElementMatcher final : public NodeMatcher {
public:
IsLoadElementMatcher(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
@@ -713,7 +1085,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
@@ -728,7 +1100,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
access_matcher_, listener) &&
@@ -751,7 +1123,7 @@
};
-class IsStoreElementMatcher FINAL : public NodeMatcher {
+class IsStoreElementMatcher final : public NodeMatcher {
public:
IsStoreElementMatcher(const Matcher<ElementAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
@@ -767,7 +1139,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose access (";
access_matcher_.DescribeTo(os);
@@ -784,7 +1156,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
access_matcher_, listener) &&
@@ -810,7 +1182,7 @@
};
-class IsLoadMatcher FINAL : public NodeMatcher {
+class IsLoadMatcher final : public NodeMatcher {
public:
IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
@@ -824,7 +1196,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose rep (";
rep_matcher_.DescribeTo(os);
@@ -839,7 +1211,15 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
rep_matcher_, listener) &&
@@ -847,10 +1227,10 @@
base_matcher_, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
"index", index_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
}
private:
@@ -862,7 +1242,74 @@
};
-class IsToNumberMatcher FINAL : public NodeMatcher {
+class IsStoreMatcher final : public NodeMatcher {
+ public:
+ IsStoreMatcher(const Matcher<StoreRepresentation>& rep_matcher,
+ const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& index_matcher,
+ const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher)
+ : NodeMatcher(IrOpcode::kStore),
+ rep_matcher_(rep_matcher),
+ base_matcher_(base_matcher),
+ index_matcher_(index_matcher),
+ value_matcher_(value_matcher),
+ effect_matcher_(effect_matcher),
+ control_matcher_(control_matcher) {}
+
+ void DescribeTo(std::ostream* os) const final {
+ NodeMatcher::DescribeTo(os);
+ *os << " whose rep (";
+ rep_matcher_.DescribeTo(os);
+ *os << "), base (";
+ base_matcher_.DescribeTo(os);
+ *os << "), index (";
+ index_matcher_.DescribeTo(os);
+ *os << "), value (";
+ value_matcher_.DescribeTo(os);
+ *os << "), effect (";
+ effect_matcher_.DescribeTo(os);
+ *os << ") and control (";
+ control_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ Node* effect_node = nullptr;
+ Node* control_node = nullptr;
+ if (NodeProperties::FirstEffectIndex(node) < node->InputCount()) {
+ effect_node = NodeProperties::GetEffectInput(node);
+ }
+ if (NodeProperties::FirstControlIndex(node) < node->InputCount()) {
+ control_node = NodeProperties::GetControlInput(node);
+ }
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(OpParameter<StoreRepresentation>(node), "rep",
+ rep_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+ base_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+ "index", index_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+ "value", value_matcher_, listener) &&
+ PrintMatchAndExplain(effect_node, "effect", effect_matcher_,
+ listener) &&
+ PrintMatchAndExplain(control_node, "control", control_matcher_,
+ listener));
+ }
+
+ private:
+ const Matcher<StoreRepresentation> rep_matcher_;
+ const Matcher<Node*> base_matcher_;
+ const Matcher<Node*> index_matcher_;
+ const Matcher<Node*> value_matcher_;
+ const Matcher<Node*> effect_matcher_;
+ const Matcher<Node*> control_matcher_;
+};
+
+
+class IsToNumberMatcher final : public NodeMatcher {
public:
IsToNumberMatcher(const Matcher<Node*>& base_matcher,
const Matcher<Node*>& context_matcher,
@@ -874,7 +1321,7 @@
effect_matcher_(effect_matcher),
control_matcher_(control_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose base (";
base_matcher_.DescribeTo(os);
@@ -887,7 +1334,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
base_matcher_, listener) &&
@@ -907,66 +1354,38 @@
};
-class IsStoreMatcher FINAL : public NodeMatcher {
+class IsLoadContextMatcher final : public NodeMatcher {
public:
- IsStoreMatcher(const Matcher<StoreRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher)
- : NodeMatcher(IrOpcode::kStore),
- rep_matcher_(rep_matcher),
- base_matcher_(base_matcher),
- index_matcher_(index_matcher),
- value_matcher_(value_matcher),
- effect_matcher_(effect_matcher),
- control_matcher_(control_matcher) {}
+ IsLoadContextMatcher(const Matcher<ContextAccess>& access_matcher,
+ const Matcher<Node*>& context_matcher)
+ : NodeMatcher(IrOpcode::kJSLoadContext),
+ access_matcher_(access_matcher),
+ context_matcher_(context_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
- *os << " whose rep (";
- rep_matcher_.DescribeTo(os);
- *os << "), base (";
- base_matcher_.DescribeTo(os);
- *os << "), index (";
- index_matcher_.DescribeTo(os);
- *os << "), value (";
- value_matcher_.DescribeTo(os);
- *os << "), effect (";
- effect_matcher_.DescribeTo(os);
- *os << ") and control (";
- control_matcher_.DescribeTo(os);
+ *os << " whose access (";
+ access_matcher_.DescribeTo(os);
+ *os << ") and context (";
+ context_matcher_.DescribeTo(os);
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
- PrintMatchAndExplain(OpParameter<StoreRepresentation>(node), "rep",
- rep_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
- base_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
- "index", index_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
- "value", value_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
- effect_matcher_, listener) &&
- PrintMatchAndExplain(NodeProperties::GetControlInput(node),
- "control", control_matcher_, listener));
+ PrintMatchAndExplain(OpParameter<ContextAccess>(node), "access",
+ access_matcher_, listener) &&
+ PrintMatchAndExplain(NodeProperties::GetContextInput(node),
+ "context", context_matcher_, listener));
}
private:
- const Matcher<StoreRepresentation> rep_matcher_;
- const Matcher<Node*> base_matcher_;
- const Matcher<Node*> index_matcher_;
- const Matcher<Node*> value_matcher_;
- const Matcher<Node*> effect_matcher_;
- const Matcher<Node*> control_matcher_;
+ const Matcher<ContextAccess> access_matcher_;
+ const Matcher<Node*> context_matcher_;
};
-class IsBinopMatcher FINAL : public NodeMatcher {
+class IsBinopMatcher final : public NodeMatcher {
public:
IsBinopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher)
@@ -974,7 +1393,7 @@
lhs_matcher_(lhs_matcher),
rhs_matcher_(rhs_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose lhs (";
lhs_matcher_.DescribeTo(os);
@@ -983,7 +1402,7 @@
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
lhs_matcher_, listener) &&
@@ -997,19 +1416,19 @@
};
-class IsUnopMatcher FINAL : public NodeMatcher {
+class IsUnopMatcher final : public NodeMatcher {
public:
IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
: NodeMatcher(opcode), input_matcher_(input_matcher) {}
- void DescribeTo(std::ostream* os) const FINAL {
+ void DescribeTo(std::ostream* os) const final {
NodeMatcher::DescribeTo(os);
*os << " whose input (";
input_matcher_.DescribeTo(os);
*os << ")";
}
- bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
return (NodeMatcher::MatchAndExplain(node, listener) &&
PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
"input", input_matcher_, listener));
@@ -1018,6 +1437,54 @@
private:
const Matcher<Node*> input_matcher_;
};
+
+
+class IsParameterMatcher final : public NodeMatcher {
+ public:
+ explicit IsParameterMatcher(const Matcher<int>& index_matcher)
+ : NodeMatcher(IrOpcode::kParameter), index_matcher_(index_matcher) {}
+
+ void DescribeTo(std::ostream* os) const override {
+ *os << "is a Parameter node with index(";
+ index_matcher_.DescribeTo(os);
+ *os << ")";
+ }
+
+ bool MatchAndExplain(Node* node, MatchResultListener* listener) const final {
+ return (NodeMatcher::MatchAndExplain(node, listener) &&
+ PrintMatchAndExplain(ParameterIndexOf(node->op()), "index",
+ index_matcher_, listener));
+ }
+
+ private:
+ const Matcher<int> index_matcher_;
+};
+
+} // namespace
+
+
+Matcher<Node*> IsDead() {
+ return MakeMatcher(new NodeMatcher(IrOpcode::kDead));
+}
+
+
+Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher) {
+ return MakeMatcher(new IsControl1Matcher(IrOpcode::kEnd, control0_matcher));
+}
+
+
+Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher) {
+ return MakeMatcher(new IsControl2Matcher(IrOpcode::kEnd, control0_matcher,
+ control1_matcher));
+}
+
+
+Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher) {
+ return MakeMatcher(new IsControl3Matcher(IrOpcode::kEnd, control0_matcher,
+ control1_matcher, control2_matcher));
}
@@ -1029,7 +1496,31 @@
Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher) {
- return MakeMatcher(new IsMergeMatcher(control0_matcher, control1_matcher));
+ return MakeMatcher(new IsControl2Matcher(IrOpcode::kMerge, control0_matcher,
+ control1_matcher));
+}
+
+
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher) {
+ return MakeMatcher(new IsControl3Matcher(IrOpcode::kMerge, control0_matcher,
+ control1_matcher, control2_matcher));
+}
+
+
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher) {
+ return MakeMatcher(new IsControl2Matcher(IrOpcode::kLoop, control0_matcher,
+ control1_matcher));
+}
+
+
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher) {
+ return MakeMatcher(new IsControl3Matcher(IrOpcode::kLoop, control0_matcher,
+ control1_matcher, control2_matcher));
}
@@ -1044,14 +1535,52 @@
}
-Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
- return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
+Matcher<Node*> IsIfSuccess(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsControl1Matcher(IrOpcode::kIfSuccess, control_matcher));
}
-Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& effect_matcher) {
- return MakeMatcher(new IsFinishMatcher(value_matcher, effect_matcher));
+Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsSwitchMatcher(value_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsIfValue(const Matcher<int32_t>& value_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsIfValueMatcher(value_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsIfDefault(const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsControl1Matcher(IrOpcode::kIfDefault, control_matcher));
+}
+
+
+Matcher<Node*> IsBeginRegion(const Matcher<Node*>& effect_matcher) {
+ return MakeMatcher(new IsBeginRegionMatcher(effect_matcher));
+}
+
+
+Matcher<Node*> IsFinishRegion(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher) {
+ return MakeMatcher(new IsFinishRegionMatcher(value_matcher, effect_matcher));
+}
+
+
+Matcher<Node*> IsReturn(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsReturnMatcher(value_matcher, effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTerminate(const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsTerminateMatcher(effect_matcher, control_matcher));
}
@@ -1062,10 +1591,9 @@
}
-Matcher<Node*> IsHeapConstant(
- const Matcher<Unique<HeapObject> >& value_matcher) {
- return MakeMatcher(new IsConstantMatcher<Unique<HeapObject> >(
- IrOpcode::kHeapConstant, value_matcher));
+Matcher<Node*> IsHeapConstant(Handle<HeapObject> value) {
+ return MakeMatcher(new IsConstantMatcher<Handle<HeapObject>>(
+ IrOpcode::kHeapConstant, value));
}
@@ -1099,7 +1627,7 @@
}
-Matcher<Node*> IsSelect(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsSelect(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher) {
@@ -1108,7 +1636,7 @@
}
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& merge_matcher) {
@@ -1117,6 +1645,17 @@
}
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& merge_matcher) {
+ return MakeMatcher(new IsPhi2Matcher(type_matcher, value0_matcher,
+ value1_matcher, value2_matcher,
+ merge_matcher));
+}
+
+
Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
const Matcher<Node*>& merge_matcher) {
@@ -1125,33 +1664,257 @@
}
+Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher) {
+ return MakeMatcher(new IsEffectSetMatcher(effect0_matcher, effect1_matcher));
+}
+
+
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher) {
return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
}
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
- return MakeMatcher(new IsCall2Matcher(descriptor_matcher, value0_matcher,
- value1_matcher, effect_matcher,
- control_matcher));
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
}
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher) {
- return MakeMatcher(new IsCall4Matcher(
- descriptor_matcher, value0_matcher, value1_matcher, value2_matcher,
- value3_matcher, effect_matcher, control_matcher));
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher,
+ const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsCall(
+ const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ value_matchers.push_back(value6_matcher);
+ return MakeMatcher(new IsCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ value_matchers.push_back(value6_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& value7_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ std::vector<Matcher<Node*>> value_matchers;
+ value_matchers.push_back(value0_matcher);
+ value_matchers.push_back(value1_matcher);
+ value_matchers.push_back(value2_matcher);
+ value_matchers.push_back(value3_matcher);
+ value_matchers.push_back(value4_matcher);
+ value_matchers.push_back(value5_matcher);
+ value_matchers.push_back(value6_matcher);
+ value_matchers.push_back(value7_matcher);
+ return MakeMatcher(new IsTailCallMatcher(descriptor_matcher, value_matchers,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher) {
+ return MakeMatcher(
+ new IsReferenceEqualMatcher(type_matcher, lhs_matcher, rhs_matcher));
+}
+
+
+Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(
+ new IsAllocateMatcher(size_matcher, effect_matcher, control_matcher));
}
@@ -1233,15 +1996,6 @@
}
-Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& context_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher) {
- return MakeMatcher(new IsToNumberMatcher(base_matcher, context_matcher,
- effect_matcher, control_matcher));
-}
-
-
Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -1254,6 +2008,31 @@
}
+Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
+ const Matcher<Node*>& context_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher) {
+ return MakeMatcher(new IsToNumberMatcher(base_matcher, context_matcher,
+ effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsLoadContext(const Matcher<ContextAccess>& access_matcher,
+ const Matcher<Node*>& context_matcher) {
+ return MakeMatcher(new IsLoadContextMatcher(access_matcher, context_matcher));
+}
+
+
+Matcher<Node*> IsParameter(const Matcher<int> index_matcher) {
+ return MakeMatcher(new IsParameterMatcher(index_matcher));
+}
+
+
+Matcher<Node*> IsLoadFramePointer() {
+ return MakeMatcher(new NodeMatcher(IrOpcode::kLoadFramePointer));
+}
+
+
#define IS_BINOP_MATCHER(Name) \
Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher, \
const Matcher<Node*>& rhs_matcher) { \
@@ -1264,13 +2043,18 @@
IS_BINOP_MATCHER(NumberLessThan)
IS_BINOP_MATCHER(NumberSubtract)
IS_BINOP_MATCHER(NumberMultiply)
+IS_BINOP_MATCHER(NumberShiftLeft)
+IS_BINOP_MATCHER(NumberShiftRight)
+IS_BINOP_MATCHER(NumberShiftRightLogical)
IS_BINOP_MATCHER(Word32And)
+IS_BINOP_MATCHER(Word32Or)
IS_BINOP_MATCHER(Word32Sar)
IS_BINOP_MATCHER(Word32Shl)
IS_BINOP_MATCHER(Word32Shr)
IS_BINOP_MATCHER(Word32Ror)
IS_BINOP_MATCHER(Word32Equal)
IS_BINOP_MATCHER(Word64And)
+IS_BINOP_MATCHER(Word64Or)
IS_BINOP_MATCHER(Word64Sar)
IS_BINOP_MATCHER(Word64Shl)
IS_BINOP_MATCHER(Word64Equal)
@@ -1282,7 +2066,19 @@
IS_BINOP_MATCHER(Int32LessThan)
IS_BINOP_MATCHER(Uint32LessThan)
IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+IS_BINOP_MATCHER(Int64Add)
+IS_BINOP_MATCHER(Int64Sub)
+IS_BINOP_MATCHER(JSAdd)
+IS_BINOP_MATCHER(Float32Max)
+IS_BINOP_MATCHER(Float32Min)
+IS_BINOP_MATCHER(Float32Equal)
+IS_BINOP_MATCHER(Float32LessThan)
+IS_BINOP_MATCHER(Float32LessThanOrEqual)
+IS_BINOP_MATCHER(Float64Max)
+IS_BINOP_MATCHER(Float64Min)
IS_BINOP_MATCHER(Float64Sub)
+IS_BINOP_MATCHER(Float64InsertLowWord32)
+IS_BINOP_MATCHER(Float64InsertHighWord32)
#undef IS_BINOP_MATCHER
@@ -1290,7 +2086,6 @@
Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) { \
return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
}
-IS_UNOP_MATCHER(AnyToBoolean)
IS_UNOP_MATCHER(BooleanNot)
IS_UNOP_MATCHER(ChangeFloat64ToInt32)
IS_UNOP_MATCHER(ChangeFloat64ToUint32)
@@ -1301,13 +2096,18 @@
IS_UNOP_MATCHER(TruncateFloat64ToFloat32)
IS_UNOP_MATCHER(TruncateFloat64ToInt32)
IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float32Abs)
+IS_UNOP_MATCHER(Float64Abs)
IS_UNOP_MATCHER(Float64Sqrt)
-IS_UNOP_MATCHER(Float64Floor)
-IS_UNOP_MATCHER(Float64Ceil)
+IS_UNOP_MATCHER(Float64RoundDown)
IS_UNOP_MATCHER(Float64RoundTruncate)
IS_UNOP_MATCHER(Float64RoundTiesAway)
+IS_UNOP_MATCHER(Float64ExtractLowWord32)
+IS_UNOP_MATCHER(Float64ExtractHighWord32)
IS_UNOP_MATCHER(NumberToInt32)
IS_UNOP_MATCHER(NumberToUint32)
+IS_UNOP_MATCHER(ObjectIsSmi)
+IS_UNOP_MATCHER(Word32Clz)
#undef IS_UNOP_MATCHER
} // namespace compiler
diff --git a/test/unittests/compiler/node-test-utils.h b/test/unittests/compiler/node-test-utils.h
index 02b6e43..8592f30 100644
--- a/test/unittests/compiler/node-test-utils.h
+++ b/test/unittests/compiler/node-test-utils.h
@@ -6,7 +6,7 @@
#define V8_UNITTESTS_COMPILER_NODE_TEST_UTILS_H_
#include "src/compiler/machine-operator.h"
-#include "src/compiler/machine-type.h"
+#include "src/machine-type.h"
#include "testing/gmock/include/gmock/gmock.h"
namespace v8 {
@@ -14,15 +14,21 @@
// Forward declarations.
class ExternalReference;
+template <typename T>
+class Handle;
class HeapObject;
-template <class T>
-class Unique;
+template <class>
+class TypeImpl;
+enum TypeofMode : int;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
namespace compiler {
// Forward declarations.
class BufferAccess;
class CallDescriptor;
+class ContextAccess;
struct ElementAccess;
struct FieldAccess;
class Node;
@@ -31,52 +37,165 @@
using ::testing::Matcher;
+Matcher<Node*> IsDead();
+Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher);
+Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsEnd(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher);
Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher);
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsLoop(const Matcher<Node*>& control0_matcher,
+ const Matcher<Node*>& control1_matcher,
+ const Matcher<Node*>& control2_matcher);
Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
-Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
- const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsIfSuccess(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsSwitch(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfValue(const Matcher<int32_t>& value_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfDefault(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsBeginRegion(const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsFinishRegion(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsReturn(const Matcher<Node*>& value_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTerminate(const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsExternalConstant(
const Matcher<ExternalReference>& value_matcher);
-Matcher<Node*> IsHeapConstant(
- const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsHeapConstant(Handle<HeapObject> value);
Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
-Matcher<Node*> IsSelect(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsSelect(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher);
-Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsPhi(const Matcher<MachineRepresentation>& type_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& merge_matcher);
Matcher<Node*> IsEffectPhi(const Matcher<Node*>& effect0_matcher,
const Matcher<Node*>& effect1_matcher,
const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsEffectSet(const Matcher<Node*>& effect0_matcher,
+ const Matcher<Node*>& effect1_matcher);
Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
const Matcher<Node*>& base_matcher);
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
const Matcher<Node*>& value0_matcher,
const Matcher<Node*>& value1_matcher,
const Matcher<Node*>& value2_matcher,
const Matcher<Node*>& value3_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher,
+ const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher,
+ const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher,
+ const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsCall(
+ const Matcher<const CallDescriptor*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsTailCall(
+ const Matcher<CallDescriptor const*>& descriptor_matcher,
+ const Matcher<Node*>& value0_matcher, const Matcher<Node*>& value1_matcher,
+ const Matcher<Node*>& value2_matcher, const Matcher<Node*>& value3_matcher,
+ const Matcher<Node*>& value4_matcher, const Matcher<Node*>& value5_matcher,
+ const Matcher<Node*>& value6_matcher, const Matcher<Node*>& value7_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsAnyToBoolean(const Matcher<Node*>& value_matcher);
+
Matcher<Node*> IsBooleanNot(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsReferenceEqual(const Matcher<Type*>& type_matcher,
+ const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
@@ -85,6 +204,15 @@
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsNumberMultiply(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberShiftLeft(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberShiftRight(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsNumberShiftRightLogical(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsAllocate(const Matcher<Node*>& size_matcher,
+ const Matcher<Node*>& effect_matcher,
+ const Matcher<Node*>& control_matcher);
Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& effect_matcher,
@@ -118,6 +246,7 @@
const Matcher<Node*>& value_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsObjectIsSmi(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
@@ -132,6 +261,8 @@
const Matcher<Node*>& control_matcher);
Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Or(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Shl(const Matcher<Node*>& lhs_matcher,
@@ -142,8 +273,11 @@
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Clz(const Matcher<Node*>& value_matcher);
Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Or(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
@@ -166,6 +300,12 @@
const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Add(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt64Sub(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsJSAdd(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
@@ -175,19 +315,44 @@
Matcher<Node*> IsTruncateFloat64ToFloat32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat32Max(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32Min(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32Abs(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat32Equal(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32LessThan(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Max(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Min(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsFloat64Sub(const Matcher<Node*>& lhs_matcher,
const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64Abs(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
-Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64RoundDown(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64ExtractLowWord32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64ExtractHighWord32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64InsertLowWord32(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsFloat64InsertHighWord32(const Matcher<Node*>& lhs_matcher,
+ const Matcher<Node*>& rhs_matcher);
Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
const Matcher<Node*>& context_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadContext(const Matcher<ContextAccess>& access_matcher,
+ const Matcher<Node*>& context_matcher);
Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
+Matcher<Node*> IsLoadFramePointer();
} // namespace compiler
} // namespace internal
diff --git a/test/unittests/compiler/node-unittest.cc b/test/unittests/compiler/node-unittest.cc
new file mode 100644
index 0000000..5341f69
--- /dev/null
+++ b/test/unittests/compiler/node-unittest.cc
@@ -0,0 +1,261 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock-support.h"
+
+using testing::ElementsAre;
+using testing::UnorderedElementsAre;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef TestWithZone NodeTest;
+
+
+namespace {
+
+const IrOpcode::Value kOpcode0 = static_cast<IrOpcode::Value>(0);
+const IrOpcode::Value kOpcode1 = static_cast<IrOpcode::Value>(1);
+const IrOpcode::Value kOpcode2 = static_cast<IrOpcode::Value>(2);
+
+const Operator kOp0(kOpcode0, Operator::kNoProperties, "Op0", 0, 0, 0, 1, 0, 0);
+const Operator kOp1(kOpcode1, Operator::kNoProperties, "Op1", 1, 0, 0, 1, 0, 0);
+const Operator kOp2(kOpcode2, Operator::kNoProperties, "Op2", 2, 0, 0, 1, 0, 0);
+
+} // namespace
+
+
+TEST_F(NodeTest, New) {
+ Node* const node = Node::New(zone(), 1, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(1U, node->id());
+ EXPECT_EQ(0, node->UseCount());
+ EXPECT_TRUE(node->uses().empty());
+ EXPECT_EQ(0, node->InputCount());
+ EXPECT_TRUE(node->inputs().empty());
+ EXPECT_EQ(&kOp0, node->op());
+ EXPECT_EQ(kOpcode0, node->opcode());
+}
+
+
+TEST_F(NodeTest, NewWithInputs) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(0, n0->UseCount());
+ EXPECT_EQ(0, n0->InputCount());
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_EQ(1, n0->UseCount());
+ EXPECT_THAT(n0->uses(), UnorderedElementsAre(n1));
+ EXPECT_EQ(0, n1->UseCount());
+ EXPECT_EQ(1, n1->InputCount());
+ EXPECT_EQ(n0, n1->InputAt(0));
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_EQ(2, n0->UseCount());
+ EXPECT_THAT(n0->uses(), UnorderedElementsAre(n1, n2));
+ EXPECT_THAT(n1->uses(), UnorderedElementsAre(n2));
+ EXPECT_EQ(2, n2->InputCount());
+ EXPECT_EQ(n0, n2->InputAt(0));
+ EXPECT_EQ(n1, n2->InputAt(1));
+}
+
+
+TEST_F(NodeTest, InputIteratorEmpty) {
+ Node* node = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(node->inputs().begin(), node->inputs().end());
+}
+
+
+TEST_F(NodeTest, InputIteratorOne) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_THAT(n1->inputs(), ElementsAre(n0));
+}
+
+
+TEST_F(NodeTest, InputIteratorTwo) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_THAT(n2->inputs(), ElementsAre(n0, n1));
+}
+
+
+TEST_F(NodeTest, UseIteratorEmpty) {
+ Node* node = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_EQ(node->uses().begin(), node->uses().end());
+}
+
+
+TEST_F(NodeTest, UseIteratorOne) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_THAT(n0->uses(), ElementsAre(n1));
+}
+
+
+TEST_F(NodeTest, UseIteratorTwo) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_THAT(n0->uses(), UnorderedElementsAre(n1, n2));
+}
+
+
+TEST_F(NodeTest, OwnedBy) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ EXPECT_FALSE(n0->OwnedBy(n0));
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ EXPECT_FALSE(n0->OwnedBy(n0));
+ EXPECT_FALSE(n1->OwnedBy(n1));
+ EXPECT_TRUE(n0->OwnedBy(n1));
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ EXPECT_FALSE(n0->OwnedBy(n0));
+ EXPECT_FALSE(n1->OwnedBy(n1));
+ EXPECT_FALSE(n2->OwnedBy(n2));
+ EXPECT_FALSE(n0->OwnedBy(n1));
+ EXPECT_FALSE(n0->OwnedBy(n2));
+ EXPECT_TRUE(n1->OwnedBy(n2));
+ EXPECT_TRUE(n0->OwnedBy(n1, n2));
+ n2->ReplaceInput(0, n2);
+ EXPECT_TRUE(n0->OwnedBy(n1));
+ EXPECT_TRUE(n1->OwnedBy(n2));
+ n2->ReplaceInput(1, n0);
+ EXPECT_FALSE(n0->OwnedBy(n1));
+ EXPECT_FALSE(n1->OwnedBy(n2));
+}
+
+
+TEST_F(NodeTest, ReplaceUsesNone) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* n0_n1[] = {n0, n1};
+ Node* n2 = Node::New(zone(), 2, &kOp2, 2, n0_n1, false);
+ Node* node = Node::New(zone(), 42, &kOp0, 0, nullptr, false);
+ EXPECT_TRUE(node->uses().empty());
+ node->ReplaceUses(n0);
+ EXPECT_TRUE(node->uses().empty());
+ node->ReplaceUses(n1);
+ EXPECT_TRUE(node->uses().empty());
+ node->ReplaceUses(n2);
+ EXPECT_TRUE(node->uses().empty());
+}
+
+
+TEST_F(NodeTest, AppendInput) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+ Node* node = Node::New(zone(), 12345, &kOp0, 0, nullptr, true);
+ EXPECT_TRUE(node->inputs().empty());
+ node->AppendInput(zone(), n0);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n0));
+ node->AppendInput(zone(), n1);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1));
+ node->AppendInput(zone(), n0);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1, n0));
+ node->AppendInput(zone(), n0);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1, n0, n0));
+ node->AppendInput(zone(), n1);
+ EXPECT_THAT(node->inputs(), ElementsAre(n0, n1, n0, n0, n1));
+}
+
+
+TEST_F(NodeTest, TrimThenAppend) {
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp0, 0, nullptr, false);
+ Node* n2 = Node::New(zone(), 2, &kOp0, 0, nullptr, false);
+ Node* n3 = Node::New(zone(), 3, &kOp0, 0, nullptr, false);
+ Node* n4 = Node::New(zone(), 4, &kOp0, 0, nullptr, false);
+ Node* n5 = Node::New(zone(), 5, &kOp0, 0, nullptr, false);
+ Node* n6 = Node::New(zone(), 6, &kOp0, 0, nullptr, false);
+ Node* n7 = Node::New(zone(), 7, &kOp0, 0, nullptr, false);
+ Node* n8 = Node::New(zone(), 8, &kOp0, 0, nullptr, false);
+ Node* n9 = Node::New(zone(), 9, &kOp0, 0, nullptr, false);
+ Node* node = Node::New(zone(), 12345, &kOp0, 0, nullptr, true);
+
+ EXPECT_TRUE(node->inputs().empty());
+
+ node->AppendInput(zone(), n0);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n0));
+
+ node->TrimInputCount(0);
+ EXPECT_TRUE(node->inputs().empty());
+
+ node->AppendInput(zone(), n1);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1));
+
+ node->AppendInput(zone(), n2);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n2));
+
+ node->TrimInputCount(1);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1));
+
+ node->AppendInput(zone(), n3);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3));
+
+ node->AppendInput(zone(), n4);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3, n4));
+
+ node->AppendInput(zone(), n5);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3, n4, n5));
+
+ node->AppendInput(zone(), n6);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3, n4, n5, n6));
+
+ node->AppendInput(zone(), n7);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3, n4, n5, n6, n7));
+
+ node->TrimInputCount(4);
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3, n4, n5));
+
+ node->AppendInput(zone(), n8);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3, n4, n5, n8));
+
+ node->AppendInput(zone(), n9);
+ EXPECT_FALSE(node->inputs().empty());
+ EXPECT_THAT(node->inputs(), ElementsAre(n1, n3, n4, n5, n8, n9));
+}
+
+
+TEST_F(NodeTest, BigNodes) {
+ static const int kMaxSize = 512;
+ Node* inputs[kMaxSize];
+
+ Node* n0 = Node::New(zone(), 0, &kOp0, 0, nullptr, false);
+ Node* n1 = Node::New(zone(), 1, &kOp1, 1, &n0, false);
+
+ for (int i = 0; i < kMaxSize; i++) {
+ inputs[i] = i & 1 ? n0 : n1;
+ }
+
+ for (int size = 13; size <= kMaxSize; size += 9) {
+ Node* node = Node::New(zone(), 12345, &kOp0, size, inputs, false);
+ EXPECT_EQ(size, node->InputCount());
+
+ for (int i = 0; i < size; i++) {
+ EXPECT_EQ(inputs[i], node->InputAt(i));
+ }
+ }
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/opcodes-unittest.cc b/test/unittests/compiler/opcodes-unittest.cc
new file mode 100644
index 0000000..3bb65c2
--- /dev/null
+++ b/test/unittests/compiler/opcodes-unittest.cc
@@ -0,0 +1,147 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/opcodes.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+bool IsCommonOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ COMMON_OP_LIST(OPCODE)
+ CONTROL_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+bool IsControlOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ CONTROL_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+bool IsJsOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ JS_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+bool IsConstantOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ CONSTANT_OP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+bool IsComparisonOpcode(IrOpcode::Value opcode) {
+ switch (opcode) {
+#define OPCODE(Opcode) \
+ case IrOpcode::k##Opcode: \
+ return true;
+ JS_COMPARE_BINOP_LIST(OPCODE)
+ SIMPLIFIED_COMPARE_BINOP_LIST(OPCODE)
+ MACHINE_COMPARE_BINOP_LIST(OPCODE)
+#undef OPCODE
+ default:
+ return false;
+ }
+}
+
+
+const IrOpcode::Value kInvalidOpcode = static_cast<IrOpcode::Value>(123456789);
+
+} // namespace
+
+
+TEST(IrOpcodeTest, IsCommonOpcode) {
+ EXPECT_FALSE(IrOpcode::IsCommonOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsCommonOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsCommonOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, IsControlOpcode) {
+ EXPECT_FALSE(IrOpcode::IsControlOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsControlOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsControlOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, IsJsOpcode) {
+ EXPECT_FALSE(IrOpcode::IsJsOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsJsOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsJsOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, IsConstantOpcode) {
+ EXPECT_FALSE(IrOpcode::IsConstantOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsConstantOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsConstantOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, IsComparisonOpcode) {
+ EXPECT_FALSE(IrOpcode::IsComparisonOpcode(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_EQ(IsComparisonOpcode(IrOpcode::k##Opcode), \
+ IrOpcode::IsComparisonOpcode(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+
+TEST(IrOpcodeTest, Mnemonic) {
+ EXPECT_STREQ("UnknownOpcode", IrOpcode::Mnemonic(kInvalidOpcode));
+#define OPCODE(Opcode) \
+ EXPECT_STREQ(#Opcode, IrOpcode::Mnemonic(IrOpcode::k##Opcode));
+ ALL_OP_LIST(OPCODE)
+#undef OPCODE
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/ppc/OWNERS b/test/unittests/compiler/ppc/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/test/unittests/compiler/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc b/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
new file mode 100644
index 0000000..5fe72ee
--- /dev/null
+++ b/test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/register-allocator-unittest.cc b/test/unittests/compiler/register-allocator-unittest.cc
index 12dedbd..c5ff90f 100644
--- a/test/unittests/compiler/register-allocator-unittest.cc
+++ b/test/unittests/compiler/register-allocator-unittest.cc
@@ -9,6 +9,77 @@
namespace internal {
namespace compiler {
+
+namespace {
+
+// We can't just use the size of the moves collection, because of
+// redundant moves which need to be discounted.
+int GetMoveCount(const ParallelMove& moves) {
+ int move_count = 0;
+ for (auto move : moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ ++move_count;
+ }
+ return move_count;
+}
+
+
+bool AreOperandsOfSameType(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ bool test_op_is_reg =
+ (test_op.type_ ==
+ InstructionSequenceTest::TestOperandType::kFixedRegister ||
+ test_op.type_ == InstructionSequenceTest::TestOperandType::kRegister);
+
+ return (op.IsRegister() && test_op_is_reg) ||
+ (op.IsStackSlot() && !test_op_is_reg);
+}
+
+
+bool AllocatedOperandMatches(
+ const AllocatedOperand& op,
+ const InstructionSequenceTest::TestOperand& test_op) {
+ return AreOperandsOfSameType(op, test_op) &&
+ ((op.IsRegister() ? op.GetRegister().code() : op.index()) ==
+ test_op.value_ ||
+ test_op.value_ == InstructionSequenceTest::kNoValue);
+}
+
+
+int GetParallelMoveCount(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ if (moves == nullptr) return 0;
+ return GetMoveCount(*moves);
+}
+
+
+bool IsParallelMovePresent(int instr_index, Instruction::GapPosition gap_pos,
+ const InstructionSequence* sequence,
+ const InstructionSequenceTest::TestOperand& src,
+ const InstructionSequenceTest::TestOperand& dest) {
+ const ParallelMove* moves =
+ sequence->InstructionAt(instr_index)->GetParallelMove(gap_pos);
+ EXPECT_NE(nullptr, moves);
+
+ bool found_match = false;
+ for (auto move : *moves) {
+ if (move->IsEliminated() || move->IsRedundant()) continue;
+ if (AllocatedOperandMatches(AllocatedOperand::cast(move->source()), src) &&
+ AllocatedOperandMatches(AllocatedOperand::cast(move->destination()),
+ dest)) {
+ found_match = true;
+ break;
+ }
+ }
+ return found_match;
+}
+
+} // namespace
+
+
class RegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
@@ -42,9 +113,9 @@
StartLoop(1);
StartBlock();
- auto phi = Phi(i_reg);
+ auto phi = Phi(i_reg, 2);
auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
- Extend(phi, ipp);
+ SetInput(phi, 1, ipp);
EndBlock(Jump(0));
EndLoop();
@@ -206,14 +277,14 @@
StartBlock();
for (size_t i = 0; i < arraysize(parameters); ++i) {
- phis[i] = Phi(parameters[i]);
+ phis[i] = Phi(parameters[i], 2);
}
// Perform some computations.
// something like phi[i] += const
for (size_t i = 0; i < arraysize(parameters); ++i) {
auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
- Extend(phis[i], result);
+ SetInput(phis[i], 1, result);
}
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
@@ -301,6 +372,31 @@
}
+TEST_F(RegisterAllocatorTest, SplitBeforeInstruction2) {
+ const int kNumRegs = 6;
+ SetNumRegs(kNumRegs, kNumRegs);
+
+ StartBlock();
+
+ // Stack parameters/spilled values.
+ auto p_0 = Define(Slot(-1));
+ auto p_1 = Define(Slot(-2));
+
+ // Fill registers.
+ VReg values[kNumRegs];
+ for (size_t i = 0; i < arraysize(values); ++i) {
+ values[i] = Define(Reg(static_cast<int>(i)));
+ }
+
+ // values[0] and [1] will be split in the second half of this instruction.
+ EmitOOI(Reg(0), Reg(1), Reg(p_0, 0), Reg(p_1, 1));
+ EmitI(Reg(values[0]), Reg(values[1]));
+ EndBlock(Last());
+
+ Allocate();
+}
+
+
TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
// Outer diamond.
StartBlock();
@@ -432,6 +528,260 @@
}
+TEST_F(RegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
+ StartBlock();
+ // Fill registers.
+ VReg values[kDefaultNRegs];
+ for (size_t i = arraysize(values); i > 0; --i) {
+ values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
+ }
+ auto c = DefineConstant();
+ auto to_spill = Define(Reg());
+ EndBlock(Jump(1));
+
+ {
+ StartLoop(1);
+
+ StartBlock();
+ // Create a use for c in second half of prev block's last gap
+ Phi(c);
+ for (size_t i = arraysize(values); i > 0; --i) {
+ Phi(values[i - 1]);
+ }
+ EndBlock(Jump(1));
+
+ EndLoop();
+ }
+
+ StartBlock();
+ // Force c to split within to_spill's definition.
+ EmitI(Reg(c));
+ EmitI(Reg(to_spill));
+ EndBlock(Last());
+
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, DiamondWithCallFirstBlock) {
+ StartBlock();
+ auto x = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(x), 1, 2));
+
+ StartBlock();
+ EmitCall(Slot(-1));
+ auto occupy = EmitOI(Reg(0));
+ EndBlock(Jump(2));
+
+ StartBlock();
+ EndBlock(FallThrough());
+
+ StartBlock();
+ Use(occupy);
+ Return(Reg(x));
+ EndBlock();
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, DiamondWithCallSecondBlock) {
+ StartBlock();
+ auto x = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(x), 1, 2));
+
+ StartBlock();
+ EndBlock(Jump(2));
+
+ StartBlock();
+ EmitCall(Slot(-1));
+ auto occupy = EmitOI(Reg(0));
+ EndBlock(FallThrough());
+
+ StartBlock();
+ Use(occupy);
+ Return(Reg(x));
+ EndBlock();
+ Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, SingleDeferredBlockSpill) {
+ StartBlock(); // B0
+ auto var = EmitOI(Reg(0));
+ EndBlock(Branch(Reg(var), 1, 2));
+
+ StartBlock(); // B1
+ EndBlock(Jump(2));
+
+ StartBlock(true); // B2
+ EmitCall(Slot(-1), Slot(var));
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B4
+ Return(Reg(var, 0));
+ EndBlock();
+
+ Allocate();
+
+ const int var_def_index = 1;
+ const int call_index = 3;
+ int expect_no_moves =
+ FLAG_turbo_preprocess_ranges ? var_def_index : call_index;
+ int expect_spill_move =
+ FLAG_turbo_preprocess_ranges ? call_index : var_def_index;
+
+ // We should have no parallel moves at the "expect_no_moves" position.
+ EXPECT_EQ(
+ 0, GetParallelMoveCount(expect_no_moves, Instruction::START, sequence()));
+
+ // The spill should be performed at the position expect_spill_move.
+ EXPECT_TRUE(IsParallelMovePresent(expect_spill_move, Instruction::START,
+ sequence(), Reg(0), Slot(0)));
+}
+
+
+TEST_F(RegisterAllocatorTest, MultipleDeferredBlockSpills) {
+ if (!FLAG_turbo_preprocess_ranges) return;
+
+ StartBlock(); // B0
+ auto var1 = EmitOI(Reg(0));
+ auto var2 = EmitOI(Reg(1));
+ auto var3 = EmitOI(Reg(2));
+ EndBlock(Branch(Reg(var1, 0), 1, 2));
+
+ StartBlock(true); // B1
+ EmitCall(Slot(-2), Slot(var1));
+ EndBlock(Jump(2));
+
+ StartBlock(true); // B2
+ EmitCall(Slot(-1), Slot(var2));
+ EndBlock();
+
+ StartBlock(); // B3
+ EmitNop();
+ EndBlock();
+
+ StartBlock(); // B4
+ Return(Reg(var3, 2));
+ EndBlock();
+
+ const int def_of_v2 = 3;
+ const int call_in_b1 = 4;
+ const int call_in_b2 = 6;
+ const int end_of_b1 = 5;
+ const int end_of_b2 = 7;
+ const int start_of_b3 = 8;
+
+ Allocate();
+ // TODO(mtrofin): at the moment, the linear allocator spills var1 and var2,
+ // so only var3 is spilled in deferred blocks. Greedy avoids spilling 1&2.
+ // Expand the test once greedy is back online with this facility.
+ const int var3_reg = 2;
+ const int var3_slot = 2;
+
+ EXPECT_FALSE(IsParallelMovePresent(def_of_v2, Instruction::START, sequence(),
+ Reg(var3_reg), Slot()));
+ EXPECT_TRUE(IsParallelMovePresent(call_in_b1, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b1, Instruction::START, sequence(),
+ Slot(var3_slot), Reg()));
+
+ EXPECT_TRUE(IsParallelMovePresent(call_in_b2, Instruction::START, sequence(),
+ Reg(var3_reg), Slot(var3_slot)));
+ EXPECT_TRUE(IsParallelMovePresent(end_of_b2, Instruction::START, sequence(),
+ Slot(var3_slot), Reg()));
+
+
+ EXPECT_EQ(0,
+ GetParallelMoveCount(start_of_b3, Instruction::START, sequence()));
+}
+
+
+namespace {
+
+enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
+
+const ParameterType kParameterTypes[] = {
+ ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister,
+ ParameterType::kFixedRegister};
+
+class SlotConstraintTest : public RegisterAllocatorTest,
+ public ::testing::WithParamInterface<
+ ::testing::tuple<ParameterType, int>> {
+ public:
+ static const int kMaxVariant = 5;
+
+ protected:
+ ParameterType parameter_type() const {
+ return ::testing::get<0>(B::GetParam());
+ }
+ int variant() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+ typedef ::testing::WithParamInterface<::testing::tuple<ParameterType, int>> B;
+};
+
+} // namespace
+
+
+#if GTEST_HAS_COMBINE
+
+TEST_P(SlotConstraintTest, SlotConstraint) {
+ StartBlock();
+ VReg p_0;
+ switch (parameter_type()) {
+ case ParameterType::kFixedSlot:
+ p_0 = Parameter(Slot(-1));
+ break;
+ case ParameterType::kSlot:
+ p_0 = Parameter(Slot(-1));
+ break;
+ case ParameterType::kRegister:
+ p_0 = Parameter(Reg());
+ break;
+ case ParameterType::kFixedRegister:
+ p_0 = Parameter(Reg(1));
+ break;
+ }
+ switch (variant()) {
+ case 0:
+ EmitI(Slot(p_0), Reg(p_0));
+ break;
+ case 1:
+ EmitI(Slot(p_0));
+ break;
+ case 2:
+ EmitI(Reg(p_0));
+ EmitI(Slot(p_0));
+ break;
+ case 3:
+ EmitI(Slot(p_0));
+ EmitI(Reg(p_0));
+ break;
+ case 4:
+ EmitI(Slot(p_0, -1), Slot(p_0), Reg(p_0), Reg(p_0, 1));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ EndBlock(Last());
+
+ Allocate();
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+ RegisterAllocatorTest, SlotConstraintTest,
+ ::testing::Combine(::testing::ValuesIn(kParameterTypes),
+ ::testing::Range(0, SlotConstraintTest::kMaxVariant)));
+
+#endif // GTEST_HAS_COMBINE
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/unittests/compiler/schedule-unittest.cc b/test/unittests/compiler/schedule-unittest.cc
new file mode 100644
index 0000000..bc82535
--- /dev/null
+++ b/test/unittests/compiler/schedule-unittest.cc
@@ -0,0 +1,249 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::ElementsAre;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef TestWithIsolateAndZone BasicBlockTest;
+
+
+TEST_F(BasicBlockTest, Constructor) {
+ int const id = random_number_generator()->NextInt();
+ BasicBlock b(zone(), BasicBlock::Id::FromInt(id));
+ EXPECT_FALSE(b.deferred());
+ EXPECT_GT(0, b.dominator_depth());
+ EXPECT_EQ(nullptr, b.dominator());
+ EXPECT_EQ(nullptr, b.rpo_next());
+ EXPECT_EQ(id, b.id().ToInt());
+}
+
+
+TEST_F(BasicBlockTest, GetCommonDominator1) {
+ BasicBlock b(zone(), BasicBlock::Id::FromInt(0));
+ EXPECT_EQ(&b, BasicBlock::GetCommonDominator(&b, &b));
+}
+
+
+TEST_F(BasicBlockTest, GetCommonDominator2) {
+ BasicBlock b0(zone(), BasicBlock::Id::FromInt(0));
+ BasicBlock b1(zone(), BasicBlock::Id::FromInt(1));
+ BasicBlock b2(zone(), BasicBlock::Id::FromInt(2));
+ b0.set_dominator_depth(0);
+ b1.set_dominator(&b0);
+ b1.set_dominator_depth(1);
+ b2.set_dominator(&b1);
+ b2.set_dominator_depth(2);
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b0, &b1));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b0, &b2));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b1, &b0));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b2, &b0));
+ EXPECT_EQ(&b1, BasicBlock::GetCommonDominator(&b1, &b2));
+ EXPECT_EQ(&b1, BasicBlock::GetCommonDominator(&b2, &b1));
+}
+
+
+TEST_F(BasicBlockTest, GetCommonDominator3) {
+ BasicBlock b0(zone(), BasicBlock::Id::FromInt(0));
+ BasicBlock b1(zone(), BasicBlock::Id::FromInt(1));
+ BasicBlock b2(zone(), BasicBlock::Id::FromInt(2));
+ BasicBlock b3(zone(), BasicBlock::Id::FromInt(3));
+ b0.set_dominator_depth(0);
+ b1.set_dominator(&b0);
+ b1.set_dominator_depth(1);
+ b2.set_dominator(&b0);
+ b2.set_dominator_depth(1);
+ b3.set_dominator(&b2);
+ b3.set_dominator_depth(2);
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b1, &b3));
+ EXPECT_EQ(&b0, BasicBlock::GetCommonDominator(&b3, &b1));
+}
+
+
+typedef TestWithZone ScheduleTest;
+
+
+namespace {
+
+const Operator kCallOperator(IrOpcode::kCall, Operator::kNoProperties,
+ "MockCall", 0, 0, 0, 0, 0, 0);
+const Operator kBranchOperator(IrOpcode::kBranch, Operator::kNoProperties,
+ "MockBranch", 0, 0, 0, 0, 0, 0);
+const Operator kDummyOperator(IrOpcode::kParameter, Operator::kNoProperties,
+ "Dummy", 0, 0, 0, 0, 0, 0);
+
+} // namespace
+
+
+TEST_F(ScheduleTest, Constructor) {
+ Schedule schedule(zone());
+ EXPECT_NE(nullptr, schedule.start());
+ EXPECT_EQ(schedule.start(),
+ schedule.GetBlockById(BasicBlock::Id::FromInt(0)));
+ EXPECT_NE(nullptr, schedule.end());
+ EXPECT_EQ(schedule.end(), schedule.GetBlockById(BasicBlock::Id::FromInt(1)));
+ EXPECT_NE(schedule.start(), schedule.end());
+}
+
+
+TEST_F(ScheduleTest, AddNode) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+
+ Node* node0 = Node::New(zone(), 0, &kDummyOperator, 0, nullptr, false);
+ EXPECT_EQ(nullptr, schedule.block(node0));
+ schedule.AddNode(start, node0);
+ EXPECT_EQ(start, schedule.block(node0));
+ EXPECT_THAT(*start, ElementsAre(node0));
+
+ Node* node1 = Node::New(zone(), 1, &kDummyOperator, 0, nullptr, false);
+ EXPECT_EQ(nullptr, schedule.block(node1));
+ schedule.AddNode(start, node1);
+ EXPECT_EQ(start, schedule.block(node1));
+ EXPECT_THAT(*start, ElementsAre(node0, node1));
+
+ EXPECT_TRUE(schedule.SameBasicBlock(node0, node1));
+}
+
+
+TEST_F(ScheduleTest, AddGoto) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+ BasicBlock* end = schedule.end();
+
+ BasicBlock* block = schedule.NewBasicBlock();
+ schedule.AddGoto(start, block);
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(1u, start->SuccessorCount());
+ EXPECT_EQ(block, start->SuccessorAt(0));
+ EXPECT_THAT(start->successors(), ElementsAre(block));
+
+ EXPECT_EQ(1u, block->PredecessorCount());
+ EXPECT_EQ(0u, block->SuccessorCount());
+ EXPECT_EQ(start, block->PredecessorAt(0));
+ EXPECT_THAT(block->predecessors(), ElementsAre(start));
+
+ EXPECT_EQ(0u, end->PredecessorCount());
+ EXPECT_EQ(0u, end->SuccessorCount());
+}
+
+
+TEST_F(ScheduleTest, AddCall) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+
+ Node* call = Node::New(zone(), 0, &kCallOperator, 0, nullptr, false);
+ BasicBlock* sblock = schedule.NewBasicBlock();
+ BasicBlock* eblock = schedule.NewBasicBlock();
+ schedule.AddCall(start, call, sblock, eblock);
+
+ EXPECT_EQ(start, schedule.block(call));
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(2u, start->SuccessorCount());
+ EXPECT_EQ(sblock, start->SuccessorAt(0));
+ EXPECT_EQ(eblock, start->SuccessorAt(1));
+ EXPECT_THAT(start->successors(), ElementsAre(sblock, eblock));
+
+ EXPECT_EQ(1u, sblock->PredecessorCount());
+ EXPECT_EQ(0u, sblock->SuccessorCount());
+ EXPECT_EQ(start, sblock->PredecessorAt(0));
+ EXPECT_THAT(sblock->predecessors(), ElementsAre(start));
+
+ EXPECT_EQ(1u, eblock->PredecessorCount());
+ EXPECT_EQ(0u, eblock->SuccessorCount());
+ EXPECT_EQ(start, eblock->PredecessorAt(0));
+ EXPECT_THAT(eblock->predecessors(), ElementsAre(start));
+}
+
+
+TEST_F(ScheduleTest, AddBranch) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+
+ Node* branch = Node::New(zone(), 0, &kBranchOperator, 0, nullptr, false);
+ BasicBlock* tblock = schedule.NewBasicBlock();
+ BasicBlock* fblock = schedule.NewBasicBlock();
+ schedule.AddBranch(start, branch, tblock, fblock);
+
+ EXPECT_EQ(start, schedule.block(branch));
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(2u, start->SuccessorCount());
+ EXPECT_EQ(tblock, start->SuccessorAt(0));
+ EXPECT_EQ(fblock, start->SuccessorAt(1));
+ EXPECT_THAT(start->successors(), ElementsAre(tblock, fblock));
+
+ EXPECT_EQ(1u, tblock->PredecessorCount());
+ EXPECT_EQ(0u, tblock->SuccessorCount());
+ EXPECT_EQ(start, tblock->PredecessorAt(0));
+ EXPECT_THAT(tblock->predecessors(), ElementsAre(start));
+
+ EXPECT_EQ(1u, fblock->PredecessorCount());
+ EXPECT_EQ(0u, fblock->SuccessorCount());
+ EXPECT_EQ(start, fblock->PredecessorAt(0));
+ EXPECT_THAT(fblock->predecessors(), ElementsAre(start));
+}
+
+
+TEST_F(ScheduleTest, AddReturn) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+ BasicBlock* end = schedule.end();
+
+ Node* node = Node::New(zone(), 0, &kDummyOperator, 0, nullptr, false);
+ schedule.AddReturn(start, node);
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(1u, start->SuccessorCount());
+ EXPECT_EQ(end, start->SuccessorAt(0));
+ EXPECT_THAT(start->successors(), ElementsAre(end));
+}
+
+
+TEST_F(ScheduleTest, InsertBranch) {
+ Schedule schedule(zone());
+ BasicBlock* start = schedule.start();
+ BasicBlock* end = schedule.end();
+
+ Node* node = Node::New(zone(), 0, &kDummyOperator, 0, nullptr, false);
+ Node* branch = Node::New(zone(), 0, &kBranchOperator, 0, nullptr, false);
+ BasicBlock* tblock = schedule.NewBasicBlock();
+ BasicBlock* fblock = schedule.NewBasicBlock();
+ BasicBlock* mblock = schedule.NewBasicBlock();
+
+ schedule.AddReturn(start, node);
+ schedule.AddGoto(tblock, mblock);
+ schedule.AddGoto(fblock, mblock);
+ schedule.InsertBranch(start, mblock, branch, tblock, fblock);
+
+ EXPECT_EQ(0u, start->PredecessorCount());
+ EXPECT_EQ(2u, start->SuccessorCount());
+ EXPECT_EQ(tblock, start->SuccessorAt(0));
+ EXPECT_EQ(fblock, start->SuccessorAt(1));
+ EXPECT_THAT(start->successors(), ElementsAre(tblock, fblock));
+
+ EXPECT_EQ(2u, mblock->PredecessorCount());
+ EXPECT_EQ(1u, mblock->SuccessorCount());
+ EXPECT_EQ(end, mblock->SuccessorAt(0));
+ EXPECT_THAT(mblock->predecessors(), ElementsAre(tblock, fblock));
+ EXPECT_THAT(mblock->successors(), ElementsAre(end));
+
+ EXPECT_EQ(1u, end->PredecessorCount());
+ EXPECT_EQ(0u, end->SuccessorCount());
+ EXPECT_EQ(mblock, end->PredecessorAt(0));
+ EXPECT_THAT(end->predecessors(), ElementsAre(mblock));
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/scheduler-unittest.cc b/test/unittests/compiler/scheduler-unittest.cc
new file mode 100644
index 0000000..523c8ce
--- /dev/null
+++ b/test/unittests/compiler/scheduler-unittest.cc
@@ -0,0 +1,1164 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/source-position.h"
+#include "src/compiler/verifier.h"
+#include "test/unittests/compiler/compiler-test-utils.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::AnyOf;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SchedulerTest : public TestWithIsolateAndZone {
+ public:
+ SchedulerTest()
+ : graph_(zone()), common_(zone()), simplified_(zone()), js_(zone()) {}
+
+ Schedule* ComputeAndVerifySchedule(size_t expected) {
+ if (FLAG_trace_turbo) {
+ OFStream os(stdout);
+ SourcePositionTable table(graph());
+ os << AsJSON(*graph(), &table);
+ }
+
+ Schedule* schedule =
+ Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kSplitNodes);
+
+ if (FLAG_trace_turbo_scheduler) {
+ OFStream os(stdout);
+ os << *schedule << std::endl;
+ }
+ ScheduleVerifier::Run(schedule);
+ EXPECT_EQ(expected, GetScheduledNodeCount(schedule));
+ return schedule;
+ }
+
+ size_t GetScheduledNodeCount(const Schedule* schedule) {
+ size_t node_count = 0;
+ for (auto block : *schedule->rpo_order()) {
+ node_count += block->NodeCount();
+ if (block->control() != BasicBlock::kNone) ++node_count;
+ }
+ return node_count;
+ }
+
+ Graph* graph() { return &graph_; }
+ CommonOperatorBuilder* common() { return &common_; }
+ SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+ JSOperatorBuilder* js() { return &js_; }
+
+ private:
+ Graph graph_;
+ CommonOperatorBuilder common_;
+ SimplifiedOperatorBuilder simplified_;
+ JSOperatorBuilder js_;
+};
+
+
+class SchedulerRPOTest : public SchedulerTest {
+ public:
+ SchedulerRPOTest() {}
+
+ // TODO(titzer): pull RPO tests out to their own file.
+ void CheckRPONumbers(BasicBlockVector* order, size_t expected,
+ bool loops_allowed) {
+ CHECK(expected == order->size());
+ for (int i = 0; i < static_cast<int>(order->size()); i++) {
+ CHECK(order->at(i)->rpo_number() == i);
+ if (!loops_allowed) {
+ CHECK(!order->at(i)->loop_end());
+ CHECK(!order->at(i)->loop_header());
+ }
+ }
+ }
+
+ void CheckLoop(BasicBlockVector* order, BasicBlock** blocks, int body_size) {
+ BasicBlock* header = blocks[0];
+ BasicBlock* end = header->loop_end();
+ CHECK(end);
+ CHECK_GT(end->rpo_number(), 0);
+ CHECK_EQ(body_size, end->rpo_number() - header->rpo_number());
+ for (int i = 0; i < body_size; i++) {
+ CHECK_GE(blocks[i]->rpo_number(), header->rpo_number());
+ CHECK_LT(blocks[i]->rpo_number(), end->rpo_number());
+ CHECK(header->LoopContains(blocks[i]));
+ CHECK(header->IsLoopHeader() || blocks[i]->loop_header() == header);
+ }
+ if (header->rpo_number() > 0) {
+ CHECK_NE(order->at(header->rpo_number() - 1)->loop_header(), header);
+ }
+ if (end->rpo_number() < static_cast<int>(order->size())) {
+ CHECK_NE(order->at(end->rpo_number())->loop_header(), header);
+ }
+ }
+
+ struct TestLoop {
+ int count;
+ BasicBlock** nodes;
+ BasicBlock* header() { return nodes[0]; }
+ BasicBlock* last() { return nodes[count - 1]; }
+ ~TestLoop() { delete[] nodes; }
+ };
+
+ TestLoop* CreateLoop(Schedule* schedule, int count) {
+ TestLoop* loop = new TestLoop();
+ loop->count = count;
+ loop->nodes = new BasicBlock* [count];
+ for (int i = 0; i < count; i++) {
+ loop->nodes[i] = schedule->NewBasicBlock();
+ if (i > 0) {
+ schedule->AddSuccessorForTesting(loop->nodes[i - 1], loop->nodes[i]);
+ }
+ }
+ schedule->AddSuccessorForTesting(loop->nodes[count - 1], loop->nodes[0]);
+ return loop;
+ }
+};
+
+
+namespace {
+
+const Operator kHeapConstant(IrOpcode::kHeapConstant, Operator::kPure,
+ "HeapConstant", 0, 0, 0, 1, 0, 0);
+const Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
+ 0, 1, 0, 0);
+const Operator kMockCall(IrOpcode::kCall, Operator::kNoProperties, "MockCall",
+ 0, 0, 1, 1, 1, 2);
+const Operator kMockTailCall(IrOpcode::kTailCall, Operator::kNoProperties,
+ "MockTailCall", 1, 1, 1, 0, 0, 1);
+
+} // namespace
+
+
+// -----------------------------------------------------------------------------
+// Special reverse-post-order block ordering.
+
+
+TEST_F(SchedulerRPOTest, Degenerate1) {
+ Schedule schedule(zone());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1, false);
+ EXPECT_EQ(schedule.start(), order->at(0));
+}
+
+
+TEST_F(SchedulerRPOTest, Degenerate2) {
+ Schedule schedule(zone());
+
+ schedule.AddGoto(schedule.start(), schedule.end());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 2, false);
+ EXPECT_EQ(schedule.start(), order->at(0));
+ EXPECT_EQ(schedule.end(), order->at(1));
+}
+
+
+TEST_F(SchedulerRPOTest, Line) {
+ for (int i = 0; i < 10; i++) {
+ Schedule schedule(zone());
+
+ BasicBlock* last = schedule.start();
+ for (int j = 0; j < i; j++) {
+ BasicBlock* block = schedule.NewBasicBlock();
+ block->set_deferred(i & 1);
+ schedule.AddGoto(last, block);
+ last = block;
+ }
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1 + i, false);
+
+ for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
+ BasicBlock* block = schedule.GetBlockById(BasicBlock::Id::FromSize(i));
+ if (block->rpo_number() >= 0 && block->SuccessorCount() == 1) {
+ EXPECT_EQ(block->rpo_number() + 1, block->SuccessorAt(0)->rpo_number());
+ }
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, SelfLoop) {
+ Schedule schedule(zone());
+ schedule.AddSuccessorForTesting(schedule.start(), schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 1, true);
+ BasicBlock* loop[] = {schedule.start()};
+ CheckLoop(order, loop, 1);
+}
+
+
+TEST_F(SchedulerRPOTest, EntryLoop) {
+ Schedule schedule(zone());
+ BasicBlock* body = schedule.NewBasicBlock();
+ schedule.AddSuccessorForTesting(schedule.start(), body);
+ schedule.AddSuccessorForTesting(body, schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 2, true);
+ BasicBlock* loop[] = {schedule.start(), body};
+ CheckLoop(order, loop, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, EndLoop) {
+ Schedule schedule(zone());
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 3, true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+}
+
+
+TEST_F(SchedulerRPOTest, EndLoopNested) {
+ Schedule schedule(zone());
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+ schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 3, true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+}
+
+
+TEST_F(SchedulerRPOTest, Diamond) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(A, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(C, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, false);
+
+ EXPECT_EQ(0, A->rpo_number());
+ EXPECT_THAT(B->rpo_number(), AnyOf(1, 2));
+ EXPECT_THAT(C->rpo_number(), AnyOf(1, 2));
+ EXPECT_EQ(3, D->rpo_number());
+}
+
+
+TEST_F(SchedulerRPOTest, Loop1) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(C, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, true);
+ BasicBlock* loop[] = {B, C};
+ CheckLoop(order, loop, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, Loop2) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(B, D);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 4, true);
+ BasicBlock* loop[] = {B, C};
+ CheckLoop(order, loop, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopN) {
+ for (int i = 0; i < 11; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.NewBasicBlock();
+ BasicBlock* G = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, B);
+ schedule.AddSuccessorForTesting(B, G);
+
+ // Throw in extra backedges from time to time.
+ if (i == 1) schedule.AddSuccessorForTesting(B, B);
+ if (i == 2) schedule.AddSuccessorForTesting(C, B);
+ if (i == 3) schedule.AddSuccessorForTesting(D, B);
+ if (i == 4) schedule.AddSuccessorForTesting(E, B);
+ if (i == 5) schedule.AddSuccessorForTesting(F, B);
+
+ // Throw in extra loop exits from time to time.
+ if (i == 6) schedule.AddSuccessorForTesting(B, G);
+ if (i == 7) schedule.AddSuccessorForTesting(C, G);
+ if (i == 8) schedule.AddSuccessorForTesting(D, G);
+ if (i == 9) schedule.AddSuccessorForTesting(E, G);
+ if (i == 10) schedule.AddSuccessorForTesting(F, G);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 7, true);
+ BasicBlock* loop[] = {B, C, D, E, F};
+ CheckLoop(order, loop, 5);
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopNest1) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, C);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, B);
+ schedule.AddSuccessorForTesting(E, F);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 6, true);
+ BasicBlock* loop1[] = {B, C, D, E};
+ CheckLoop(order, loop1, 4);
+
+ BasicBlock* loop2[] = {C, D};
+ CheckLoop(order, loop2, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopNest2) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+ BasicBlock* F = schedule.NewBasicBlock();
+ BasicBlock* G = schedule.NewBasicBlock();
+ BasicBlock* H = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(C, D);
+ schedule.AddSuccessorForTesting(D, E);
+ schedule.AddSuccessorForTesting(E, F);
+ schedule.AddSuccessorForTesting(F, G);
+ schedule.AddSuccessorForTesting(G, H);
+
+ schedule.AddSuccessorForTesting(E, D);
+ schedule.AddSuccessorForTesting(F, C);
+ schedule.AddSuccessorForTesting(G, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 8, true);
+ BasicBlock* loop1[] = {B, C, D, E, F, G};
+ CheckLoop(order, loop1, 6);
+
+ BasicBlock* loop2[] = {C, D, E, F};
+ CheckLoop(order, loop2, 4);
+
+ BasicBlock* loop3[] = {D, E};
+ CheckLoop(order, loop3, 2);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopFollow1) {
+ Schedule schedule(zone());
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopFollow2) {
+ Schedule schedule(zone());
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* S = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), S);
+ schedule.AddSuccessorForTesting(S, loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopFollowN) {
+ for (int size = 1; size < 5; size++) {
+ for (int exit = 0; exit < size; exit++) {
+ Schedule schedule(zone());
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[exit], loop2->header());
+ schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, NestedLoopFollow1) {
+ Schedule schedule(zone());
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+ base::SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
+ schedule.AddSuccessorForTesting(loop2->last(), C);
+ schedule.AddSuccessorForTesting(C, E);
+ schedule.AddSuccessorForTesting(C, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+
+ EXPECT_EQ(schedule.BasicBlockCount(), order->size());
+ CheckLoop(order, loop1->nodes, loop1->count);
+ CheckLoop(order, loop2->nodes, loop2->count);
+
+ BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
+ CheckLoop(order, loop3, 4);
+}
+
+
+TEST_F(SchedulerRPOTest, LoopBackedges1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ for (int j = 0; j < size; j++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopOutedges1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ for (int j = 0; j < size; j++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.end();
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
+ schedule.AddSuccessorForTesting(loop1->nodes[j], D);
+ schedule.AddSuccessorForTesting(D, E);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+ }
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopOutedges2) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ for (int j = 0; j < size; j++) {
+ BasicBlock* O = schedule.NewBasicBlock();
+ schedule.AddSuccessorForTesting(loop1->nodes[j], O);
+ schedule.AddSuccessorForTesting(O, E);
+ }
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopOutloops1) {
+ int size = 8;
+ for (int i = 0; i < size; i++) {
+ Schedule schedule(zone());
+ BasicBlock* A = schedule.start();
+ BasicBlock* E = schedule.end();
+ base::SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+ schedule.AddSuccessorForTesting(A, loop1->header());
+ schedule.AddSuccessorForTesting(loop1->last(), E);
+
+ TestLoop** loopN = new TestLoop* [size];
+ for (int j = 0; j < size; j++) {
+ loopN[j] = CreateLoop(&schedule, 2);
+ schedule.AddSuccessorForTesting(loop1->nodes[j], loopN[j]->header());
+ schedule.AddSuccessorForTesting(loopN[j]->last(), E);
+ }
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+ CheckLoop(order, loop1->nodes, loop1->count);
+
+ for (int j = 0; j < size; j++) {
+ CheckLoop(order, loopN[j]->nodes, loopN[j]->count);
+ delete loopN[j];
+ }
+ delete[] loopN;
+ }
+}
+
+
+TEST_F(SchedulerRPOTest, LoopMultibackedge) {
+ Schedule schedule(zone());
+
+ BasicBlock* A = schedule.start();
+ BasicBlock* B = schedule.NewBasicBlock();
+ BasicBlock* C = schedule.NewBasicBlock();
+ BasicBlock* D = schedule.NewBasicBlock();
+ BasicBlock* E = schedule.NewBasicBlock();
+
+ schedule.AddSuccessorForTesting(A, B);
+ schedule.AddSuccessorForTesting(B, C);
+ schedule.AddSuccessorForTesting(B, D);
+ schedule.AddSuccessorForTesting(B, E);
+ schedule.AddSuccessorForTesting(C, B);
+ schedule.AddSuccessorForTesting(D, B);
+ schedule.AddSuccessorForTesting(E, B);
+
+ BasicBlockVector* order = Scheduler::ComputeSpecialRPO(zone(), &schedule);
+ CheckRPONumbers(order, 5, true);
+
+ BasicBlock* loop1[] = {B, C, D, E};
+ CheckLoop(order, loop1, 4);
+}
+
+
+// -----------------------------------------------------------------------------
+// Graph end-to-end scheduling.
+
+
+TEST_F(SchedulerTest, BuildScheduleEmpty) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+ graph()->SetEnd(graph()->NewNode(common()->End(1), graph()->start()));
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+}
+
+
+TEST_F(SchedulerTest, BuildScheduleOneParameter) {
+ graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+ Node* p1 = graph()->NewNode(common()->Parameter(0), graph()->start());
+ Node* ret = graph()->NewNode(common()->Return(), p1, graph()->start(),
+ graph()->start());
+
+ graph()->SetEnd(graph()->NewNode(common()->End(1), ret));
+
+ USE(Scheduler::ComputeSchedule(zone(), graph(), Scheduler::kNoFlags));
+}
+
+
+namespace {
+
+Node* CreateDiamond(Graph* graph, CommonOperatorBuilder* common, Node* cond) {
+ Node* tv = graph->NewNode(common->Int32Constant(6));
+ Node* fv = graph->NewNode(common->Int32Constant(7));
+ Node* br = graph->NewNode(common->Branch(), cond, graph->start());
+ Node* t = graph->NewNode(common->IfTrue(), br);
+ Node* f = graph->NewNode(common->IfFalse(), br);
+ Node* m = graph->NewNode(common->Merge(2), t, f);
+ Node* phi =
+ graph->NewNode(common->Phi(MachineRepresentation::kTagged, 2), tv, fv, m);
+ return phi;
+}
+
+} // namespace
+
+
+TARGET_TEST_F(SchedulerTest, FloatingDiamond1) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* d1 = CreateDiamond(graph(), common(), p0);
+ Node* ret = graph()->NewNode(common()->Return(), d1, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(13);
+}
+
+
+TARGET_TEST_F(SchedulerTest, FloatingDiamond2) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* d1 = CreateDiamond(graph(), common(), p0);
+ Node* d2 = CreateDiamond(graph(), common(), p1);
+ Node* add = graph()->NewNode(&kIntAdd, d1, d2);
+ Node* ret = graph()->NewNode(common()->Return(), add, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(24);
+}
+
+
+TARGET_TEST_F(SchedulerTest, FloatingDiamond3) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* d1 = CreateDiamond(graph(), common(), p0);
+ Node* d2 = CreateDiamond(graph(), common(), p1);
+ Node* add = graph()->NewNode(&kIntAdd, d1, d2);
+ Node* d3 = CreateDiamond(graph(), common(), add);
+ Node* ret = graph()->NewNode(common()->Return(), d3, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(33);
+}
+
+
+TARGET_TEST_F(SchedulerTest, NestedFloatingDiamonds) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ Node* map = graph()->NewNode(
+ simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), p0, p0,
+ start, f);
+ Node* br1 = graph()->NewNode(common()->Branch(), map, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+ Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
+ Node* ttrue = graph()->NewNode(common()->Int32Constant(1));
+ Node* ffalse = graph()->NewNode(common()->Int32Constant(0));
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), ttrue, ffalse, m1);
+
+
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ fv, phi1, m);
+ Node* ephi1 = graph()->NewNode(common()->EffectPhi(2), start, map, m);
+
+ Node* ret = graph()->NewNode(common()->Return(), phi, ephi1, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(23);
+}
+
+
+TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithChain) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+
+ Node* brA1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* tA1 = graph()->NewNode(common()->IfTrue(), brA1);
+ Node* fA1 = graph()->NewNode(common()->IfFalse(), brA1);
+ Node* mA1 = graph()->NewNode(common()->Merge(2), tA1, fA1);
+ Node* phiA1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p1, mA1);
+
+ Node* brB1 = graph()->NewNode(common()->Branch(), p1, graph()->start());
+ Node* tB1 = graph()->NewNode(common()->IfTrue(), brB1);
+ Node* fB1 = graph()->NewNode(common()->IfFalse(), brB1);
+ Node* mB1 = graph()->NewNode(common()->Merge(2), tB1, fB1);
+ Node* phiB1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p1, mB1);
+
+ Node* brA2 = graph()->NewNode(common()->Branch(), phiB1, mA1);
+ Node* tA2 = graph()->NewNode(common()->IfTrue(), brA2);
+ Node* fA2 = graph()->NewNode(common()->IfFalse(), brA2);
+ Node* mA2 = graph()->NewNode(common()->Merge(2), tA2, fA2);
+ Node* phiA2 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), phiB1, c, mA2);
+
+ Node* brB2 = graph()->NewNode(common()->Branch(), phiA1, mB1);
+ Node* tB2 = graph()->NewNode(common()->IfTrue(), brB2);
+ Node* fB2 = graph()->NewNode(common()->IfFalse(), brB2);
+ Node* mB2 = graph()->NewNode(common()->Merge(2), tB2, fB2);
+ Node* phiB2 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), phiA1, c, mB2);
+
+ Node* add = graph()->NewNode(&kIntAdd, phiA2, phiB2);
+ Node* ret = graph()->NewNode(common()->Return(), add, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(36);
+}
+
+
+TARGET_TEST_F(SchedulerTest, NestedFloatingDiamondWithLoop) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ Node* loop = graph()->NewNode(common()->Loop(2), f, start);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
+
+ Node* add = graph()->NewNode(&kIntAdd, ind, fv);
+ Node* br1 = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+
+ loop->ReplaceInput(1, t1); // close loop.
+ ind->ReplaceInput(1, ind); // close induction variable.
+
+ Node* m = graph()->NewNode(common()->Merge(2), t, f1);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ fv, ind, m);
+
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(20);
+}
+
+
+TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond1) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+ Node* loop = graph()->NewNode(common()->Loop(2), start, start);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
+ Node* add = graph()->NewNode(&kIntAdd, ind, c);
+
+ Node* br = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+ Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), add, p0, m1);
+
+ loop->ReplaceInput(1, t); // close loop.
+ ind->ReplaceInput(1, phi1); // close induction variable.
+
+ Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* end = graph()->NewNode(common()->End(2), ret, f);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(20);
+}
+
+
+TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond2) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+ Node* loop = graph()->NewNode(common()->Loop(2), start, start);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
+
+ Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+ Node* m1 = graph()->NewNode(common()->Merge(2), t1, f1);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), c, ind, m1);
+
+ Node* add = graph()->NewNode(&kIntAdd, ind, phi1);
+
+ Node* br = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ loop->ReplaceInput(1, t); // close loop.
+ ind->ReplaceInput(1, add); // close induction variable.
+
+ Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* end = graph()->NewNode(common()->End(2), ret, f);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(20);
+}
+
+
+TARGET_TEST_F(SchedulerTest, LoopedFloatingDiamond3) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+
+ Node* c = graph()->NewNode(common()->Int32Constant(7));
+ Node* loop = graph()->NewNode(common()->Loop(2), start, start);
+ Node* ind = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ p0, p0, loop);
+
+ Node* br1 = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t1 = graph()->NewNode(common()->IfTrue(), br1);
+ Node* f1 = graph()->NewNode(common()->IfFalse(), br1);
+
+ Node* loop1 = graph()->NewNode(common()->Loop(2), t1, start);
+ Node* ind1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), p0, p0, loop);
+
+ Node* add1 = graph()->NewNode(&kIntAdd, ind1, c);
+ Node* br2 = graph()->NewNode(common()->Branch(), add1, loop1);
+ Node* t2 = graph()->NewNode(common()->IfTrue(), br2);
+ Node* f2 = graph()->NewNode(common()->IfFalse(), br2);
+
+ loop1->ReplaceInput(1, t2); // close inner loop.
+ ind1->ReplaceInput(1, ind1); // close inner induction variable.
+
+ Node* m1 = graph()->NewNode(common()->Merge(2), f1, f2);
+ Node* phi1 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), c, ind1, m1);
+
+ Node* add = graph()->NewNode(&kIntAdd, ind, phi1);
+
+ Node* br = graph()->NewNode(common()->Branch(), add, loop);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+
+ loop->ReplaceInput(1, t); // close loop.
+ ind->ReplaceInput(1, add); // close induction variable.
+
+ Node* ret = graph()->NewNode(common()->Return(), ind, start, f);
+ Node* end = graph()->NewNode(common()->End(2), ret, f);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(28);
+}
+
+
+TARGET_TEST_F(SchedulerTest, PhisPushedDownToDifferentBranches) {
+ Node* start = graph()->NewNode(common()->Start(2));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* p1 = graph()->NewNode(common()->Parameter(1), start);
+
+ Node* v1 = graph()->NewNode(common()->Int32Constant(1));
+ Node* v2 = graph()->NewNode(common()->Int32Constant(2));
+ Node* v3 = graph()->NewNode(common()->Int32Constant(3));
+ Node* v4 = graph()->NewNode(common()->Int32Constant(4));
+ Node* br = graph()->NewNode(common()->Branch(), p0, graph()->start());
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ v1, v2, m);
+ Node* phi2 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), v3, v4, m);
+
+ Node* br2 = graph()->NewNode(common()->Branch(), p1, graph()->start());
+ Node* t2 = graph()->NewNode(common()->IfTrue(), br2);
+ Node* f2 = graph()->NewNode(common()->IfFalse(), br2);
+ Node* m2 = graph()->NewNode(common()->Merge(2), t2, f2);
+ Node* phi3 = graph()->NewNode(
+ common()->Phi(MachineRepresentation::kTagged, 2), phi, phi2, m2);
+
+ Node* ret = graph()->NewNode(common()->Return(), phi3, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(24);
+}
+
+
+TARGET_TEST_F(SchedulerTest, BranchHintTrue) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* tv = graph()->NewNode(common()->Int32Constant(6));
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(BranchHint::kTrue), p0, start);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ tv, fv, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ Schedule* schedule = ComputeAndVerifySchedule(13);
+ // Make sure the false block is marked as deferred.
+ EXPECT_FALSE(schedule->block(t)->deferred());
+ EXPECT_TRUE(schedule->block(f)->deferred());
+}
+
+
+TARGET_TEST_F(SchedulerTest, BranchHintFalse) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* tv = graph()->NewNode(common()->Int32Constant(6));
+ Node* fv = graph()->NewNode(common()->Int32Constant(7));
+ Node* br = graph()->NewNode(common()->Branch(BranchHint::kFalse), p0, start);
+ Node* t = graph()->NewNode(common()->IfTrue(), br);
+ Node* f = graph()->NewNode(common()->IfFalse(), br);
+ Node* m = graph()->NewNode(common()->Merge(2), t, f);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ tv, fv, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ Schedule* schedule = ComputeAndVerifySchedule(13);
+ // Make sure the true block is marked as deferred.
+ EXPECT_TRUE(schedule->block(t)->deferred());
+ EXPECT_FALSE(schedule->block(f)->deferred());
+}
+
+
+TARGET_TEST_F(SchedulerTest, CallException) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* c1 = graph()->NewNode(&kMockCall, start);
+ Node* ok1 = graph()->NewNode(common()->IfSuccess(), c1);
+ Node* ex1 = graph()->NewNode(
+ common()->IfException(IfExceptionHint::kLocallyUncaught), c1, c1);
+ Node* c2 = graph()->NewNode(&kMockCall, ok1);
+ Node* ok2 = graph()->NewNode(common()->IfSuccess(), c2);
+ Node* ex2 = graph()->NewNode(
+ common()->IfException(IfExceptionHint::kLocallyUncaught), c2, c2);
+ Node* hdl = graph()->NewNode(common()->Merge(2), ex1, ex2);
+ Node* m = graph()->NewNode(common()->Merge(2), ok2, hdl);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
+ c2, p0, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ Schedule* schedule = ComputeAndVerifySchedule(17);
+ // Make sure the exception blocks as well as the handler are deferred.
+ EXPECT_TRUE(schedule->block(ex1)->deferred());
+ EXPECT_TRUE(schedule->block(ex2)->deferred());
+ EXPECT_TRUE(schedule->block(hdl)->deferred());
+ EXPECT_FALSE(schedule->block(m)->deferred());
+}
+
+
+TARGET_TEST_F(SchedulerTest, TailCall) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* call = graph()->NewNode(&kMockTailCall, p0, start, start);
+ Node* end = graph()->NewNode(common()->End(1), call);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(4);
+}
+
+
+TARGET_TEST_F(SchedulerTest, Switch) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* sw = graph()->NewNode(common()->Switch(3), p0, start);
+ Node* c0 = graph()->NewNode(common()->IfValue(0), sw);
+ Node* v0 = graph()->NewNode(common()->Int32Constant(11));
+ Node* c1 = graph()->NewNode(common()->IfValue(1), sw);
+ Node* v1 = graph()->NewNode(common()->Int32Constant(22));
+ Node* d = graph()->NewNode(common()->IfDefault(), sw);
+ Node* vd = graph()->NewNode(common()->Int32Constant(33));
+ Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 3),
+ v0, v1, vd, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, m);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(16);
+}
+
+
+TARGET_TEST_F(SchedulerTest, FloatingSwitch) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* p0 = graph()->NewNode(common()->Parameter(0), start);
+ Node* sw = graph()->NewNode(common()->Switch(3), p0, start);
+ Node* c0 = graph()->NewNode(common()->IfValue(0), sw);
+ Node* v0 = graph()->NewNode(common()->Int32Constant(11));
+ Node* c1 = graph()->NewNode(common()->IfValue(1), sw);
+ Node* v1 = graph()->NewNode(common()->Int32Constant(22));
+ Node* d = graph()->NewNode(common()->IfDefault(), sw);
+ Node* vd = graph()->NewNode(common()->Int32Constant(33));
+ Node* m = graph()->NewNode(common()->Merge(3), c0, c1, d);
+ Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 3),
+ v0, v1, vd, m);
+ Node* ret = graph()->NewNode(common()->Return(), phi, start, start);
+ Node* end = graph()->NewNode(common()->End(1), ret);
+
+ graph()->SetEnd(end);
+
+ ComputeAndVerifySchedule(16);
+}
+
+
+TARGET_TEST_F(SchedulerTest, Terminate) {
+ Node* start = graph()->NewNode(common()->Start(1));
+ graph()->SetStart(start);
+
+ Node* loop = graph()->NewNode(common()->Loop(2), start, start);
+ loop->ReplaceInput(1, loop); // self loop, NTL.
+
+ Node* effect = graph()->NewNode(common()->EffectPhi(2), start, start, loop);
+ effect->ReplaceInput(1, effect); // self loop.
+
+ Node* terminate = graph()->NewNode(common()->Terminate(), effect, loop);
+ Node* end = graph()->NewNode(common()->End(1), terminate);
+ graph()->SetEnd(end);
+
+ Schedule* schedule = ComputeAndVerifySchedule(6);
+ BasicBlock* block = schedule->block(loop);
+ EXPECT_EQ(block, schedule->block(effect));
+ EXPECT_GE(block->rpo_number(), 0);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/select-lowering-unittest.cc b/test/unittests/compiler/select-lowering-unittest.cc
index 51efc83..43cfd84 100644
--- a/test/unittests/compiler/select-lowering-unittest.cc
+++ b/test/unittests/compiler/select-lowering-unittest.cc
@@ -34,7 +34,8 @@
Node* const p2 = Parameter(2);
Node* const p3 = Parameter(3);
Node* const p4 = Parameter(4);
- Node* const s0 = graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2);
+ Node* const s0 = graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32), p0, p1, p2);
Capture<Node*> branch;
Capture<Node*> merge;
@@ -44,26 +45,27 @@
EXPECT_THAT(
r.replacement(),
IsPhi(
- kMachInt32, p1, p2,
+ MachineRepresentation::kWord32, p1, p2,
AllOf(CaptureEq(&merge),
IsMerge(IsIfTrue(CaptureEq(&branch)),
IsIfFalse(AllOf(CaptureEq(&branch),
IsBranch(p0, graph()->start())))))));
}
{
- Reduction const r =
- Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p3, p4));
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32), p0, p3, p4));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(), IsPhi(kMachInt32, p3, p4, CaptureEq(&merge)));
+ EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kWord32, p3, p4,
+ CaptureEq(&merge)));
}
{
// We must not reuse the diamond if it is reachable from either else/then
// values of the Select, because the resulting graph can not be scheduled.
- Reduction const r =
- Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, s0, p0));
+ Reduction const r = Reduce(graph()->NewNode(
+ common()->Select(MachineRepresentation::kWord32), p0, s0, p0));
ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsPhi(kMachInt32, s0, p0, Not(CaptureEq(&merge))));
+ EXPECT_THAT(r.replacement(), IsPhi(MachineRepresentation::kWord32, s0, p0,
+ Not(CaptureEq(&merge))));
}
}
diff --git a/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index e5f46c0..f571898 100644
--- a/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -4,10 +4,10 @@
#include "src/compiler/access-builder.h"
#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/simplified-operator-reducer.h"
-#include "src/conversions.h"
+#include "src/conversions-inl.h"
#include "src/types.h"
#include "test/unittests/compiler/graph-unittest.h"
#include "test/unittests/compiler/node-test-utils.h"
@@ -24,13 +24,14 @@
public:
explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
: TypedGraphTest(num_parameters), simplified_(zone()) {}
- ~SimplifiedOperatorReducerTest() OVERRIDE {}
+ ~SimplifiedOperatorReducerTest() override {}
protected:
Reduction Reduce(Node* node) {
MachineOperatorBuilder machine(zone());
JSOperatorBuilder javascript(zone());
- JSGraph jsgraph(graph(), common(), &javascript, &machine);
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, simplified(),
+ &machine);
SimplifiedOperatorReducer reducer(&jsgraph);
return reducer.Reduce(node);
}
@@ -49,59 +50,48 @@
public:
explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
: SimplifiedOperatorReducerTest(num_parameters) {}
- ~SimplifiedOperatorReducerTestWithParam() OVERRIDE {}
+ ~SimplifiedOperatorReducerTestWithParam() override {}
};
namespace {
-static const double kFloat64Values[] = {
- -V8_INFINITY, -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
+const double kFloat64Values[] = {
+ -V8_INFINITY, -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
-8.22758e+266, -1.58402e+261, -5.15246e+241, -5.92107e+226, -1.21477e+226,
- -1.67913e+188, -1.6257e+184, -2.60043e+170, -2.52941e+168, -3.06033e+116,
- -4.56201e+52, -3.56788e+50, -9.9066e+38, -3.07261e+31, -2.1271e+09,
- -1.91489e+09, -1.73053e+09, -9.30675e+08, -26030, -20453,
- -15790, -11699, -111, -97, -78,
- -63, -58, -1.53858e-06, -2.98914e-12, -1.14741e-39,
- -8.20347e-57, -1.48932e-59, -3.17692e-66, -8.93103e-81, -3.91337e-83,
- -6.0489e-92, -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
+ -1.67913e+188, -1.6257e+184, -2.60043e+170, -2.52941e+168, -3.06033e+116,
+ -4.56201e+52, -3.56788e+50, -9.9066e+38, -3.07261e+31, -2.1271e+09,
+ -1.91489e+09, -1.73053e+09, -9.30675e+08, -26030, -20453, -15790, -11699,
+ -111, -97, -78, -63, -58, -1.53858e-06, -2.98914e-12, -1.14741e-39,
+ -8.20347e-57, -1.48932e-59, -3.17692e-66, -8.93103e-81, -3.91337e-83,
+ -6.0489e-92, -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
-1.68167e-194, -1.51841e-214, -3.98738e-234, -7.31851e-242, -2.21875e-253,
- -1.11612e-293, -0.0, 0.0, 2.22507e-308, 1.06526e-307,
- 4.16643e-227, 6.76624e-223, 2.0432e-197, 3.16254e-184, 1.37315e-173,
- 2.88603e-172, 1.54155e-99, 4.42923e-81, 1.40539e-73, 5.4462e-73,
- 1.24064e-58, 3.11167e-58, 2.75826e-39, 0.143815, 58,
- 67, 601, 7941, 11644, 13697,
- 25680, 29882, 1.32165e+08, 1.62439e+08, 4.16837e+08,
- 9.59097e+08, 1.32491e+09, 1.8728e+09, 1.0672e+17, 2.69606e+46,
- 1.98285e+79, 1.0098e+82, 7.93064e+88, 3.67444e+121, 9.36506e+123,
- 7.27954e+162, 3.05316e+168, 1.16171e+175, 1.64771e+189, 1.1622e+202,
- 2.00748e+239, 2.51778e+244, 3.90282e+306, 1.79769e+308, V8_INFINITY};
+ -1.11612e-293, -0.0, 0.0, 2.22507e-308, 1.06526e-307, 4.16643e-227,
+ 6.76624e-223, 2.0432e-197, 3.16254e-184, 1.37315e-173, 2.88603e-172,
+ 1.54155e-99, 4.42923e-81, 1.40539e-73, 5.4462e-73, 1.24064e-58, 3.11167e-58,
+ 2.75826e-39, 0.143815, 58, 67, 601, 7941, 11644, 13697, 25680, 29882,
+ 1.32165e+08, 1.62439e+08, 4.16837e+08, 9.59097e+08, 1.32491e+09, 1.8728e+09,
+ 1.0672e+17, 2.69606e+46, 1.98285e+79, 1.0098e+82, 7.93064e+88, 3.67444e+121,
+ 9.36506e+123, 7.27954e+162, 3.05316e+168, 1.16171e+175, 1.64771e+189,
+ 1.1622e+202, 2.00748e+239, 2.51778e+244, 3.90282e+306, 1.79769e+308,
+ V8_INFINITY};
-static const int32_t kInt32Values[] = {
+const int32_t kInt32Values[] = {
-2147483647 - 1, -2104508227, -2103151830, -1435284490, -1378926425,
- -1318814539, -1289388009, -1287537572, -1279026536, -1241605942,
- -1226046939, -941837148, -779818051, -413830641, -245798087,
- -184657557, -127145950, -105483328, -32325, -26653,
- -23858, -23834, -22363, -19858, -19044,
- -18744, -15528, -5309, -3372, -2093,
- -104, -98, -97, -93, -84,
- -80, -78, -76, -72, -58,
- -57, -56, -55, -45, -40,
- -34, -32, -25, -24, -5,
- -2, 0, 3, 10, 24,
- 34, 42, 46, 47, 48,
- 52, 56, 64, 65, 71,
- 76, 79, 81, 82, 97,
- 102, 103, 104, 106, 107,
- 109, 116, 122, 3653, 4485,
- 12405, 16504, 26262, 28704, 29755,
- 30554, 16476817, 605431957, 832401070, 873617242,
- 914205764, 1062628108, 1087581664, 1488498068, 1534668023,
- 1661587028, 1696896187, 1866841746, 2032089723, 2147483647};
+ -1318814539, -1289388009, -1287537572, -1279026536, -1241605942,
+ -1226046939, -941837148, -779818051, -413830641, -245798087, -184657557,
+ -127145950, -105483328, -32325, -26653, -23858, -23834, -22363, -19858,
+ -19044, -18744, -15528, -5309, -3372, -2093, -104, -98, -97, -93, -84, -80,
+ -78, -76, -72, -58, -57, -56, -55, -45, -40, -34, -32, -25, -24, -5, -2, 0,
+ 3, 10, 24, 34, 42, 46, 47, 48, 52, 56, 64, 65, 71, 76, 79, 81, 82, 97, 102,
+ 103, 104, 106, 107, 109, 116, 122, 3653, 4485, 12405, 16504, 26262, 28704,
+ 29755, 30554, 16476817, 605431957, 832401070, 873617242, 914205764,
+ 1062628108, 1087581664, 1488498068, 1534668023, 1661587028, 1696896187,
+ 1866841746, 2032089723, 2147483647};
-static const uint32_t kUint32Values[] = {
+const uint32_t kUint32Values[] = {
0x0, 0x5, 0x8, 0xc, 0xd, 0x26,
0x28, 0x29, 0x30, 0x34, 0x3e, 0x42,
0x50, 0x5b, 0x63, 0x71, 0x77, 0x7c,
@@ -120,93 +110,15 @@
0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
-} // namespace
-
-// -----------------------------------------------------------------------------
-// Unary operators
-
-
-namespace {
-
-struct UnaryOperator {
- const Operator* (SimplifiedOperatorBuilder::*constructor)();
- const char* constructor_name;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
- return os << unop.constructor_name;
-}
-
-
-static const UnaryOperator kUnaryOperators[] = {
- {&SimplifiedOperatorBuilder::AnyToBoolean, "AnyToBoolean"},
- {&SimplifiedOperatorBuilder::BooleanNot, "BooleanNot"},
- {&SimplifiedOperatorBuilder::ChangeBitToBool, "ChangeBitToBool"},
- {&SimplifiedOperatorBuilder::ChangeBoolToBit, "ChangeBoolToBit"},
- {&SimplifiedOperatorBuilder::ChangeFloat64ToTagged,
- "ChangeFloat64ToTagged"},
- {&SimplifiedOperatorBuilder::ChangeInt32ToTagged, "ChangeInt32ToTagged"},
- {&SimplifiedOperatorBuilder::ChangeTaggedToFloat64,
- "ChangeTaggedToFloat64"},
- {&SimplifiedOperatorBuilder::ChangeTaggedToInt32, "ChangeTaggedToInt32"},
- {&SimplifiedOperatorBuilder::ChangeTaggedToUint32, "ChangeTaggedToUint32"},
- {&SimplifiedOperatorBuilder::ChangeUint32ToTagged, "ChangeUint32ToTagged"}};
+const double kNaNs[] = {-std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::quiet_NaN(),
+ bit_cast<double>(V8_UINT64_C(0x7FFFFFFFFFFFFFFF)),
+ bit_cast<double>(V8_UINT64_C(0xFFFFFFFFFFFFFFFF))};
} // namespace
-typedef SimplifiedOperatorReducerTestWithParam<UnaryOperator>
- SimplifiedUnaryOperatorTest;
-
-
-TEST_P(SimplifiedUnaryOperatorTest, Parameter) {
- const UnaryOperator& unop = GetParam();
- Reduction reduction = Reduce(graph()->NewNode(
- (simplified()->*unop.constructor)(), Parameter(Type::Any())));
- EXPECT_FALSE(reduction.Changed());
-}
-
-
-INSTANTIATE_TEST_CASE_P(SimplifiedOperatorReducerTest,
- SimplifiedUnaryOperatorTest,
- ::testing::ValuesIn(kUnaryOperators));
-
-
-// -----------------------------------------------------------------------------
-// AnyToBoolean
-
-
-TEST_F(SimplifiedOperatorReducerTest, AnyToBooleanWithBoolean) {
- Node* p = Parameter(Type::Boolean());
- Reduction r = Reduce(graph()->NewNode(simplified()->AnyToBoolean(), p));
- ASSERT_TRUE(r.Changed());
- EXPECT_EQ(p, r.replacement());
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, AnyToBooleanWithOrderedNumber) {
- Node* p = Parameter(Type::OrderedNumber());
- Reduction r = Reduce(graph()->NewNode(simplified()->AnyToBoolean(), p));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsBooleanNot(IsNumberEqual(p, IsNumberConstant(0))));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, AnyToBooleanWithString) {
- Node* p = Parameter(Type::String());
- Reduction r = Reduce(graph()->NewNode(simplified()->AnyToBoolean(), p));
- ASSERT_TRUE(r.Changed());
- EXPECT_THAT(r.replacement(),
- IsBooleanNot(
- IsNumberEqual(IsLoadField(AccessBuilder::ForStringLength(), p,
- graph()->start(), graph()->start()),
- IsNumberConstant(0))));
-}
-
-
// -----------------------------------------------------------------------------
// BooleanNot
@@ -372,23 +284,13 @@
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant1) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
- NumberConstant(-base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsFloat64Constant(BitEq(-base::OS::nan_value())));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant2) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
- NumberConstant(base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(),
- IsFloat64Constant(BitEq(base::OS::nan_value())));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant) {
+ TRACED_FOREACH(double, nan, kNaNs) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToFloat64(), NumberConstant(nan)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsFloat64Constant(BitEq(nan)));
+ }
}
@@ -428,21 +330,13 @@
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant1) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
- NumberConstant(-base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant2) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
- NumberConstant(base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant) {
+ TRACED_FOREACH(double, nan, kNaNs) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToInt32(), NumberConstant(nan)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ }
}
@@ -483,21 +377,13 @@
}
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant1) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
- NumberConstant(-base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
-}
-
-
-TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant2) {
- Reduction reduction =
- Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
- NumberConstant(base::OS::nan_value())));
- ASSERT_TRUE(reduction.Changed());
- EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant) {
+ TRACED_FOREACH(double, nan, kNaNs) {
+ Reduction reduction = Reduce(graph()->NewNode(
+ simplified()->ChangeTaggedToUint32(), NumberConstant(nan)));
+ ASSERT_TRUE(reduction.Changed());
+ EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+ }
}
diff --git a/test/unittests/compiler/simplified-operator-unittest.cc b/test/unittests/compiler/simplified-operator-unittest.cc
index bc537fd..871189a 100644
--- a/test/unittests/compiler/simplified-operator-unittest.cc
+++ b/test/unittests/compiler/simplified-operator-unittest.cc
@@ -38,7 +38,6 @@
&SimplifiedOperatorBuilder::Name, IrOpcode::k##Name, \
Operator::kPure | properties, input_count \
}
- PURE(AnyToBoolean, Operator::kNoProperties, 1),
PURE(BooleanNot, Operator::kNoProperties, 1),
PURE(BooleanToNumber, Operator::kNoProperties, 1),
PURE(NumberEqual, Operator::kCommutative, 2),
@@ -49,12 +48,15 @@
PURE(NumberMultiply, Operator::kCommutative, 2),
PURE(NumberDivide, Operator::kNoProperties, 2),
PURE(NumberModulus, Operator::kNoProperties, 2),
+ PURE(NumberBitwiseOr, Operator::kCommutative, 2),
+ PURE(NumberBitwiseXor, Operator::kCommutative, 2),
+ PURE(NumberBitwiseAnd, Operator::kCommutative, 2),
+ PURE(NumberShiftLeft, Operator::kNoProperties, 2),
+ PURE(NumberShiftRight, Operator::kNoProperties, 2),
+ PURE(NumberShiftRightLogical, Operator::kNoProperties, 2),
PURE(NumberToInt32, Operator::kNoProperties, 1),
PURE(NumberToUint32, Operator::kNoProperties, 1),
- PURE(StringEqual, Operator::kCommutative, 2),
- PURE(StringLessThan, Operator::kNoProperties, 2),
- PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
- PURE(StringAdd, Operator::kNoProperties, 2),
+ PURE(PlainPrimitiveToNumber, Operator::kNoProperties, 1),
PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
@@ -63,8 +65,7 @@
PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
PURE(ChangeBitToBool, Operator::kNoProperties, 1),
- PURE(ObjectIsSmi, Operator::kNoProperties, 1),
- PURE(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
+ PURE(ObjectIsSmi, Operator::kNoProperties, 1)
#undef PURE
};
@@ -199,37 +200,40 @@
namespace {
const ElementAccess kElementAccesses[] = {
- {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
- {kUntaggedBase, 0, Type::Any(), kMachInt8},
- {kUntaggedBase, 0, Type::Any(), kMachInt16},
- {kUntaggedBase, 0, Type::Any(), kMachInt32},
- {kUntaggedBase, 0, Type::Any(), kMachUint8},
- {kUntaggedBase, 0, Type::Any(), kMachUint16},
- {kUntaggedBase, 0, Type::Any(), kMachUint32},
- {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
- {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
- {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
- {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
- {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
- {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
- {kUntaggedBase, 0, Type::Number(), kRepFloat32},
- {kUntaggedBase, 0, Type::Number(), kRepFloat64},
+ {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int8()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int16()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Int32()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint8()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint16()},
+ {kUntaggedBase, 0, Type::Any(), MachineType::Uint32()},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int8()},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint8()},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int16()},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint16()},
+ {kUntaggedBase, 0, Type::Signed32(), MachineType::Int32()},
+ {kUntaggedBase, 0, Type::Unsigned32(), MachineType::Uint32()},
+ {kUntaggedBase, 0, Type::Number(),
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
+ {kUntaggedBase, 0, Type::Number(),
+ MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone)},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- kMachInt8},
+ MachineType::Int8()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- kMachUint8},
+ MachineType::Uint8()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- kMachInt16},
+ MachineType::Int16()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- kMachUint16},
+ MachineType::Uint16()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
- kMachInt32},
+ MachineType::Int32()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
- kMachUint32},
+ MachineType::Uint32()},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- kRepFloat32},
+ MachineType(MachineRepresentation::kFloat32, MachineSemantic::kNone)},
{kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
- kRepFloat64}};
+       MachineType(MachineRepresentation::kFloat64, MachineSemantic::kNone)}};
} // namespace
diff --git a/test/unittests/compiler/state-values-utils-unittest.cc b/test/unittests/compiler/state-values-utils-unittest.cc
new file mode 100644
index 0000000..311b90a
--- /dev/null
+++ b/test/unittests/compiler/state-values-utils-unittest.cc
@@ -0,0 +1,151 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/state-values-utils.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class StateValuesIteratorTest : public GraphTest {
+ public:
+ StateValuesIteratorTest() : GraphTest(3) {}
+
+ Node* StateValuesFromVector(NodeVector* nodes) {
+ int count = static_cast<int>(nodes->size());
+ return graph()->NewNode(common()->StateValues(count), count,
+ count == 0 ? nullptr : &(nodes->front()));
+ }
+};
+
+
+TEST_F(StateValuesIteratorTest, SimpleIteration) {
+ NodeVector inputs(zone());
+ const int count = 10;
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+ Node* state_values = StateValuesFromVector(&inputs);
+ int i = 0;
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(state_values)) {
+ EXPECT_THAT(node.node, IsInt32Constant(i));
+ i++;
+ }
+ EXPECT_EQ(count, i);
+}
+
+
+TEST_F(StateValuesIteratorTest, EmptyIteration) {
+ NodeVector inputs(zone());
+ Node* state_values = StateValuesFromVector(&inputs);
+ for (auto node : StateValuesAccess(state_values)) {
+ USE(node);
+ FAIL();
+ }
+}
+
+
+TEST_F(StateValuesIteratorTest, NestedIteration) {
+ NodeVector inputs(zone());
+ int count = 0;
+ for (int i = 0; i < 8; i++) {
+ if (i == 2) {
+ // Single nested in index 2.
+ NodeVector nested_inputs(zone());
+ for (int j = 0; j < 8; j++) {
+ nested_inputs.push_back(Int32Constant(count++));
+ }
+ inputs.push_back(StateValuesFromVector(&nested_inputs));
+ } else if (i == 5) {
+ // Double nested at index 5.
+ NodeVector nested_inputs(zone());
+ for (int j = 0; j < 8; j++) {
+ if (j == 7) {
+ NodeVector doubly_nested_inputs(zone());
+ for (int k = 0; k < 2; k++) {
+ doubly_nested_inputs.push_back(Int32Constant(count++));
+ }
+ nested_inputs.push_back(StateValuesFromVector(&doubly_nested_inputs));
+ } else {
+ nested_inputs.push_back(Int32Constant(count++));
+ }
+ }
+ inputs.push_back(StateValuesFromVector(&nested_inputs));
+ } else {
+ inputs.push_back(Int32Constant(count++));
+ }
+ }
+ Node* state_values = StateValuesFromVector(&inputs);
+ int i = 0;
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(state_values)) {
+ EXPECT_THAT(node.node, IsInt32Constant(i));
+ i++;
+ }
+ EXPECT_EQ(count, i);
+}
+
+
+TEST_F(StateValuesIteratorTest, TreeFromVector) {
+ int sizes[] = {0, 1, 2, 100, 5000, 30000};
+ TRACED_FOREACH(int, count, sizes) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
+ &machine);
+
+ // Generate the input vector.
+ NodeVector inputs(zone());
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+
+ // Build the tree.
+ StateValuesCache builder(&jsgraph);
+ Node* values_node = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+
+ // Check the tree contents with vector.
+ int i = 0;
+ for (StateValuesAccess::TypedNode node : StateValuesAccess(values_node)) {
+ EXPECT_THAT(node.node, IsInt32Constant(i));
+ i++;
+ }
+ EXPECT_EQ(inputs.size(), static_cast<size_t>(i));
+ }
+}
+
+
+TEST_F(StateValuesIteratorTest, BuildTreeIdentical) {
+ int sizes[] = {0, 1, 2, 100, 5000, 30000};
+ TRACED_FOREACH(int, count, sizes) {
+ JSOperatorBuilder javascript(zone());
+ MachineOperatorBuilder machine(zone());
+ JSGraph jsgraph(isolate(), graph(), common(), &javascript, nullptr,
+ &machine);
+
+ // Generate the input vector.
+ NodeVector inputs(zone());
+ for (int i = 0; i < count; i++) {
+ inputs.push_back(Int32Constant(i));
+ }
+
+ // Build two trees from the same data.
+ StateValuesCache builder(&jsgraph);
+ Node* node1 = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+ Node* node2 = builder.GetNodeForValues(
+ inputs.size() == 0 ? nullptr : &(inputs.front()), inputs.size());
+
+ // The trees should be equal since the data was the same.
+ EXPECT_EQ(node1, node2);
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/tail-call-optimization-unittest.cc b/test/unittests/compiler/tail-call-optimization-unittest.cc
new file mode 100644
index 0000000..3441c68
--- /dev/null
+++ b/test/unittests/compiler/tail-call-optimization-unittest.cc
@@ -0,0 +1,174 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/tail-call-optimization.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class TailCallOptimizationTest : public GraphTest {
+ public:
+ explicit TailCallOptimizationTest(int num_parameters = 1)
+ : GraphTest(num_parameters) {}
+ ~TailCallOptimizationTest() override {}
+
+ protected:
+ Reduction Reduce(Node* node) {
+ TailCallOptimization tco(common(), graph());
+ return tco.Reduce(node);
+ }
+};
+
+
+TEST_F(TailCallOptimizationTest, CallCodeObject0) {
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallCodeObject1) {
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* if_exception = graph()->NewNode(
+ common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* end = graph()->NewNode(common()->End(1), if_exception);
+ graph()->SetEnd(end);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallCodeObject2) {
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallCodeObject, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTailCall(kCallDescriptor, p0, p1,
+ graph()->start(), graph()->start()));
+}
+
+
+TEST_F(TailCallOptimizationTest, CallJSFunction0) {
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kNoFlags);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallJSFunction1) {
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* if_exception = graph()->NewNode(
+ common()->IfException(IfExceptionHint::kLocallyUncaught), call, call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Node* end = graph()->NewNode(common()->End(1), if_exception);
+ graph()->SetEnd(end);
+ Reduction r = Reduce(ret);
+ ASSERT_FALSE(r.Changed());
+}
+
+
+TEST_F(TailCallOptimizationTest, CallJSFunction2) {
+ MachineType kMachineSignature[] = {MachineType::AnyTagged(),
+ MachineType::AnyTagged()};
+ LinkageLocation kLocationSignature[] = {LinkageLocation::ForRegister(0),
+ LinkageLocation::ForRegister(1)};
+ const CallDescriptor* kCallDescriptor = new (zone()) CallDescriptor(
+ CallDescriptor::kCallJSFunction, MachineType::AnyTagged(),
+ LinkageLocation::ForRegister(0),
+ new (zone()) MachineSignature(1, 1, kMachineSignature),
+ new (zone()) LocationSignature(1, 1, kLocationSignature), 0,
+ Operator::kNoProperties, 0, 0, CallDescriptor::kSupportsTailCalls);
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ Node* call = graph()->NewNode(common()->Call(kCallDescriptor), p0, p1,
+ graph()->start(), graph()->start());
+ Node* if_success = graph()->NewNode(common()->IfSuccess(), call);
+ Node* ret = graph()->NewNode(common()->Return(), call, call, if_success);
+ Reduction r = Reduce(ret);
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(), IsTailCall(kCallDescriptor, p0, p1,
+ graph()->start(), graph()->start()));
+}
+
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/typer-unittest.cc b/test/unittests/compiler/typer-unittest.cc
new file mode 100644
index 0000000..6e4d4d5
--- /dev/null
+++ b/test/unittests/compiler/typer-unittest.cc
@@ -0,0 +1,435 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <functional>
+
+#include "src/codegen.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/operator-properties.h"
+#include "test/cctest/types-fuzz.h"
+#include "test/unittests/compiler/graph-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(titzer): generate a large set of deterministic inputs for these tests.
+class TyperTest : public TypedGraphTest {
+ public:
+ TyperTest()
+ : TypedGraphTest(3),
+ types_(zone(), isolate(), random_number_generator()),
+ javascript_(zone()) {
+ context_node_ = graph()->NewNode(common()->Parameter(2), graph()->start());
+ rng_ = random_number_generator();
+
+ integers.push_back(0);
+ integers.push_back(0);
+ integers.push_back(-1);
+ integers.push_back(+1);
+ integers.push_back(-V8_INFINITY);
+ integers.push_back(+V8_INFINITY);
+ for (int i = 0; i < 5; ++i) {
+ double x = rng_->NextInt();
+ integers.push_back(x);
+ x *= rng_->NextInt();
+ if (!IsMinusZero(x)) integers.push_back(x);
+ }
+
+ int32s.push_back(0);
+ int32s.push_back(0);
+ int32s.push_back(-1);
+ int32s.push_back(+1);
+ int32s.push_back(kMinInt);
+ int32s.push_back(kMaxInt);
+ for (int i = 0; i < 10; ++i) {
+ int32s.push_back(rng_->NextInt());
+ }
+ }
+
+ Types<Type, Type*, Zone> types_;
+ JSOperatorBuilder javascript_;
+ BinaryOperationHints const hints_ = BinaryOperationHints::Any();
+ Node* context_node_;
+ v8::base::RandomNumberGenerator* rng_;
+ std::vector<double> integers;
+ std::vector<double> int32s;
+
+ Type* TypeBinaryOp(const Operator* op, Type* lhs, Type* rhs) {
+ Node* p0 = Parameter(0);
+ Node* p1 = Parameter(1);
+ NodeProperties::SetType(p0, lhs);
+ NodeProperties::SetType(p1, rhs);
+ std::vector<Node*> inputs;
+ inputs.push_back(p0);
+ inputs.push_back(p1);
+ if (OperatorProperties::HasContextInput(op)) {
+ inputs.push_back(context_node_);
+ }
+ for (int i = 0; i < OperatorProperties::GetFrameStateInputCount(op); i++) {
+ inputs.push_back(EmptyFrameState());
+ }
+ for (int i = 0; i < op->EffectInputCount(); i++) {
+ inputs.push_back(graph()->start());
+ }
+ for (int i = 0; i < op->ControlInputCount(); i++) {
+ inputs.push_back(graph()->start());
+ }
+ Node* n = graph()->NewNode(op, static_cast<int>(inputs.size()),
+ &(inputs.front()));
+ return NodeProperties::GetType(n);
+ }
+
+ Type* RandomRange(bool int32 = false) {
+ std::vector<double>& numbers = int32 ? int32s : integers;
+ double i = numbers[rng_->NextInt(static_cast<int>(numbers.size()))];
+ double j = numbers[rng_->NextInt(static_cast<int>(numbers.size()))];
+ return NewRange(i, j);
+ }
+
+ Type* NewRange(double i, double j) {
+ if (i > j) std::swap(i, j);
+ return Type::Range(i, j, zone());
+ }
+
+ double RandomInt(double min, double max) {
+ switch (rng_->NextInt(4)) {
+ case 0:
+ return min;
+ case 1:
+ return max;
+ default:
+ break;
+ }
+ if (min == +V8_INFINITY) return +V8_INFINITY;
+ if (max == -V8_INFINITY) return -V8_INFINITY;
+ if (min == -V8_INFINITY && max == +V8_INFINITY) {
+ return rng_->NextInt() * static_cast<double>(rng_->NextInt());
+ }
+ double result = nearbyint(min + (max - min) * rng_->NextDouble());
+ if (IsMinusZero(result)) return 0;
+ if (std::isnan(result)) return rng_->NextInt(2) ? min : max;
+ DCHECK(min <= result && result <= max);
+ return result;
+ }
+
+ double RandomInt(Type::RangeType* range) {
+ return RandomInt(range->Min(), range->Max());
+ }
+
+ // Careful, this function runs O(max_width^5) trials.
+ template <class BinaryFunction>
+ void TestBinaryArithOpCloseToZero(const Operator* op, BinaryFunction opfun,
+ int max_width) {
+ const int min_min = -2 - max_width / 2;
+ const int max_min = 2 + max_width / 2;
+ for (int width = 0; width < max_width; width++) {
+ for (int lmin = min_min; lmin <= max_min; lmin++) {
+ for (int rmin = min_min; rmin <= max_min; rmin++) {
+ Type* r1 = NewRange(lmin, lmin + width);
+ Type* r2 = NewRange(rmin, rmin + width);
+ Type* expected_type = TypeBinaryOp(op, r1, r2);
+
+ for (int x1 = lmin; x1 < lmin + width; x1++) {
+ for (int x2 = rmin; x2 < rmin + width; x2++) {
+ double result_value = opfun(x1, x2);
+ Type* result_type = Type::Constant(
+ isolate()->factory()->NewNumber(result_value), zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
+ }
+ }
+ }
+ }
+ }
+ }
+
+ template <class BinaryFunction>
+ void TestBinaryArithOp(const Operator* op, BinaryFunction opfun) {
+ TestBinaryArithOpCloseToZero(op, opfun, 8);
+ for (int i = 0; i < 100; ++i) {
+ Type::RangeType* r1 = RandomRange()->AsRange();
+ Type::RangeType* r2 = RandomRange()->AsRange();
+ Type* expected_type = TypeBinaryOp(op, r1, r2);
+ for (int i = 0; i < 10; i++) {
+ double x1 = RandomInt(r1);
+ double x2 = RandomInt(r2);
+ double result_value = opfun(x1, x2);
+ Type* result_type = Type::Constant(
+ isolate()->factory()->NewNumber(result_value), zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
+ }
+ }
+ }
+
+ template <class BinaryFunction>
+ void TestBinaryCompareOp(const Operator* op, BinaryFunction opfun) {
+ for (int i = 0; i < 100; ++i) {
+ Type::RangeType* r1 = RandomRange()->AsRange();
+ Type::RangeType* r2 = RandomRange()->AsRange();
+ Type* expected_type = TypeBinaryOp(op, r1, r2);
+ for (int i = 0; i < 10; i++) {
+ double x1 = RandomInt(r1);
+ double x2 = RandomInt(r2);
+ bool result_value = opfun(x1, x2);
+ Type* result_type =
+ Type::Constant(result_value ? isolate()->factory()->true_value()
+ : isolate()->factory()->false_value(),
+ zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
+ }
+ }
+ }
+
+ template <class BinaryFunction>
+ void TestBinaryBitOp(const Operator* op, BinaryFunction opfun) {
+ for (int i = 0; i < 100; ++i) {
+ Type::RangeType* r1 = RandomRange(true)->AsRange();
+ Type::RangeType* r2 = RandomRange(true)->AsRange();
+ Type* expected_type = TypeBinaryOp(op, r1, r2);
+ for (int i = 0; i < 10; i++) {
+ int32_t x1 = static_cast<int32_t>(RandomInt(r1));
+ int32_t x2 = static_cast<int32_t>(RandomInt(r2));
+ double result_value = opfun(x1, x2);
+ Type* result_type = Type::Constant(
+ isolate()->factory()->NewNumber(result_value), zone());
+ EXPECT_TRUE(result_type->Is(expected_type));
+ }
+ }
+ }
+
+ Type* RandomSubtype(Type* type) {
+ Type* subtype;
+ do {
+ subtype = types_.Fuzz();
+ } while (!subtype->Is(type));
+ return subtype;
+ }
+
+ void TestBinaryMonotonicity(const Operator* op) {
+ for (int i = 0; i < 50; ++i) {
+ Type* type1 = types_.Fuzz();
+ Type* type2 = types_.Fuzz();
+ Type* type = TypeBinaryOp(op, type1, type2);
+ Type* subtype1 = RandomSubtype(type1);
+ Type* subtype2 = RandomSubtype(type2);
+ Type* subtype = TypeBinaryOp(op, subtype1, subtype2);
+ EXPECT_TRUE(subtype->Is(type));
+ }
+ }
+};
+
+
+namespace {
+
+int32_t shift_left(int32_t x, int32_t y) { return x << y; }
+int32_t shift_right(int32_t x, int32_t y) { return x >> y; }
+int32_t bit_or(int32_t x, int32_t y) { return x | y; }
+int32_t bit_and(int32_t x, int32_t y) { return x & y; }
+int32_t bit_xor(int32_t x, int32_t y) { return x ^ y; }
+
+} // namespace
+
+
+//------------------------------------------------------------------------------
+// Soundness
+// For simplicity, we currently only test soundness on expression operators
+// that have a direct equivalent in C++. Also, testing is currently limited
+// to ranges as input types.
+
+
+TEST_F(TyperTest, TypeJSAdd) {
+ TestBinaryArithOp(javascript_.Add(LanguageMode::SLOPPY, hints_),
+ std::plus<double>());
+ TestBinaryArithOp(javascript_.Add(LanguageMode::STRONG, hints_),
+ std::plus<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSSubtract) {
+ TestBinaryArithOp(javascript_.Subtract(LanguageMode::SLOPPY, hints_),
+ std::minus<double>());
+ TestBinaryArithOp(javascript_.Subtract(LanguageMode::STRONG, hints_),
+ std::minus<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSMultiply) {
+ TestBinaryArithOp(javascript_.Multiply(LanguageMode::SLOPPY, hints_),
+ std::multiplies<double>());
+ TestBinaryArithOp(javascript_.Multiply(LanguageMode::STRONG, hints_),
+ std::multiplies<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSDivide) {
+ TestBinaryArithOp(javascript_.Divide(LanguageMode::SLOPPY, hints_),
+ std::divides<double>());
+ TestBinaryArithOp(javascript_.Divide(LanguageMode::STRONG, hints_),
+ std::divides<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSModulus) {
+ TestBinaryArithOp(javascript_.Modulus(LanguageMode::SLOPPY, hints_), modulo);
+ TestBinaryArithOp(javascript_.Modulus(LanguageMode::STRONG, hints_), modulo);
+}
+
+
+TEST_F(TyperTest, TypeJSBitwiseOr) {
+ TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::SLOPPY, hints_), bit_or);
+ TestBinaryBitOp(javascript_.BitwiseOr(LanguageMode::STRONG, hints_), bit_or);
+}
+
+
+TEST_F(TyperTest, TypeJSBitwiseAnd) {
+ TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::SLOPPY, hints_),
+ bit_and);
+ TestBinaryBitOp(javascript_.BitwiseAnd(LanguageMode::STRONG, hints_),
+ bit_and);
+}
+
+
+TEST_F(TyperTest, TypeJSBitwiseXor) {
+ TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::SLOPPY, hints_),
+ bit_xor);
+ TestBinaryBitOp(javascript_.BitwiseXor(LanguageMode::STRONG, hints_),
+ bit_xor);
+}
+
+
+TEST_F(TyperTest, TypeJSShiftLeft) {
+ TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::SLOPPY, hints_),
+ shift_left);
+ TestBinaryBitOp(javascript_.ShiftLeft(LanguageMode::STRONG, hints_),
+ shift_left);
+}
+
+
+TEST_F(TyperTest, TypeJSShiftRight) {
+ TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::SLOPPY, hints_),
+ shift_right);
+ TestBinaryBitOp(javascript_.ShiftRight(LanguageMode::STRONG, hints_),
+ shift_right);
+}
+
+
+TEST_F(TyperTest, TypeJSLessThan) {
+ TestBinaryCompareOp(javascript_.LessThan(LanguageMode::SLOPPY),
+ std::less<double>());
+ TestBinaryCompareOp(javascript_.LessThan(LanguageMode::STRONG),
+ std::less<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSLessThanOrEqual) {
+ TestBinaryCompareOp(javascript_.LessThanOrEqual(LanguageMode::SLOPPY),
+ std::less_equal<double>());
+ TestBinaryCompareOp(javascript_.LessThanOrEqual(LanguageMode::STRONG),
+ std::less_equal<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSGreaterThan) {
+ TestBinaryCompareOp(javascript_.GreaterThan(LanguageMode::SLOPPY),
+ std::greater<double>());
+ TestBinaryCompareOp(javascript_.GreaterThan(LanguageMode::STRONG),
+ std::greater<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSGreaterThanOrEqual) {
+ TestBinaryCompareOp(javascript_.GreaterThanOrEqual(LanguageMode::SLOPPY),
+ std::greater_equal<double>());
+ TestBinaryCompareOp(javascript_.GreaterThanOrEqual(LanguageMode::STRONG),
+ std::greater_equal<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSEqual) {
+ TestBinaryCompareOp(javascript_.Equal(), std::equal_to<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSNotEqual) {
+ TestBinaryCompareOp(javascript_.NotEqual(), std::not_equal_to<double>());
+}
+
+
+// For numbers there's no difference between strict and non-strict equality.
+TEST_F(TyperTest, TypeJSStrictEqual) {
+ TestBinaryCompareOp(javascript_.StrictEqual(), std::equal_to<double>());
+}
+
+
+TEST_F(TyperTest, TypeJSStrictNotEqual) {
+ TestBinaryCompareOp(javascript_.StrictNotEqual(),
+ std::not_equal_to<double>());
+}
+
+
+//------------------------------------------------------------------------------
+// Monotonicity
+
+
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name()); \
+ }
+TEST_BINARY_MONOTONICITY(Equal)
+TEST_BINARY_MONOTONICITY(NotEqual)
+TEST_BINARY_MONOTONICITY(StrictEqual)
+TEST_BINARY_MONOTONICITY(StrictNotEqual)
+#undef TEST_BINARY_MONOTONICITY
+
+
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity(javascript_.name(LanguageMode::SLOPPY)); \
+ TestBinaryMonotonicity(javascript_.name(LanguageMode::STRONG)); \
+ }
+TEST_BINARY_MONOTONICITY(LessThan)
+TEST_BINARY_MONOTONICITY(GreaterThan)
+TEST_BINARY_MONOTONICITY(LessThanOrEqual)
+TEST_BINARY_MONOTONICITY(GreaterThanOrEqual)
+#undef TEST_BINARY_MONOTONICITY
+
+
+#define TEST_BINARY_MONOTONICITY(name) \
+ TEST_F(TyperTest, Monotonicity_##name) { \
+ TestBinaryMonotonicity( \
+ javascript_.name(LanguageMode::SLOPPY, BinaryOperationHints::Any())); \
+ TestBinaryMonotonicity( \
+ javascript_.name(LanguageMode::STRONG, BinaryOperationHints::Any())); \
+ }
+TEST_BINARY_MONOTONICITY(BitwiseOr)
+TEST_BINARY_MONOTONICITY(BitwiseXor)
+TEST_BINARY_MONOTONICITY(BitwiseAnd)
+TEST_BINARY_MONOTONICITY(ShiftLeft)
+TEST_BINARY_MONOTONICITY(ShiftRight)
+TEST_BINARY_MONOTONICITY(ShiftRightLogical)
+TEST_BINARY_MONOTONICITY(Add)
+TEST_BINARY_MONOTONICITY(Subtract)
+TEST_BINARY_MONOTONICITY(Multiply)
+TEST_BINARY_MONOTONICITY(Divide)
+TEST_BINARY_MONOTONICITY(Modulus)
+#undef TEST_BINARY_MONOTONICITY
+
+
+//------------------------------------------------------------------------------
+// Regression tests
+
+
+TEST_F(TyperTest, TypeRegressInt32Constant) {
+ int values[] = {-5, 10};
+ for (auto i : values) {
+ Node* c = graph()->NewNode(common()->Int32Constant(i));
+ Type* type = NodeProperties::GetType(c);
+ EXPECT_TRUE(type->Is(NewRange(i, i)));
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/compiler/value-numbering-reducer-unittest.cc b/test/unittests/compiler/value-numbering-reducer-unittest.cc
index b6be0bf..c003033 100644
--- a/test/unittests/compiler/value-numbering-reducer-unittest.cc
+++ b/test/unittests/compiler/value-numbering-reducer-unittest.cc
@@ -5,6 +5,8 @@
#include <limits>
#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
#include "src/compiler/value-numbering-reducer.h"
#include "test/unittests/test-utils.h"
@@ -20,8 +22,8 @@
};
-static const TestOperator kOp0(0, Operator::kEliminatable, 0, 1);
-static const TestOperator kOp1(1, Operator::kEliminatable, 1, 1);
+static const TestOperator kOp0(0, Operator::kIdempotent, 0, 1);
+static const TestOperator kOp1(1, Operator::kIdempotent, 1, 1);
class ValueNumberingReducerTest : public TestWithZone {
@@ -42,8 +44,8 @@
TEST_F(ValueNumberingReducerTest, AllInputsAreChecked) {
Node* na = graph()->NewNode(&kOp0);
Node* nb = graph()->NewNode(&kOp0);
- Node* n1 = graph()->NewNode(&kOp0, na);
- Node* n2 = graph()->NewNode(&kOp0, nb);
+ Node* n1 = graph()->NewNode(&kOp1, na);
+ Node* n2 = graph()->NewNode(&kOp1, nb);
EXPECT_FALSE(Reduce(n1).Changed());
EXPECT_FALSE(Reduce(n2).Changed());
}
@@ -71,20 +73,19 @@
static const size_t kMaxInputCount = 16;
Node* inputs[kMaxInputCount];
for (size_t i = 0; i < arraysize(inputs); ++i) {
- Operator::Opcode opcode = static_cast<Operator::Opcode>(
- std::numeric_limits<Operator::Opcode>::max() - i);
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(kMaxInputCount + i);
inputs[i] = graph()->NewNode(
- new (zone()) TestOperator(opcode, Operator::kEliminatable, 0, 1));
+ new (zone()) TestOperator(opcode, Operator::kIdempotent, 0, 1));
}
TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
const TestOperator op1(static_cast<Operator::Opcode>(input_count),
- Operator::kEliminatable, input_count, 1);
+ Operator::kIdempotent, input_count, 1);
Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
Reduction r1 = Reduce(n1);
EXPECT_FALSE(r1.Changed());
const TestOperator op2(static_cast<Operator::Opcode>(input_count),
- Operator::kEliminatable, input_count, 1);
+ Operator::kIdempotent, input_count, 1);
Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
Reduction r2 = Reduce(n2);
EXPECT_TRUE(r2.Changed());
@@ -97,13 +98,12 @@
static const size_t kMaxInputCount = 16;
Node* inputs[kMaxInputCount];
for (size_t i = 0; i < arraysize(inputs); ++i) {
- Operator::Opcode opcode = static_cast<Operator::Opcode>(
- std::numeric_limits<Operator::Opcode>::max() - i);
+ Operator::Opcode opcode = static_cast<Operator::Opcode>(2 + i);
inputs[i] = graph()->NewNode(
- new (zone()) TestOperator(opcode, Operator::kEliminatable, 0, 1));
+ new (zone()) TestOperator(opcode, Operator::kIdempotent, 0, 1));
}
TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
- const TestOperator op1(1, Operator::kEliminatable, input_count, 1);
+ const TestOperator op1(1, Operator::kIdempotent, input_count, 1);
Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
Reduction r = Reduce(n);
EXPECT_FALSE(r.Changed());
diff --git a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 9ef0fa5..d6ed732 100644
--- a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -15,18 +15,18 @@
TEST_F(InstructionSelectorTest, ChangeFloat32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat32, kMachFloat64);
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float64());
m.Return(m.ChangeFloat32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSECvtss2sd, s[0]->arch_opcode());
+ EXPECT_EQ(kSSEFloat32ToFloat64, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
- StreamBuilder m(this, kMachInt64, kMachInt32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -35,7 +35,7 @@
TEST_F(InstructionSelectorTest, ChangeUint32ToFloat64WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachUint32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToFloat64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -44,7 +44,7 @@
TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
- StreamBuilder m(this, kMachUint64, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint32());
m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -53,18 +53,18 @@
TEST_F(InstructionSelectorTest, TruncateFloat64ToFloat32WithParameter) {
- StreamBuilder m(this, kMachFloat64, kMachFloat32);
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float32());
m.Return(m.TruncateFloat64ToFloat32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSECvtsd2ss, s[0]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64ToFloat32, s[0]->arch_opcode());
EXPECT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -75,6 +75,7 @@
// -----------------------------------------------------------------------------
// Loads and stores
+
namespace {
struct MemoryAccess {
@@ -90,16 +91,16 @@
static const MemoryAccess kMemoryAccesses[] = {
- {kMachInt8, kX64Movsxbl, kX64Movb},
- {kMachUint8, kX64Movzxbl, kX64Movb},
- {kMachInt16, kX64Movsxwl, kX64Movw},
- {kMachUint16, kX64Movzxwl, kX64Movw},
- {kMachInt32, kX64Movl, kX64Movl},
- {kMachUint32, kX64Movl, kX64Movl},
- {kMachInt64, kX64Movq, kX64Movq},
- {kMachUint64, kX64Movq, kX64Movq},
- {kMachFloat32, kX64Movss, kX64Movss},
- {kMachFloat64, kX64Movsd, kX64Movsd}};
+ {MachineType::Int8(), kX64Movsxbl, kX64Movb},
+ {MachineType::Uint8(), kX64Movzxbl, kX64Movb},
+ {MachineType::Int16(), kX64Movsxwl, kX64Movw},
+ {MachineType::Uint16(), kX64Movzxwl, kX64Movw},
+ {MachineType::Int32(), kX64Movl, kX64Movl},
+ {MachineType::Uint32(), kX64Movl, kX64Movl},
+ {MachineType::Int64(), kX64Movq, kX64Movq},
+ {MachineType::Uint64(), kX64Movq, kX64Movq},
+ {MachineType::Float32(), kX64Movss, kX64Movss},
+ {MachineType::Float64(), kX64Movsd, kX64Movsd}};
} // namespace
@@ -110,7 +111,8 @@
TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -122,8 +124,10 @@
TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
const MemoryAccess memacc = GetParam();
- StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
- m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ m.Parameter(2), kNoWriteBarrier);
m.Return(m.Int32Constant(0));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
@@ -137,6 +141,7 @@
InstructionSelectorMemoryAccessTest,
::testing::ValuesIn(kMemoryAccesses));
+
// -----------------------------------------------------------------------------
// ChangeUint32ToUint64.
@@ -188,7 +193,8 @@
TEST_P(InstructionSelectorChangeUint32ToUint64Test, ChangeUint32ToUint64) {
const BinaryOperation& bop = GetParam();
- StreamBuilder m(this, kMachUint64, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
m.Return(m.ChangeUint32ToUint64((m.*bop.constructor)(p0, p1)));
@@ -207,7 +213,7 @@
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
m.Return(t);
@@ -224,7 +230,7 @@
TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
- StreamBuilder m(this, kMachInt32, kMachInt64);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int64());
Node* const p = m.Parameter(0);
Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(32)));
m.Return(t);
@@ -245,7 +251,8 @@
TEST_F(InstructionSelectorTest, Int32AddWithInt32ParametersLea) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const a0 = m.Int32Add(p0, p1);
@@ -262,7 +269,7 @@
TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// If one of the add's operands is only used once, use an "leal", even though
@@ -284,7 +291,7 @@
TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(1);
// If there is only a single use of an add's input and the immediate constant
@@ -302,7 +309,7 @@
TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// A second use of an add's input uses lea
@@ -319,7 +326,7 @@
TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// If one of the add's operands is only used once, use an "leal", even though
@@ -340,7 +347,7 @@
TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(15);
// A second use of an add's input uses lea
@@ -358,7 +365,8 @@
TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
// If one of the add's operands is only used once, use an "leal", even though
@@ -379,7 +387,8 @@
TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
// If all of of the add's operands are used multiple times, use an "leal".
@@ -396,7 +405,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -412,7 +422,8 @@
TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -428,7 +439,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
@@ -444,7 +456,8 @@
TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
@@ -460,7 +473,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
@@ -476,7 +490,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
@@ -492,7 +507,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
@@ -508,7 +524,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
@@ -524,7 +541,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -542,7 +560,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -560,7 +579,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -578,7 +598,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -596,7 +617,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -614,7 +636,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
@@ -632,7 +655,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
@@ -650,7 +674,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
@@ -668,7 +693,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
@@ -686,7 +712,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
@@ -704,7 +731,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
@@ -722,7 +750,7 @@
TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(-1);
// If there is only a single use of on of the sub's non-constant input, use a
@@ -739,7 +767,7 @@
TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) {
- StreamBuilder m(this, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const c0 = m.Int32Constant(-1);
// If there are multiple uses of on of the sub's non-constant input, use a
@@ -757,7 +785,8 @@
TEST_F(InstructionSelectorTest, Int32AddScaled2Other) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const p2 = m.Parameter(2);
@@ -786,7 +815,8 @@
TEST_F(InstructionSelectorTest, Int32MulWithInt32MulWithParameters) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const m0 = m.Int32Mul(p0, p1);
@@ -807,7 +837,8 @@
TEST_F(InstructionSelectorTest, Int32MulHigh) {
- StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32(),
+ MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Int32MulHigh(p0, p1);
@@ -827,7 +858,8 @@
TEST_F(InstructionSelectorTest, Uint32MulHigh) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const p1 = m.Parameter(1);
Node* const n = m.Uint32MulHigh(p0, p1);
@@ -847,7 +879,8 @@
TEST_F(InstructionSelectorTest, Int32Mul2BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(2);
Node* const n = m.Int32Mul(p0, c1);
@@ -863,7 +896,8 @@
TEST_F(InstructionSelectorTest, Int32Mul3BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(3);
Node* const n = m.Int32Mul(p0, c1);
@@ -879,7 +913,8 @@
TEST_F(InstructionSelectorTest, Int32Mul4BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(4);
Node* const n = m.Int32Mul(p0, c1);
@@ -894,7 +929,8 @@
TEST_F(InstructionSelectorTest, Int32Mul5BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(5);
Node* const n = m.Int32Mul(p0, c1);
@@ -910,7 +946,8 @@
TEST_F(InstructionSelectorTest, Int32Mul8BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(8);
Node* const n = m.Int32Mul(p0, c1);
@@ -925,7 +962,8 @@
TEST_F(InstructionSelectorTest, Int32Mul9BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(9);
Node* const n = m.Int32Mul(p0, c1);
@@ -945,7 +983,8 @@
TEST_F(InstructionSelectorTest, Int32Shl1BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(1);
Node* const n = m.Word32Shl(p0, c1);
@@ -961,7 +1000,8 @@
TEST_F(InstructionSelectorTest, Int32Shl2BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(2);
Node* const n = m.Word32Shl(p0, c1);
@@ -976,7 +1016,8 @@
TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
- StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32(),
+ MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const c1 = m.Int32Constant(3);
Node* const n = m.Word32Shl(p0, c1);
@@ -991,12 +1032,199 @@
// -----------------------------------------------------------------------------
-// Word64Shl.
+// Floating point operations.
+
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE(s.IsSameAsFirst(s[0]->Output()));
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Float64Mul(add, m.Parameter(1));
+ Node* sub = m.Float64Sub(mul, add);
+ Node* ret = m.Float64Div(mul, sub);
+ m.Return(ret);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(4U, s.size());
+ EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
+ EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+ Node* mul = m.Float64Mul(add, m.Parameter(1));
+ Node* sub = m.Float64Sub(mul, add);
+ Node* ret = m.Float64Div(mul, sub);
+ m.Return(ret);
+ Stream s = m.Build();
+ ASSERT_EQ(4U, s.size());
+ EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
+ EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float32SubWithMinusZeroAndParameter) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Sub(m.Float32Constant(-0.0f), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat32Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Float64SubWithMinusZeroAndParameter) {
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kSSEFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+ {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+ m.Return(n);
+ Stream s = m.Build(AVX);
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kAVXFloat64Neg, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+ }
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
+TEST_F(InstructionSelectorTest, Uint64LessThanWithLoadAndLoadStackPointer) {
+ StreamBuilder m(this, MachineType::Bool());
+ Node* const sl = m.Load(
+ MachineType::Pointer(),
+ m.ExternalConstant(ExternalReference::address_of_stack_limit(isolate())));
+ Node* const sp = m.LoadStackPointer();
+ Node* const n = m.Uint64LessThan(sl, sp);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64StackCheck, s[0]->arch_opcode());
+ ASSERT_EQ(0U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kUnsignedGreaterThan, s[0]->flags_condition());
+}
TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachInt32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1015,7 +1243,7 @@
TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
TRACED_FORRANGE(int64_t, x, 32, 63) {
- StreamBuilder m(this, kMachInt64, kMachUint32);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Uint32());
Node* const p0 = m.Parameter(0);
Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
m.Return(n);
@@ -1032,35 +1260,78 @@
}
-TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+TEST_F(InstructionSelectorTest, Word32AndWith0xff) {
{
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
- Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
- Node* mul = m.Float64Mul(add, m.Parameter(1));
- Node* sub = m.Float64Sub(mul, add);
- Node* ret = m.Float64Div(mul, sub);
- m.Return(ret);
- Stream s = m.Build(AVX);
- ASSERT_EQ(4U, s.size());
- EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
- }
- {
- StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
- Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
- Node* mul = m.Float64Mul(add, m.Parameter(1));
- Node* sub = m.Float64Sub(mul, add);
- Node* ret = m.Float64Div(mul, sub);
- m.Return(ret);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(p0, m.Int32Constant(0xff));
+ m.Return(n);
Stream s = m.Build();
- ASSERT_EQ(4U, s.size());
- EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(m.Int32Constant(0xff), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxbl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(p0, m.Int32Constant(0xffff));
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32And(m.Int32Constant(0xffff), p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Movzxwl, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+ }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kX64Lzcnt32, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
}
} // namespace compiler
diff --git a/test/unittests/compiler/zone-pool-unittest.cc b/test/unittests/compiler/zone-pool-unittest.cc
index e23557a..3bfde4b 100644
--- a/test/unittests/compiler/zone-pool-unittest.cc
+++ b/test/unittests/compiler/zone-pool-unittest.cc
@@ -12,7 +12,7 @@
class ZonePoolTest : public TestWithIsolate {
public:
- ZonePoolTest() : zone_pool_(isolate()) {}
+ ZonePoolTest() {}
protected:
ZonePool* zone_pool() { return &zone_pool_; }
@@ -32,9 +32,9 @@
size_t Allocate(Zone* zone) {
size_t bytes = rng.NextInt(25) + 7;
- int size_before = zone->allocation_size();
- zone->New(static_cast<int>(bytes));
- return static_cast<size_t>(zone->allocation_size() - size_before);
+ size_t size_before = zone->allocation_size();
+ zone->New(bytes);
+ return zone->allocation_size() - size_before;
}
private:
diff --git a/test/unittests/counters-unittest.cc b/test/unittests/counters-unittest.cc
new file mode 100644
index 0000000..822a5c5
--- /dev/null
+++ b/test/unittests/counters-unittest.cc
@@ -0,0 +1,200 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/counters.h"
+#include "src/handles-inl.h"
+#include "src/objects-inl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class MockHistogram : public Histogram {
+ public:
+ void AddSample(int value) { samples_.push_back(value); }
+ std::vector<int>* samples() { return &samples_; }
+
+ private:
+ std::vector<int> samples_;
+};
+
+
+class AggregatedMemoryHistogramTest : public ::testing::Test {
+ public:
+ AggregatedMemoryHistogramTest() {
+ aggregated_ = AggregatedMemoryHistogram<MockHistogram>(&mock_);
+ }
+ virtual ~AggregatedMemoryHistogramTest() {}
+
+ void AddSample(double current_ms, double current_value) {
+ aggregated_.AddSample(current_ms, current_value);
+ }
+
+ std::vector<int>* samples() { return mock_.samples(); }
+
+ private:
+ AggregatedMemoryHistogram<MockHistogram> aggregated_;
+ MockHistogram mock_;
+};
+
+} // namespace
+
+
+TEST_F(AggregatedMemoryHistogramTest, OneSample1) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 1000);
+ AddSample(20, 1000);
+ EXPECT_EQ(1U, samples()->size());
+ EXPECT_EQ(1000, (*samples())[0]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, OneSample2) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 500);
+ AddSample(20, 1000);
+ EXPECT_EQ(1U, samples()->size());
+ EXPECT_EQ(750, (*samples())[0]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, OneSample3) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 500);
+ AddSample(15, 500);
+ AddSample(15, 1000);
+ AddSample(20, 1000);
+ EXPECT_EQ(1U, samples()->size());
+ EXPECT_EQ(750, (*samples())[0]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, OneSample4) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 500);
+ AddSample(15, 750);
+ AddSample(20, 1000);
+ EXPECT_EQ(1U, samples()->size());
+ EXPECT_EQ(750, (*samples())[0]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples1) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 1000);
+ AddSample(30, 1000);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ(1000, (*samples())[0]);
+ EXPECT_EQ(1000, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples2) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 1000);
+ AddSample(20, 1000);
+ AddSample(30, 1000);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ(1000, (*samples())[0]);
+ EXPECT_EQ(1000, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples3) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 1000);
+ AddSample(20, 1000);
+ AddSample(20, 500);
+ AddSample(30, 500);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ(1000, (*samples())[0]);
+ EXPECT_EQ(500, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples4) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 1000);
+ AddSample(30, 0);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ(750, (*samples())[0]);
+ EXPECT_EQ(250, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples5) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 0);
+ AddSample(30, 1000);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ(250, (*samples())[0]);
+ EXPECT_EQ(750, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples6) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 0);
+ AddSample(15, 1000);
+ AddSample(30, 1000);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ((500 + 1000) / 2, (*samples())[0]);
+ EXPECT_EQ(1000, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples7) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 0);
+ AddSample(15, 1000);
+ AddSample(25, 0);
+ AddSample(30, 1000);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ((500 + 750) / 2, (*samples())[0]);
+ EXPECT_EQ((250 + 500) / 2, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, TwoSamples8) {
+ FLAG_histogram_interval = 10;
+ AddSample(10, 1000);
+ AddSample(15, 0);
+ AddSample(25, 1000);
+ AddSample(30, 0);
+ EXPECT_EQ(2U, samples()->size());
+ EXPECT_EQ((500 + 250) / 2, (*samples())[0]);
+ EXPECT_EQ((750 + 500) / 2, (*samples())[1]);
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, ManySamples1) {
+ FLAG_histogram_interval = 10;
+ const int kMaxSamples = 1000;
+ AddSample(0, 0);
+ AddSample(10 * kMaxSamples, 10 * kMaxSamples);
+ EXPECT_EQ(static_cast<unsigned>(kMaxSamples), samples()->size());
+ for (int i = 0; i < kMaxSamples; i++) {
+ EXPECT_EQ(i * 10 + 5, (*samples())[i]);
+ }
+}
+
+
+TEST_F(AggregatedMemoryHistogramTest, ManySamples2) {
+ FLAG_histogram_interval = 10;
+ const int kMaxSamples = 1000;
+ AddSample(0, 0);
+ AddSample(10 * (2 * kMaxSamples), 10 * (2 * kMaxSamples));
+ EXPECT_EQ(static_cast<unsigned>(kMaxSamples), samples()->size());
+ for (int i = 0; i < kMaxSamples; i++) {
+ EXPECT_EQ(i * 10 + 5, (*samples())[i]);
+ }
+}
+
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/heap/bitmap-unittest.cc b/test/unittests/heap/bitmap-unittest.cc
new file mode 100644
index 0000000..a84437d
--- /dev/null
+++ b/test/unittests/heap/bitmap-unittest.cc
@@ -0,0 +1,107 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/spaces.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+using v8::internal::Bitmap;
+
+class BitmapTest : public ::testing::Test {
+ public:
+ static const uint32_t kBlackCell;
+ static const uint32_t kWhiteCell;
+ static const uint32_t kBlackByte;
+ static const uint32_t kWhiteByte;
+
+ BitmapTest() : memory_(new uint8_t[Bitmap::kSize]) {
+ memset(memory_, 0, Bitmap::kSize);
+ }
+
+ virtual ~BitmapTest() { delete[] memory_; }
+
+ Bitmap* bitmap() { return reinterpret_cast<Bitmap*>(memory_); }
+ uint8_t* raw_bitmap() { return memory_; }
+
+ private:
+ uint8_t* memory_;
+};
+
+
+const uint32_t BitmapTest::kBlackCell = 0xAAAAAAAA;
+const uint32_t BitmapTest::kWhiteCell = 0x00000000;
+const uint32_t BitmapTest::kBlackByte = 0xAA;
+const uint32_t BitmapTest::kWhiteByte = 0x00;
+
+
+TEST_F(BitmapTest, IsZeroInitialized) {
+ // We require all tests to start from a zero-initialized bitmap. Manually
+ // verify this invariant here.
+ for (size_t i = 0; i < Bitmap::kSize; i++) {
+ EXPECT_EQ(raw_bitmap()[i], kWhiteByte);
+ }
+}
+
+
+TEST_F(BitmapTest, Cells) {
+ Bitmap* bm = bitmap();
+ bm->cells()[1] = kBlackCell;
+ uint8_t* raw = raw_bitmap();
+ int second_cell_base = Bitmap::kBytesPerCell;
+ for (size_t i = 0; i < Bitmap::kBytesPerCell; i++) {
+ EXPECT_EQ(raw[second_cell_base + i], kBlackByte);
+ }
+}
+
+
+TEST_F(BitmapTest, CellsCount) {
+ int last_cell_index = bitmap()->CellsCount() - 1;
+ bitmap()->cells()[last_cell_index] = kBlackCell;
+ // Manually verify on raw memory.
+ uint8_t* raw = raw_bitmap();
+ for (size_t i = 0; i < Bitmap::kSize; i++) {
+ // Last cell should be set.
+ if (i >= (Bitmap::kSize - Bitmap::kBytesPerCell)) {
+ EXPECT_EQ(raw[i], kBlackByte);
+ } else {
+ EXPECT_EQ(raw[i], kWhiteByte);
+ }
+ }
+}
+
+
+TEST_F(BitmapTest, IsClean) {
+ Bitmap* bm = bitmap();
+ EXPECT_TRUE(bm->IsClean());
+ bm->cells()[0] = kBlackCell;
+ EXPECT_FALSE(bm->IsClean());
+}
+
+
+TEST_F(BitmapTest, ClearRange1) {
+ Bitmap* bm = bitmap();
+ bm->cells()[0] = kBlackCell;
+ bm->cells()[1] = kBlackCell;
+ bm->cells()[2] = kBlackCell;
+ bm->ClearRange(0, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
+ EXPECT_EQ(bm->cells()[0], kWhiteCell);
+ EXPECT_EQ(bm->cells()[1], 0xAAAA0000);
+ EXPECT_EQ(bm->cells()[2], kBlackCell);
+}
+
+
+TEST_F(BitmapTest, ClearRange2) {
+ Bitmap* bm = bitmap();
+ bm->cells()[0] = kBlackCell;
+ bm->cells()[1] = kBlackCell;
+ bm->cells()[2] = kBlackCell;
+ bm->ClearRange(Bitmap::kBitsPerCell,
+ Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
+ EXPECT_EQ(bm->cells()[0], kBlackCell);
+ EXPECT_EQ(bm->cells()[1], 0xAAAA0000);
+ EXPECT_EQ(bm->cells()[2], kBlackCell);
+}
+
+} // namespace
diff --git a/test/unittests/heap/gc-idle-time-handler-unittest.cc b/test/unittests/heap/gc-idle-time-handler-unittest.cc
index 2076e60..6413e36 100644
--- a/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -19,30 +19,18 @@
GCIdleTimeHandler* handler() { return &handler_; }
- GCIdleTimeHandler::HeapState DefaultHeapState() {
- GCIdleTimeHandler::HeapState result;
+ GCIdleTimeHeapState DefaultHeapState() {
+ GCIdleTimeHeapState result;
result.contexts_disposed = 0;
result.contexts_disposal_rate = GCIdleTimeHandler::kHighContextDisposalRate;
- result.size_of_objects = kSizeOfObjects;
result.incremental_marking_stopped = false;
- result.can_start_incremental_marking = true;
- result.sweeping_in_progress = false;
- result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
- result.incremental_marking_speed_in_bytes_per_ms = kMarkingSpeed;
- result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
- result.used_new_space_size = 0;
- result.new_space_capacity = kNewSpaceCapacity;
- result.new_space_allocation_throughput_in_bytes_per_ms =
- kNewSpaceAllocationThroughput;
return result;
}
static const size_t kSizeOfObjects = 100 * MB;
static const size_t kMarkCompactSpeed = 200 * KB;
static const size_t kMarkingSpeed = 200 * KB;
- static const size_t kScavengeSpeed = 100 * KB;
- static const size_t kNewSpaceCapacity = 1 * MB;
- static const size_t kNewSpaceAllocationThroughput = 10 * KB;
+ static const int kMaxNotifications = 100;
private:
GCIdleTimeHandler handler_;
@@ -110,92 +98,35 @@
}
-TEST_F(GCIdleTimeHandlerTest, DoScavengeEmptyNewSpace) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- int idle_time_in_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_in_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeFullNewSpace) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- int idle_time_in_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_in_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeUnknownScavengeSpeed) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = 0;
- int idle_time_in_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_in_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeLowScavengeSpeed) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = 1 * KB;
- int idle_time_in_ms = 16;
- EXPECT_FALSE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_in_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, DoScavengeHighScavengeSpeed) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.used_new_space_size = kNewSpaceCapacity;
- heap_state.scavenge_speed_in_bytes_per_ms = kNewSpaceCapacity;
- int idle_time_in_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoScavenge(
- idle_time_in_ms, heap_state.new_space_capacity,
- heap_state.used_new_space_size, heap_state.scavenge_speed_in_bytes_per_ms,
- heap_state.new_space_allocation_throughput_in_bytes_per_ms));
-}
-
-
TEST_F(GCIdleTimeHandlerTest, ShouldDoMarkCompact) {
- size_t idle_time_in_ms = 16;
- EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_in_ms, 0, 0));
+ size_t idle_time_ms = GCIdleTimeHandler::kMaxScheduledIdleTime;
+ EXPECT_TRUE(GCIdleTimeHandler::ShouldDoMarkCompact(idle_time_ms, 0, 0));
}
TEST_F(GCIdleTimeHandlerTest, DontDoMarkCompact) {
- size_t idle_time_in_ms = 1;
+ size_t idle_time_ms = 1;
EXPECT_FALSE(GCIdleTimeHandler::ShouldDoMarkCompact(
- idle_time_in_ms, kSizeOfObjects, kMarkingSpeed));
+ idle_time_ms, kSizeOfObjects, kMarkingSpeed));
}
TEST_F(GCIdleTimeHandlerTest, ShouldDoFinalIncrementalMarkCompact) {
- size_t idle_time_in_ms = 16;
+ size_t idle_time_ms = 16;
EXPECT_TRUE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
- idle_time_in_ms, 0, 0));
+ idle_time_ms, 0, 0));
}
TEST_F(GCIdleTimeHandlerTest, DontDoFinalIncrementalMarkCompact) {
- size_t idle_time_in_ms = 1;
+ size_t idle_time_ms = 1;
EXPECT_FALSE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
- idle_time_in_ms, kSizeOfObjects, kMarkingSpeed));
+ idle_time_ms, kSizeOfObjects, kMarkingSpeed));
}
TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.incremental_marking_stopped = true;
double idle_time_ms = 0;
@@ -205,7 +136,7 @@
TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.contexts_disposal_rate =
GCIdleTimeHandler::kHighContextDisposalRate - 1;
@@ -216,22 +147,8 @@
}
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate = 1.0;
- heap_state.incremental_marking_stopped = true;
- heap_state.can_start_incremental_marking = false;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>((heap_state.size_of_objects + speed - 1) / speed);
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_FULL_GC, action.type);
-}
-
-
TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
heap_state.contexts_disposal_rate = 1.0;
heap_state.incremental_marking_stopped = true;
@@ -242,202 +159,97 @@
TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate = 1.0;
- heap_state.incremental_marking_stopped = true;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed - 1);
+ heap_state.contexts_disposal_rate =
+ GCIdleTimeHandler::kHighContextDisposalRate;
+ size_t speed = kMarkCompactSpeed;
+ double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.contexts_disposed = 1;
- heap_state.contexts_disposal_rate = 1.0;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed - 1);
+ heap_state.contexts_disposal_rate =
+ GCIdleTimeHandler::kHighContextDisposalRate;
+ size_t speed = kMarkCompactSpeed;
+ double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
double idle_time_ms = 10;
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
- EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
- static_cast<size_t>(action.parameter));
- EXPECT_LT(0, action.parameter);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, IncrementalMarking2) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.incremental_marking_stopped = true;
- size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
- double idle_time_ms = 10;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
- EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
- static_cast<size_t>(action.parameter));
- EXPECT_LT(0, action.parameter);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
- heap_state.can_start_incremental_marking = false;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed - 1);
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, StopEventually1) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- heap_state.incremental_marking_stopped = true;
- heap_state.can_start_incremental_marking = false;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed + 1);
- for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_FULL_GC, action.type);
- handler()->NotifyIdleMarkCompact();
- }
+ size_t speed = kMarkCompactSpeed;
+ double idle_time_ms = static_cast<double>(kSizeOfObjects / speed - 1);
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
EXPECT_EQ(DONE, action.type);
}
-TEST_F(GCIdleTimeHandlerTest, StopEventually2) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- double idle_time_ms = 10;
- for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
- // In this case we emulate incremental marking steps that finish with a
- // full gc.
- handler()->NotifyIdleMarkCompact();
- }
- heap_state.can_start_incremental_marking = false;
+TEST_F(GCIdleTimeHandlerTest, DoNotStartIncrementalMarking) {
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ double idle_time_ms = 10.0;
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
EXPECT_EQ(DONE, action.type);
}
-TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop1) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop) {
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
heap_state.incremental_marking_stopped = true;
- heap_state.can_start_incremental_marking = false;
- size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
- double idle_time_ms =
- static_cast<double>(heap_state.size_of_objects / speed + 1);
- for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_FULL_GC, action.type);
- handler()->NotifyIdleMarkCompact();
- }
+ double idle_time_ms = 10.0;
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
EXPECT_EQ(DONE, action.type);
- // Emulate mutator work.
- for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
- handler()->NotifyScavenge();
- }
+ heap_state.incremental_marking_stopped = false;
action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_FULL_GC, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop2) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- double idle_time_ms = 10;
- for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- if (action.type == DONE) break;
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
- // In this case we try to emulate incremental marking steps the finish with
- // a full gc.
- handler()->NotifyIdleMarkCompact();
- }
- heap_state.can_start_incremental_marking = false;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DONE, action.type);
- // Emulate mutator work.
- for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
- handler()->NotifyScavenge();
- }
- heap_state.can_start_incremental_marking = true;
- action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, Scavenge) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- int idle_time_ms = 10;
- heap_state.used_new_space_size =
- heap_state.new_space_capacity -
- (kNewSpaceAllocationThroughput * idle_time_ms);
- GCIdleTimeAction action =
- handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DO_SCAVENGE, action.type);
-}
-
-
-TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- int idle_time_ms = 10;
- heap_state.can_start_incremental_marking = false;
- heap_state.incremental_marking_stopped = true;
- heap_state.used_new_space_size =
- heap_state.new_space_capacity -
- (kNewSpaceAllocationThroughput * idle_time_ms);
- GCIdleTimeAction action =
- handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DO_SCAVENGE, action.type);
- heap_state.used_new_space_size = 0;
- action = handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
+ EXPECT_EQ(DO_INCREMENTAL_STEP, action.type);
}
TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- double idle_time_ms = 0;
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
+ for (int i = 0; i < kMaxNotifications; i++) {
+ GCIdleTimeAction action = handler()->Compute(0, heap_state);
+ EXPECT_EQ(DO_NOTHING, action.type);
+ }
}
-TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeDoNothingButStartIdleRound) {
- GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
- double idle_time_ms = 10;
- for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
- GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- if (action.type == DONE) break;
- EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
- // In this case we try to emulate incremental marking steps the finish with
- // a full gc.
- handler()->NotifyIdleMarkCompact();
+TEST_F(GCIdleTimeHandlerTest, SmallIdleTimeNothingToDo) {
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
+ heap_state.incremental_marking_stopped = true;
+ for (int i = 0; i < kMaxNotifications; i++) {
+ GCIdleTimeAction action = handler()->Compute(10, heap_state);
+ EXPECT_TRUE(DO_NOTHING == action.type || DONE == action.type);
}
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DoneIfNotMakingProgressOnIncrementalMarking) {
+ // Regression test for crbug.com/489323.
+ GCIdleTimeHeapState heap_state = DefaultHeapState();
+
+ // Simulate incremental marking stopped and not eligible to start.
+ heap_state.incremental_marking_stopped = true;
+ double idle_time_ms = 10.0;
+ // We should return DONE if we cannot start incremental marking.
GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
- // Emulate mutator work.
- for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
- handler()->NotifyScavenge();
- }
- action = handler()->Compute(0, heap_state);
- EXPECT_EQ(DO_NOTHING, action.type);
+ EXPECT_EQ(DONE, action.type);
}
} // namespace internal
diff --git a/test/unittests/heap/heap-unittest.cc b/test/unittests/heap/heap-unittest.cc
new file mode 100644
index 0000000..9492faf
--- /dev/null
+++ b/test/unittests/heap/heap-unittest.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+#include <limits>
+
+#include "src/objects.h"
+#include "src/objects-inl.h"
+
+#include "src/handles.h"
+#include "src/handles-inl.h"
+
+#include "src/heap/heap.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+double Round(double x) {
+ // Round to three digits.
+ return floor(x * 1000 + 0.5) / 1000;
+}
+
+
+void CheckEqualRounded(double expected, double actual) {
+ expected = Round(expected);
+ actual = Round(actual);
+ EXPECT_DOUBLE_EQ(expected, actual);
+}
+
+
+TEST(Heap, HeapGrowingFactor) {
+ CheckEqualRounded(Heap::kMaxHeapGrowingFactor,
+ Heap::HeapGrowingFactor(34, 1));
+ CheckEqualRounded(3.553, Heap::HeapGrowingFactor(45, 1));
+ CheckEqualRounded(2.830, Heap::HeapGrowingFactor(50, 1));
+ CheckEqualRounded(1.478, Heap::HeapGrowingFactor(100, 1));
+ CheckEqualRounded(1.193, Heap::HeapGrowingFactor(200, 1));
+ CheckEqualRounded(1.121, Heap::HeapGrowingFactor(300, 1));
+ CheckEqualRounded(Heap::HeapGrowingFactor(300, 1),
+ Heap::HeapGrowingFactor(600, 2));
+ CheckEqualRounded(Heap::kMinHeapGrowingFactor,
+ Heap::HeapGrowingFactor(400, 1));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/heap/memory-reducer-unittest.cc b/test/unittests/heap/memory-reducer-unittest.cc
new file mode 100644
index 0000000..1088f01
--- /dev/null
+++ b/test/unittests/heap/memory-reducer-unittest.cc
@@ -0,0 +1,302 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/flags.h"
+#include "src/heap/memory-reducer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+MemoryReducer::State DoneState() {
+ return MemoryReducer::State(MemoryReducer::kDone, 0, 0.0, 1.0);
+}
+
+
+MemoryReducer::State WaitState(int started_gcs, double next_gc_start_ms) {
+ return MemoryReducer::State(MemoryReducer::kWait, started_gcs,
+ next_gc_start_ms, 1.0);
+}
+
+
+MemoryReducer::State RunState(int started_gcs, double next_gc_start_ms) {
+ return MemoryReducer::State(MemoryReducer::kRun, started_gcs,
+ next_gc_start_ms, 1.0);
+}
+
+
+MemoryReducer::Event MarkCompactEvent(double time_ms,
+ bool next_gc_likely_to_collect_more) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kMarkCompact;
+ event.time_ms = time_ms;
+ event.next_gc_likely_to_collect_more = next_gc_likely_to_collect_more;
+ return event;
+}
+
+
+MemoryReducer::Event MarkCompactEventGarbageLeft(double time_ms) {
+ return MarkCompactEvent(time_ms, true);
+}
+
+
+MemoryReducer::Event MarkCompactEventNoGarbageLeft(double time_ms) {
+ return MarkCompactEvent(time_ms, false);
+}
+
+
+MemoryReducer::Event TimerEvent(double time_ms,
+ bool should_start_incremental_gc,
+ bool can_start_incremental_gc) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kTimer;
+ event.time_ms = time_ms;
+ event.should_start_incremental_gc = should_start_incremental_gc;
+ event.can_start_incremental_gc = can_start_incremental_gc;
+ return event;
+}
+
+
+MemoryReducer::Event TimerEventLowAllocationRate(double time_ms) {
+ return TimerEvent(time_ms, true, true);
+}
+
+
+MemoryReducer::Event TimerEventHighAllocationRate(double time_ms) {
+ return TimerEvent(time_ms, false, true);
+}
+
+
+MemoryReducer::Event TimerEventPendingGC(double time_ms) {
+ return TimerEvent(time_ms, true, false);
+}
+
+
+MemoryReducer::Event ContextDisposedEvent(double time_ms) {
+ MemoryReducer::Event event;
+ event.type = MemoryReducer::kContextDisposed;
+ event.time_ms = time_ms;
+ return event;
+}
+
+
+TEST(MemoryReducer, FromDoneToDone) {
+ MemoryReducer::State state0(DoneState()), state1(DoneState());
+
+ state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(0));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+
+ state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(0));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+
+ state1 = MemoryReducer::Step(state0, TimerEventPendingGC(0));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+}
+
+
+TEST(MemoryReducer, FromDoneToWait) {
+ if (!FLAG_incremental_marking) return;
+
+ MemoryReducer::State state0(DoneState()), state1(DoneState());
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kLongDelayMs + 2, state1.next_gc_start_ms);
+ EXPECT_EQ(0, state1.started_gcs);
+ EXPECT_EQ(2, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kLongDelayMs + 2, state1.next_gc_start_ms);
+ EXPECT_EQ(0, state1.started_gcs);
+ EXPECT_EQ(2, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, ContextDisposedEvent(0));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(0, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+}
+
+
+TEST(MemoryReducer, FromWaitToWait) {
+ if (!FLAG_incremental_marking) return;
+
+ MemoryReducer::State state0(WaitState(2, 1000.0)), state1(DoneState());
+
+ state1 = MemoryReducer::Step(state0, ContextDisposedEvent(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+
+ state1 = MemoryReducer::Step(
+ state0, TimerEventLowAllocationRate(state0.next_gc_start_ms - 1));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+
+ state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+
+ state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(2000, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(2000, state1.last_gc_time_ms);
+
+ state0.last_gc_time_ms = 0;
+ state1 = MemoryReducer::Step(
+ state0,
+ TimerEventHighAllocationRate(MemoryReducer::kWatchdogDelayMs + 1));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(MemoryReducer::kWatchdogDelayMs + 1 + MemoryReducer::kLongDelayMs,
+ state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+
+ state0.last_gc_time_ms = 1;
+ state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(2000 + MemoryReducer::kLongDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+}
+
+
+TEST(MemoryReducer, FromWaitToRun) {
+ if (!FLAG_incremental_marking) return;
+
+ MemoryReducer::State state0(WaitState(0, 1000.0)), state1(DoneState());
+
+ state1 = MemoryReducer::Step(
+ state0, TimerEventLowAllocationRate(state0.next_gc_start_ms + 1));
+ EXPECT_EQ(MemoryReducer::kRun, state1.action);
+ EXPECT_EQ(0, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs + 1, state1.started_gcs);
+
+ state1 = MemoryReducer::Step(
+ state0,
+ TimerEventHighAllocationRate(MemoryReducer::kWatchdogDelayMs + 2));
+ EXPECT_EQ(MemoryReducer::kRun, state1.action);
+ EXPECT_EQ(0, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs + 1, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+}
+
+
+TEST(MemoryReducer, FromWaitToDone) {
+ if (!FLAG_incremental_marking) return;
+
+ MemoryReducer::State state0(WaitState(2, 0.0)), state1(DoneState());
+
+ state0.started_gcs = MemoryReducer::kMaxNumberOfGCs;
+
+ state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(2000));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(0, state1.next_gc_start_ms);
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(0, state1.next_gc_start_ms);
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(0, state1.next_gc_start_ms);
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+}
+
+
+TEST(MemoryReducer, FromRunToRun) {
+ if (!FLAG_incremental_marking) return;
+
+ MemoryReducer::State state0(RunState(1, 0.0)), state1(DoneState());
+
+ state1 = MemoryReducer::Step(state0, TimerEventLowAllocationRate(2000));
+ EXPECT_EQ(MemoryReducer::kRun, state1.action);
+ EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, TimerEventHighAllocationRate(2000));
+ EXPECT_EQ(MemoryReducer::kRun, state1.action);
+ EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, TimerEventPendingGC(2000));
+ EXPECT_EQ(MemoryReducer::kRun, state1.action);
+ EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+
+ state1 = MemoryReducer::Step(state0, ContextDisposedEvent(2000));
+ EXPECT_EQ(MemoryReducer::kRun, state1.action);
+ EXPECT_EQ(state0.next_gc_start_ms, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(state0.last_gc_time_ms, state1.last_gc_time_ms);
+}
+
+
+TEST(MemoryReducer, FromRunToDone) {
+ if (!FLAG_incremental_marking) return;
+
+ MemoryReducer::State state0(RunState(2, 0.0)), state1(DoneState());
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(0, state1.next_gc_start_ms);
+ EXPECT_EQ(MemoryReducer::kMaxNumberOfGCs, state1.started_gcs);
+ EXPECT_EQ(2000, state1.last_gc_time_ms);
+
+ state0.started_gcs = MemoryReducer::kMaxNumberOfGCs;
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000));
+ EXPECT_EQ(MemoryReducer::kDone, state1.action);
+ EXPECT_EQ(0, state1.next_gc_start_ms);
+ EXPECT_EQ(2000, state1.last_gc_time_ms);
+}
+
+
+TEST(MemoryReducer, FromRunToWait) {
+ if (!FLAG_incremental_marking) return;
+
+ MemoryReducer::State state0(RunState(2, 0.0)), state1(DoneState());
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventGarbageLeft(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(2000, state1.last_gc_time_ms);
+
+ state0.started_gcs = 1;
+
+ state1 = MemoryReducer::Step(state0, MarkCompactEventNoGarbageLeft(2000));
+ EXPECT_EQ(MemoryReducer::kWait, state1.action);
+ EXPECT_EQ(2000 + MemoryReducer::kShortDelayMs, state1.next_gc_start_ms);
+ EXPECT_EQ(state0.started_gcs, state1.started_gcs);
+ EXPECT_EQ(2000, state1.last_gc_time_ms);
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/heap/scavenge-job-unittest.cc b/test/unittests/heap/scavenge-job-unittest.cc
new file mode 100644
index 0000000..dbd463c
--- /dev/null
+++ b/test/unittests/heap/scavenge-job-unittest.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/globals.h"
+#include "src/heap/scavenge-job.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+const size_t kScavengeSpeedInBytesPerMs = 500 * KB;
+const size_t kNewSpaceCapacity = 8 * MB;
+
+
+TEST(ScavengeJob, AllocationLimitEmptyNewSpace) {
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, 0, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitFullNewSpace) {
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, kNewSpaceCapacity, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitUnknownScavengeSpeed) {
+ size_t expected_size = ScavengeJob::kInitialScavengeSpeedInBytesPerMs *
+ ScavengeJob::kAverageIdleTimeMs -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size - 1,
+ kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(0, expected_size,
+ kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitLowScavengeSpeed) {
+ size_t scavenge_speed = 1 * KB;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, ScavengeJob::kMinAllocationLimit - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, ScavengeJob::kMinAllocationLimit, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitAverageScavengeSpeed) {
+ size_t expected_size =
+ kScavengeSpeedInBytesPerMs * ScavengeJob::kAverageIdleTimeMs -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, ScavengeJob::kMinAllocationLimit,
+ kNewSpaceCapacity));
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, expected_size - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ kScavengeSpeedInBytesPerMs, expected_size, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, AllocationLimitHighScavengeSpeed) {
+ size_t scavenge_speed = kNewSpaceCapacity;
+ size_t expected_size =
+ static_cast<size_t>(
+ kNewSpaceCapacity *
+ ScavengeJob::kMaxAllocationLimitAsFractionOfNewSpace) -
+ ScavengeJob::kBytesAllocatedBeforeNextIdleTask;
+ EXPECT_FALSE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, expected_size - 1, kNewSpaceCapacity));
+ EXPECT_TRUE(ScavengeJob::ReachedIdleAllocationLimit(
+ scavenge_speed, expected_size, kNewSpaceCapacity));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeUnknownScavengeSpeed) {
+ size_t scavenge_speed = ScavengeJob::kInitialScavengeSpeedInBytesPerMs;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(
+ ScavengeJob::EnoughIdleTimeForScavenge(expected_time, 0, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(expected_time - 1, 0,
+ new_space_size));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeLowScavengeSpeed) {
+ size_t scavenge_speed = 1 * KB;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time, scavenge_speed, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time - 1, scavenge_speed, new_space_size));
+}
+
+
+TEST(ScavengeJob, EnoughIdleTimeForScavengeHighScavengeSpeed) {
+ size_t scavenge_speed = kNewSpaceCapacity;
+ size_t new_space_size = 1 * MB;
+ size_t expected_time = (new_space_size + scavenge_speed - 1) / scavenge_speed;
+ EXPECT_TRUE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time, scavenge_speed, new_space_size));
+ EXPECT_FALSE(ScavengeJob::EnoughIdleTimeForScavenge(
+ expected_time - 1, scavenge_speed, new_space_size));
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/test/unittests/interpreter/bytecode-array-builder-unittest.cc
new file mode 100644
index 0000000..2140aa8
--- /dev/null
+++ b/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -0,0 +1,692 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayBuilderTest : public TestWithIsolateAndZone {
+ public:
+ BytecodeArrayBuilderTest() {}
+ ~BytecodeArrayBuilderTest() override {}
+};
+
+
+TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+
+ builder.set_locals_count(200);
+ builder.set_context_count(1);
+ builder.set_parameter_count(0);
+ CHECK_EQ(builder.locals_count(), 200);
+ CHECK_EQ(builder.context_count(), 1);
+ CHECK_EQ(builder.fixed_register_count(), 201);
+
+ // Emit constant loads.
+ builder.LoadLiteral(Smi::FromInt(0))
+ .LoadLiteral(Smi::FromInt(8))
+ .LoadLiteral(Smi::FromInt(10000000))
+ .LoadUndefined()
+ .LoadNull()
+ .LoadTheHole()
+ .LoadTrue()
+ .LoadFalse();
+
+ // Emit accumulator transfers. Stores followed by loads to the same register
+ // are not generated. Hence, a dummy instruction in between.
+ Register reg(0);
+ builder.LoadAccumulatorWithRegister(reg)
+ .LoadNull()
+ .StoreAccumulatorInRegister(reg);
+
+ // Emit register-register transfer.
+ Register other(1);
+ builder.MoveRegister(reg, other);
+
+ // Emit register-register exchanges.
+ Register wide(150);
+ builder.ExchangeRegisters(reg, wide);
+ builder.ExchangeRegisters(wide, reg);
+ Register wider(151);
+ builder.ExchangeRegisters(wide, wider);
+
+ // Emit global load / store operations.
+ Factory* factory = isolate()->factory();
+ Handle<String> name = factory->NewStringFromStaticChars("var_name");
+ builder.LoadGlobal(name, 1, LanguageMode::SLOPPY,
+ TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, 1, LanguageMode::STRICT, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, 1, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(name, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
+ .StoreGlobal(name, 1, LanguageMode::SLOPPY)
+ .StoreGlobal(name, 1, LanguageMode::STRICT);
+
+ // Emit context operations.
+ builder.PushContext(reg)
+ .PopContext(reg)
+ .LoadContextSlot(reg, 1)
+ .StoreContextSlot(reg, 1);
+
+ // Emit load / store property operations.
+ builder.LoadNamedProperty(reg, name, 0, LanguageMode::SLOPPY)
+ .LoadKeyedProperty(reg, 0, LanguageMode::SLOPPY)
+ .StoreNamedProperty(reg, name, 0, LanguageMode::SLOPPY)
+ .StoreKeyedProperty(reg, reg, 0, LanguageMode::SLOPPY)
+ .LoadNamedProperty(reg, name, 0, LanguageMode::STRICT)
+ .LoadKeyedProperty(reg, 0, LanguageMode::STRICT)
+ .StoreNamedProperty(reg, name, 0, LanguageMode::STRICT)
+ .StoreKeyedProperty(reg, reg, 0, LanguageMode::STRICT);
+
+ // Emit load / store lookup slots.
+ builder.LoadLookupSlot(name, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadLookupSlot(name, TypeofMode::INSIDE_TYPEOF)
+ .StoreLookupSlot(name, LanguageMode::SLOPPY)
+ .StoreLookupSlot(name, LanguageMode::STRICT);
+
+ // Emit closure operations.
+ Handle<SharedFunctionInfo> shared_info = factory->NewSharedFunctionInfo(
+ factory->NewStringFromStaticChars("function_a"), MaybeHandle<Code>(),
+ false);
+ builder.CreateClosure(shared_info, NOT_TENURED);
+
+ // Emit argument creation operations.
+ builder.CreateArguments(CreateArgumentsType::kMappedArguments)
+ .CreateArguments(CreateArgumentsType::kUnmappedArguments);
+
+ // Emit literal creation operations.
+ builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("a"), 0, 0)
+ .CreateArrayLiteral(factory->NewFixedArray(1), 0, 0)
+ .CreateObjectLiteral(factory->NewFixedArray(1), 0, 0);
+
+ // Call operations.
+ builder.Call(reg, reg, 0, 0)
+ .Call(reg, reg, 0, 1024)
+ .CallRuntime(Runtime::kIsArray, reg, 1)
+ .CallRuntimeForPair(Runtime::kLoadLookupSlot, reg, 1, reg)
+ .CallJSRuntime(Context::SPREAD_ITERABLE_INDEX, reg, 1);
+
+ // Emit binary operator invocations.
+ builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::SUB, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::MUL, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::DIV, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::MOD, reg, Strength::WEAK);
+
+ // Emit bitwise operator invocations
+ builder.BinaryOperation(Token::Value::BIT_OR, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::BIT_XOR, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::BIT_AND, reg, Strength::WEAK);
+
+ // Emit shift operator invocations
+ builder.BinaryOperation(Token::Value::SHL, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::SAR, reg, Strength::WEAK)
+ .BinaryOperation(Token::Value::SHR, reg, Strength::WEAK);
+
+  // Emit count operator invocations
+ builder.CountOperation(Token::Value::ADD, Strength::WEAK)
+ .CountOperation(Token::Value::SUB, Strength::WEAK);
+
+ // Emit unary operator invocations.
+ builder.LogicalNot().TypeOf();
+
+ // Emit delete
+ builder.Delete(reg, LanguageMode::SLOPPY)
+ .Delete(reg, LanguageMode::STRICT)
+ .DeleteLookupSlot();
+
+ // Emit new.
+ builder.New(reg, reg, 0);
+
+ // Emit test operator invocations.
+ builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::NE, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::EQ_STRICT, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::NE_STRICT, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::LT, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::GT, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::LTE, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::GTE, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::INSTANCEOF, reg, Strength::WEAK)
+ .CompareOperation(Token::Value::IN, reg, Strength::WEAK);
+
+ // Emit cast operator invocations.
+ builder.CastAccumulatorToNumber()
+ .CastAccumulatorToJSObject()
+ .CastAccumulatorToName();
+
+ // Emit control flow. Return must be the last instruction.
+ BytecodeLabel start;
+ builder.Bind(&start);
+ // Short jumps with Imm8 operands
+ builder.Jump(&start)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start);
+ // Perform an operation that returns boolean value to
+ // generate JumpIfTrue/False
+ builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
+  // Perform an operation that returns a non-boolean value to
+ // generate JumpIfToBooleanTrue/False.
+ builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
+ // Insert dummy ops to force longer jumps
+ for (int i = 0; i < 128; i++) {
+ builder.LoadTrue();
+ }
+ // Longer jumps requiring Constant operand
+ builder.Jump(&start)
+ .JumpIfNull(&start)
+ .JumpIfUndefined(&start);
+ // Perform an operation that returns boolean value to
+ // generate JumpIfTrue/False
+ builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
+  // Perform an operation that returns a non-boolean value to
+ // generate JumpIfToBooleanTrue/False.
+ builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
+
+  // Emit throw in its own basic block so that the rest of the code isn't
+ // omitted due to being dead.
+ BytecodeLabel after_throw;
+ builder.Jump(&after_throw)
+ .Throw()
+ .Bind(&after_throw);
+
+ builder.ForInPrepare(reg, reg, reg)
+ .ForInDone(reg, reg)
+ .ForInNext(reg, reg, reg, reg)
+ .ForInStep(reg);
+
+ // Wide constant pool loads
+ for (int i = 0; i < 256; i++) {
+ // Emit junk in constant pool to force wide constant pool index.
+ builder.LoadLiteral(factory->NewNumber(2.5321 + i));
+ }
+ builder.LoadLiteral(Smi::FromInt(20000000));
+ Handle<String> wide_name = factory->NewStringFromStaticChars("var_wide_name");
+
+ // Emit wide global load / store operations.
+ builder.LoadGlobal(name, 1024, LanguageMode::SLOPPY,
+ TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(wide_name, 1, LanguageMode::STRICT,
+ TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadGlobal(name, 1024, LanguageMode::SLOPPY, TypeofMode::INSIDE_TYPEOF)
+ .LoadGlobal(wide_name, 1, LanguageMode::STRICT, TypeofMode::INSIDE_TYPEOF)
+ .StoreGlobal(name, 1024, LanguageMode::SLOPPY)
+ .StoreGlobal(wide_name, 1, LanguageMode::STRICT);
+
+ // Emit wide load / store property operations.
+ builder.LoadNamedProperty(reg, wide_name, 0, LanguageMode::SLOPPY)
+ .LoadKeyedProperty(reg, 2056, LanguageMode::SLOPPY)
+ .StoreNamedProperty(reg, wide_name, 0, LanguageMode::SLOPPY)
+ .StoreKeyedProperty(reg, reg, 2056, LanguageMode::SLOPPY)
+ .LoadNamedProperty(reg, wide_name, 0, LanguageMode::STRICT)
+ .LoadKeyedProperty(reg, 2056, LanguageMode::STRICT)
+ .StoreNamedProperty(reg, wide_name, 0, LanguageMode::STRICT)
+ .StoreKeyedProperty(reg, reg, 2056, LanguageMode::STRICT);
+
+ // Emit wide context operations.
+ builder.LoadContextSlot(reg, 1024)
+ .StoreContextSlot(reg, 1024);
+
+ // Emit wide load / store lookup slots.
+ builder.LoadLookupSlot(wide_name, TypeofMode::NOT_INSIDE_TYPEOF)
+ .LoadLookupSlot(wide_name, TypeofMode::INSIDE_TYPEOF)
+ .StoreLookupSlot(wide_name, LanguageMode::SLOPPY)
+ .StoreLookupSlot(wide_name, LanguageMode::STRICT);
+
+ // CreateClosureWide
+ Handle<SharedFunctionInfo> shared_info2 = factory->NewSharedFunctionInfo(
+ factory->NewStringFromStaticChars("function_b"), MaybeHandle<Code>(),
+ false);
+ builder.CreateClosure(shared_info2, NOT_TENURED);
+
+ // Emit wide variant of literal creation operations.
+ builder.CreateRegExpLiteral(factory->NewStringFromStaticChars("wide_literal"),
+ 0, 0)
+ .CreateArrayLiteral(factory->NewFixedArray(2), 0, 0)
+ .CreateObjectLiteral(factory->NewFixedArray(2), 0, 0);
+
+ // Longer jumps requiring ConstantWide operand
+ builder.Jump(&start).JumpIfNull(&start).JumpIfUndefined(&start);
+ // Perform an operation that returns boolean value to
+ // generate JumpIfTrue/False
+ builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
+  // Perform an operation that returns a non-boolean value to
+ // generate JumpIfToBooleanTrue/False.
+ builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&start)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&start);
+
+ builder.Return();
+
+ // Generate BytecodeArray.
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ CHECK_EQ(the_array->frame_size(),
+ builder.fixed_register_count() * kPointerSize);
+
+ // Build scorecard of bytecodes encountered in the BytecodeArray.
+ std::vector<int> scorecard(Bytecodes::ToByte(Bytecode::kLast) + 1);
+ Bytecode final_bytecode = Bytecode::kLdaZero;
+ int i = 0;
+ while (i < the_array->length()) {
+ uint8_t code = the_array->get(i);
+ scorecard[code] += 1;
+ final_bytecode = Bytecodes::FromByte(code);
+ i += Bytecodes::Size(Bytecodes::FromByte(code));
+ }
+
+ // Check return occurs at the end and only once in the BytecodeArray.
+ CHECK_EQ(final_bytecode, Bytecode::kReturn);
+ CHECK_EQ(scorecard[Bytecodes::ToByte(final_bytecode)], 1);
+
+#define CHECK_BYTECODE_PRESENT(Name, ...) \
+ /* Check Bytecode is marked in scorecard */ \
+ CHECK_GE(scorecard[Bytecodes::ToByte(Bytecode::k##Name)], 1);
+ BYTECODE_LIST(CHECK_BYTECODE_PRESENT)
+#undef CHECK_BYTECODE_PRESENT
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, FrameSizesLookGood) {
+ for (int locals = 0; locals < 5; locals++) {
+ for (int contexts = 0; contexts < 4; contexts++) {
+ for (int temps = 0; temps < 3; temps++) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(locals);
+ builder.set_context_count(contexts);
+
+ BytecodeRegisterAllocator temporaries(&builder);
+ for (int i = 0; i < temps; i++) {
+ builder.StoreAccumulatorInRegister(temporaries.NewRegister());
+ }
+ builder.Return();
+
+ Handle<BytecodeArray> the_array = builder.ToBytecodeArray();
+ int total_registers = locals + contexts + temps;
+ CHECK_EQ(the_array->frame_size(), total_registers * kPointerSize);
+ }
+ }
+ }
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, RegisterValues) {
+ int index = 1;
+ uint8_t operand = static_cast<uint8_t>(-index);
+
+ Register the_register(index);
+ CHECK_EQ(the_register.index(), index);
+
+ int actual_operand = the_register.ToOperand();
+ CHECK_EQ(actual_operand, operand);
+
+ int actual_index = Register::FromOperand(actual_operand).index();
+ CHECK_EQ(actual_index, index);
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, Parameters) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(10);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ Register param0(builder.Parameter(0));
+ Register param9(builder.Parameter(9));
+ CHECK_EQ(param9.index() - param0.index(), 9);
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, RegisterType) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(10);
+ builder.set_locals_count(3);
+ builder.set_context_count(0);
+
+ BytecodeRegisterAllocator register_allocator(&builder);
+ Register temp0 = register_allocator.NewRegister();
+ Register param0(builder.Parameter(0));
+ Register param9(builder.Parameter(9));
+ Register temp1 = register_allocator.NewRegister();
+ Register reg0(0);
+ Register reg1(1);
+ Register reg2(2);
+ Register temp2 = register_allocator.NewRegister();
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(temp0), false);
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(temp1), false);
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(temp2), false);
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(param0), true);
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(param9), true);
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(reg0), true);
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(reg1), true);
+ CHECK_EQ(builder.RegisterIsParameterOrLocal(reg2), true);
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, Constants) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ Factory* factory = isolate()->factory();
+ Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(3.14);
+ Handle<HeapObject> heap_num_2 = factory->NewHeapNumber(5.2);
+ Handle<Object> large_smi(Smi::FromInt(0x12345678), isolate());
+ Handle<HeapObject> heap_num_2_copy(*heap_num_2);
+ builder.LoadLiteral(heap_num_1)
+ .LoadLiteral(heap_num_2)
+ .LoadLiteral(large_smi)
+ .LoadLiteral(heap_num_1)
+ .LoadLiteral(heap_num_1)
+ .LoadLiteral(heap_num_2_copy);
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ // Should only have one entry for each identical constant.
+ CHECK_EQ(array->constant_pool()->length(), 3);
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, ForwardJumps) {
+ static const int kFarJumpDistance = 256;
+
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(1);
+ builder.set_context_count(0);
+
+ Register reg(0);
+ BytecodeLabel far0, far1, far2, far3, far4;
+ BytecodeLabel near0, near1, near2, near3, near4;
+
+ builder.Jump(&near0)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&near1)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&near2)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&near3)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&near4)
+ .Bind(&near0)
+ .Bind(&near1)
+ .Bind(&near2)
+ .Bind(&near3)
+ .Bind(&near4)
+ .Jump(&far0)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&far1)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&far2)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&far3)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&far4);
+ for (int i = 0; i < kFarJumpDistance - 18; i++) {
+ builder.LoadUndefined();
+ }
+ builder.Bind(&far0).Bind(&far1).Bind(&far2).Bind(&far3).Bind(&far4);
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ DCHECK_EQ(array->length(), 36 + kFarJumpDistance - 18 + 1);
+
+ BytecodeArrayIterator iterator(array);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 18);
+ iterator.Advance();
+
+ // Ignore compare operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 14);
+ iterator.Advance();
+
+ // Ignore compare operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 10);
+ iterator.Advance();
+
+ // Ignore add operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 6);
+ iterator.Advance();
+
+ // Ignore add operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 2);
+ iterator.Advance();
+
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance));
+ iterator.Advance();
+
+ // Ignore compare operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance - 4));
+ iterator.Advance();
+
+ // Ignore compare operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance - 8));
+ iterator.Advance();
+
+ // Ignore add operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance - 12));
+ iterator.Advance();
+
+ // Ignore add operation.
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(),
+ Bytecode::kJumpIfToBooleanFalseConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0),
+ Smi::FromInt(kFarJumpDistance - 16));
+ iterator.Advance();
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, BackwardJumps) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(1);
+ builder.set_context_count(0);
+ Register reg(0);
+
+ BytecodeLabel label0, label1, label2, label3, label4;
+ builder.Bind(&label0)
+ .Jump(&label0)
+ .Bind(&label1)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&label1)
+ .Bind(&label2)
+ .CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&label2)
+ .Bind(&label3)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&label3)
+ .Bind(&label4)
+ .BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&label4);
+ for (int i = 0; i < 63; i++) {
+ builder.Jump(&label4);
+ }
+ builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfFalse(&label4);
+ builder.BinaryOperation(Token::Value::ADD, reg, Strength::WEAK)
+ .JumpIfTrue(&label3);
+ builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfFalse(&label2);
+ builder.CompareOperation(Token::Value::EQ, reg, Strength::WEAK)
+ .JumpIfTrue(&label1);
+ builder.Jump(&label0);
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(array);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 0);
+ iterator.Advance();
+ // Ignore compare operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrue);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ iterator.Advance();
+ // Ignore compare operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalse);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ iterator.Advance();
+ // Ignore binary operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrue);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ iterator.Advance();
+ // Ignore binary operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanFalse);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ iterator.Advance();
+ for (int i = 0; i < 63; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -i * 2 - 4);
+ iterator.Advance();
+ }
+ // Ignore binary operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(),
+ Bytecode::kJumpIfToBooleanFalseConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -132);
+ iterator.Advance();
+ // Ignore binary operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfToBooleanTrueConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -140);
+ iterator.Advance();
+ // Ignore compare operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfFalseConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -148);
+ iterator.Advance();
+ // Ignore compare operation.
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpIfTrueConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -156);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpConstant);
+ CHECK_EQ(Smi::cast(*iterator.GetConstantForIndexOperand(0))->value(), -160);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, LabelReuse) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ // Labels can only have 1 forward reference, but
+ // can be referred to multiple times once bound.
+ BytecodeLabel label;
+
+ builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label).Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(array);
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 2);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 0);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+
+TEST_F(BytecodeArrayBuilderTest, LabelAddressReuse) {
+ static const int kRepeats = 3;
+
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ for (int i = 0; i < kRepeats; i++) {
+ BytecodeLabel label;
+ builder.Jump(&label).Bind(&label).Jump(&label).Jump(&label);
+ }
+
+ builder.Return();
+
+ Handle<BytecodeArray> array = builder.ToBytecodeArray();
+ BytecodeArrayIterator iterator(array);
+ for (int i = 0; i < kRepeats; i++) {
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 2);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), 0);
+ iterator.Advance();
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kJump);
+ CHECK_EQ(iterator.GetImmediateOperand(0), -2);
+ iterator.Advance();
+ }
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/interpreter/bytecode-array-iterator-unittest.cc b/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
new file mode 100644
index 0000000..cd9f120
--- /dev/null
+++ b/test/unittests/interpreter/bytecode-array-iterator-unittest.cc
@@ -0,0 +1,113 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeArrayIteratorTest : public TestWithIsolateAndZone {
+ public:
+ BytecodeArrayIteratorTest() {}
+ ~BytecodeArrayIteratorTest() override {}
+};
+
+
+TEST_F(BytecodeArrayIteratorTest, IteratesBytecodeArray) {
+ // Use a builder to create an array containing multiple bytecodes
+ // with 0, 1 and 2 operands.
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(3);
+ builder.set_locals_count(2);
+ builder.set_context_count(0);
+
+ Factory* factory = isolate()->factory();
+ Handle<HeapObject> heap_num_0 = factory->NewHeapNumber(2.718);
+ Handle<HeapObject> heap_num_1 = factory->NewHeapNumber(2147483647);
+ Smi* zero = Smi::FromInt(0);
+ Smi* smi_0 = Smi::FromInt(64);
+ Smi* smi_1 = Smi::FromInt(-65536);
+ Register reg_0(0);
+ Register reg_1(1);
+ Register reg_2 = Register::FromParameterIndex(2, builder.parameter_count());
+ Handle<String> name = factory->NewStringFromStaticChars("abc");
+ int name_index = 3;
+ int feedback_slot = 97;
+
+ builder.LoadLiteral(heap_num_0)
+ .LoadLiteral(heap_num_1)
+ .LoadLiteral(zero)
+ .LoadLiteral(smi_0)
+ .LoadLiteral(smi_1)
+ .LoadAccumulatorWithRegister(reg_0)
+ .LoadNamedProperty(reg_1, name, feedback_slot, LanguageMode::SLOPPY)
+ .StoreAccumulatorInRegister(reg_2)
+ .CallRuntime(Runtime::kLoadIC_Miss, reg_0, 1)
+ .Return();
+
+ // Test iterator sees the expected output from the builder.
+ BytecodeArrayIterator iterator(builder.ToBytecodeArray());
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(heap_num_0));
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK(iterator.GetConstantForIndexOperand(0).is_identical_to(heap_num_1));
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaZero);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaSmi8);
+ CHECK_EQ(Smi::FromInt(iterator.GetImmediateOperand(0)), smi_0);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdaConstant);
+ CHECK_EQ(*iterator.GetConstantForIndexOperand(0), smi_1);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLdar);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_0.index());
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kLoadICSloppy);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_1.index());
+ CHECK_EQ(iterator.GetIndexOperand(1), name_index);
+ CHECK_EQ(iterator.GetIndexOperand(2), feedback_slot);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kStar);
+ CHECK_EQ(iterator.GetRegisterOperand(0).index(), reg_2.index());
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kCallRuntime);
+ CHECK_EQ(static_cast<Runtime::FunctionId>(iterator.GetIndexOperand(0)),
+ Runtime::kLoadIC_Miss);
+ CHECK_EQ(iterator.GetRegisterOperand(1).index(), reg_0.index());
+ CHECK_EQ(iterator.GetCountOperand(2), 1);
+ CHECK(!iterator.done());
+ iterator.Advance();
+
+ CHECK_EQ(iterator.current_bytecode(), Bytecode::kReturn);
+ CHECK(!iterator.done());
+ iterator.Advance();
+ CHECK(iterator.done());
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/interpreter/bytecode-register-allocator-unittest.cc b/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
new file mode 100644
index 0000000..0620322
--- /dev/null
+++ b/test/unittests/interpreter/bytecode-register-allocator-unittest.cc
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecode-array-builder.h"
+#include "src/interpreter/bytecode-register-allocator.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class BytecodeRegisterAllocatorTest : public TestWithIsolateAndZone {
+ public:
+ BytecodeRegisterAllocatorTest() {}
+ ~BytecodeRegisterAllocatorTest() override {}
+};
+
+
+TEST_F(BytecodeRegisterAllocatorTest, TemporariesRecycled) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ int first;
+ {
+ BytecodeRegisterAllocator temporaries(&builder);
+ first = temporaries.NewRegister().index();
+ temporaries.NewRegister();
+ temporaries.NewRegister();
+ temporaries.NewRegister();
+ }
+
+ int second;
+ {
+ BytecodeRegisterAllocator temporaries(&builder);
+ second = temporaries.NewRegister().index();
+ }
+
+ CHECK_EQ(first, second);
+}
+
+
+TEST_F(BytecodeRegisterAllocatorTest, ConsecutiveRegisters) {
+ BytecodeArrayBuilder builder(isolate(), zone());
+ builder.set_parameter_count(0);
+ builder.set_locals_count(0);
+ builder.set_context_count(0);
+
+ BytecodeRegisterAllocator temporaries(&builder);
+ temporaries.PrepareForConsecutiveAllocations(4);
+ Register reg0 = temporaries.NextConsecutiveRegister();
+ Register other = temporaries.NewRegister();
+ Register reg1 = temporaries.NextConsecutiveRegister();
+ Register reg2 = temporaries.NextConsecutiveRegister();
+ Register reg3 = temporaries.NextConsecutiveRegister();
+ USE(other);
+
+ CHECK(Register::AreContiguous(reg0, reg1, reg2, reg3));
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/interpreter/bytecodes-unittest.cc b/test/unittests/interpreter/bytecodes-unittest.cc
new file mode 100644
index 0000000..812ee46
--- /dev/null
+++ b/test/unittests/interpreter/bytecodes-unittest.cc
@@ -0,0 +1,63 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/interpreter/bytecodes.h"
+#include "test/unittests/test-utils.h"
+
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+TEST(OperandConversion, Registers) {
+ for (int i = 0; i < 128; i++) {
+ uint8_t operand_value = Register(i).ToOperand();
+ Register r = Register::FromOperand(operand_value);
+ CHECK_EQ(i, r.index());
+ }
+}
+
+
+TEST(OperandConversion, Parameters) {
+ int parameter_counts[] = {7, 13, 99};
+
+ size_t count = sizeof(parameter_counts) / sizeof(parameter_counts[0]);
+ for (size_t p = 0; p < count; p++) {
+ int parameter_count = parameter_counts[p];
+ for (int i = 0; i < parameter_count; i++) {
+ Register r = Register::FromParameterIndex(i, parameter_count);
+ uint8_t operand_value = r.ToOperand();
+ Register s = Register::FromOperand(operand_value);
+ CHECK_EQ(i, s.ToParameterIndex(parameter_count));
+ }
+ }
+}
+
+
+TEST(OperandConversion, RegistersParametersNoOverlap) {
+ std::vector<uint8_t> operand_count(256);
+
+ for (int i = 0; i <= kMaxInt8; i++) {
+ Register r = Register(i);
+ uint8_t operand = r.ToOperand();
+ operand_count[operand] += 1;
+ CHECK_EQ(operand_count[operand], 1);
+ }
+
+ int parameter_count = Register::MaxParameterIndex() + 1;
+ for (int i = 0; i < parameter_count; i++) {
+ Register r = Register::FromParameterIndex(i, parameter_count);
+ uint8_t operand = r.ToOperand();
+ operand_count[operand] += 1;
+ CHECK_EQ(operand_count[operand], 1);
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/interpreter/constant-array-builder-unittest.cc b/test/unittests/interpreter/constant-array-builder-unittest.cc
new file mode 100644
index 0000000..ea5d1bb
--- /dev/null
+++ b/test/unittests/interpreter/constant-array-builder-unittest.cc
@@ -0,0 +1,225 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/handles-inl.h"
+#include "src/interpreter/constant-array-builder.h"
+#include "src/isolate.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class ConstantArrayBuilderTest : public TestWithIsolateAndZone {
+ public:
+ ConstantArrayBuilderTest() {}
+ ~ConstantArrayBuilderTest() override {}
+
+ static const size_t kLowCapacity = ConstantArrayBuilder::kLowCapacity;
+ static const size_t kMaxCapacity = ConstantArrayBuilder::kMaxCapacity;
+};
+
+
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ ConstantArrayBuilderTest::kMaxCapacity;
+STATIC_CONST_MEMBER_DEFINITION const size_t
+ ConstantArrayBuilderTest::kLowCapacity;
+
+
+TEST_F(ConstantArrayBuilderTest, AllocateAllEntries) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kMaxCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK_EQ(builder.size(), i + 1);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ for (size_t i = 0; i < kMaxCapacity; i++) {
+ CHECK_EQ(Handle<Smi>::cast(builder.At(i))->value(), static_cast<double>(i));
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx8Reservations) {
+ for (size_t reserved = 1; reserved < kLowCapacity; reserved *= 3) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kByte);
+ }
+ for (size_t i = 0; i < 2 * kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ if (i + reserved < kLowCapacity) {
+ CHECK_LE(builder.size(), kLowCapacity);
+ CHECK_EQ(builder.size(), i + 1);
+ CHECK(builder.At(i)->SameValue(*object));
+ } else {
+ CHECK_GE(builder.size(), kLowCapacity);
+ CHECK_EQ(builder.size(), i + reserved + 1);
+ CHECK(builder.At(i + reserved)->SameValue(*object));
+ }
+ }
+ CHECK_EQ(builder.size(), 2 * kLowCapacity + reserved);
+
+ // Check reserved values represented by the hole.
+ for (size_t i = 0; i < reserved; i++) {
+ Handle<Object> empty = builder.At(kLowCapacity - reserved + i);
+ CHECK(empty->SameValue(isolate()->heap()->the_hole_value()));
+ }
+
+ // Commit reserved entries with duplicates and check size does not change.
+ DCHECK_EQ(reserved + 2 * kLowCapacity, builder.size());
+ size_t duplicates_in_idx8_space =
+ std::min(reserved, kLowCapacity - reserved);
+ for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
+ builder.CommitReservedEntry(OperandSize::kByte,
+ isolate()->factory()->NewNumberFromSize(i));
+ DCHECK_EQ(reserved + 2 * kLowCapacity, builder.size());
+ }
+
+ // Check all committed values match expected (holes where
+ // duplicates_in_idx8_space allocated).
+ for (size_t i = 0; i < kLowCapacity - reserved; i++) {
+ Smi* smi = Smi::FromInt(static_cast<int>(i));
+ CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
+ }
+ for (size_t i = kLowCapacity; i < 2 * kLowCapacity + reserved; i++) {
+ Smi* smi = Smi::FromInt(static_cast<int>(i - reserved));
+ CHECK(Handle<Smi>::cast(builder.At(i))->SameValue(smi));
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ size_t index = kLowCapacity - reserved + i;
+ CHECK(builder.At(index)->IsTheHole());
+ }
+
+ // Now make reservations, and commit them with unique entries.
+ for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kByte);
+ }
+ for (size_t i = 0; i < duplicates_in_idx8_space; i++) {
+ Handle<Object> object =
+ isolate()->factory()->NewNumberFromSize(2 * kLowCapacity + i);
+ size_t index = builder.CommitReservedEntry(OperandSize::kByte, object);
+ CHECK_EQ(static_cast<int>(index), kLowCapacity - reserved + i);
+ CHECK(builder.At(static_cast<int>(index))->SameValue(*object));
+ }
+ CHECK_EQ(builder.size(), 2 * kLowCapacity + reserved);
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, AllocateEntriesWithIdx16Reservations) {
+ for (size_t reserved = 1; reserved < kLowCapacity; reserved *= 3) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ CHECK_EQ(builder.size(), i + 1);
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kShort);
+ CHECK_EQ(builder.size(), kLowCapacity);
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ builder.DiscardReservedEntry(OperandSize::kShort);
+ CHECK_EQ(builder.size(), kLowCapacity);
+ }
+ for (size_t i = 0; i < reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kShort);
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.CommitReservedEntry(operand_size, object);
+ CHECK_EQ(builder.size(), kLowCapacity);
+ }
+ for (size_t i = kLowCapacity; i < kLowCapacity + reserved; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(operand_size == OperandSize::kShort);
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.CommitReservedEntry(operand_size, object);
+ CHECK_EQ(builder.size(), i + 1);
+ }
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, ToFixedArray) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ static const size_t kNumberOfElements = 37;
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK(builder.At(i)->SameValue(*object));
+ }
+ Handle<FixedArray> constant_array =
+ builder.ToFixedArray(isolate()->factory());
+ CHECK_EQ(constant_array->length(), kNumberOfElements);
+ for (size_t i = 0; i < kNumberOfElements; i++) {
+ CHECK(constant_array->get(static_cast<int>(i))->SameValue(*builder.At(i)));
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, GapFilledWhenLowReservationCommitted) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(OperandSize::kByte == operand_size);
+ CHECK_EQ(builder.size(), 0);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK_EQ(builder.size(), i + kLowCapacity + 1);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ builder.CommitReservedEntry(OperandSize::kByte,
+ builder.At(i + kLowCapacity));
+ CHECK_EQ(builder.size(), 2 * kLowCapacity);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> original = builder.At(kLowCapacity + i);
+ Handle<Object> duplicate = builder.At(i);
+ CHECK(original->SameValue(*duplicate));
+ Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
+ CHECK(original->SameValue(*reference));
+ }
+}
+
+
+TEST_F(ConstantArrayBuilderTest, GapNotFilledWhenLowReservationDiscarded) {
+ ConstantArrayBuilder builder(isolate(), zone());
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ OperandSize operand_size = builder.CreateReservedEntry();
+ CHECK(OperandSize::kByte == operand_size);
+ CHECK_EQ(builder.size(), 0);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> object = isolate()->factory()->NewNumberFromSize(i);
+ builder.Insert(object);
+ CHECK_EQ(builder.size(), i + kLowCapacity + 1);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ builder.DiscardReservedEntry(OperandSize::kByte);
+ builder.Insert(builder.At(i + kLowCapacity));
+ CHECK_EQ(builder.size(), 2 * kLowCapacity);
+ }
+ for (size_t i = 0; i < kLowCapacity; i++) {
+ Handle<Object> reference = isolate()->factory()->NewNumberFromSize(i);
+ Handle<Object> original = builder.At(kLowCapacity + i);
+ CHECK(original->SameValue(*reference));
+ Handle<Object> duplicate = builder.At(i);
+ CHECK(duplicate->SameValue(*isolate()->factory()->the_hole_value()));
+ }
+}
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/libplatform/default-platform-unittest.cc b/test/unittests/libplatform/default-platform-unittest.cc
index d2c160e..814b27b 100644
--- a/test/unittests/libplatform/default-platform-unittest.cc
+++ b/test/unittests/libplatform/default-platform-unittest.cc
@@ -19,6 +19,17 @@
MOCK_METHOD0(Die, void());
};
+
+class DefaultPlatformWithMockTime : public DefaultPlatform {
+ public:
+ DefaultPlatformWithMockTime() : time_(0) {}
+ double MonotonicallyIncreasingTime() override { return time_; }
+ void IncreaseTime(double seconds) { time_ += seconds; }
+
+ private:
+ double time_;
+};
+
} // namespace
@@ -39,5 +50,82 @@
EXPECT_FALSE(platform.PumpMessageLoop(isolate));
}
+
+TEST(DefaultPlatformTest, PumpMessageLoopDelayed) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatformWithMockTime platform;
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ StrictMock<MockTask>* task1 = new StrictMock<MockTask>;
+ StrictMock<MockTask>* task2 = new StrictMock<MockTask>;
+ platform.CallDelayedOnForegroundThread(isolate, task2, 100);
+ platform.CallDelayedOnForegroundThread(isolate, task1, 10);
+
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ platform.IncreaseTime(11);
+ EXPECT_CALL(*task1, Run());
+ EXPECT_CALL(*task1, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ platform.IncreaseTime(90);
+ EXPECT_CALL(*task2, Run());
+ EXPECT_CALL(*task2, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+}
+
+
+TEST(DefaultPlatformTest, PumpMessageLoopNoStarvation) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ DefaultPlatformWithMockTime platform;
+ EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+ StrictMock<MockTask>* task1 = new StrictMock<MockTask>;
+ StrictMock<MockTask>* task2 = new StrictMock<MockTask>;
+ StrictMock<MockTask>* task3 = new StrictMock<MockTask>;
+ platform.CallOnForegroundThread(isolate, task1);
+ platform.CallDelayedOnForegroundThread(isolate, task2, 10);
+ platform.IncreaseTime(11);
+
+ EXPECT_CALL(*task1, Run());
+ EXPECT_CALL(*task1, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+
+ platform.CallOnForegroundThread(isolate, task3);
+
+ EXPECT_CALL(*task2, Run());
+ EXPECT_CALL(*task2, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+ EXPECT_CALL(*task3, Run());
+ EXPECT_CALL(*task3, Die());
+ EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+}
+
+
+TEST(DefaultPlatformTest, PendingDelayedTasksAreDestroyedOnShutdown) {
+ InSequence s;
+
+ int dummy;
+ Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+ {
+ DefaultPlatformWithMockTime platform;
+ StrictMock<MockTask>* task = new StrictMock<MockTask>;
+ platform.CallDelayedOnForegroundThread(isolate, task, 10);
+ EXPECT_CALL(*task, Die());
+ }
+}
+
+
} // namespace platform
} // namespace v8
diff --git a/test/unittests/libplatform/task-queue-unittest.cc b/test/unittests/libplatform/task-queue-unittest.cc
index 9a18658..2de104b 100644
--- a/test/unittests/libplatform/task-queue-unittest.cc
+++ b/test/unittests/libplatform/task-queue-unittest.cc
@@ -21,12 +21,12 @@
};
-class TaskQueueThread FINAL : public base::Thread {
+class TaskQueueThread final : public base::Thread {
public:
explicit TaskQueueThread(TaskQueue* queue)
: Thread(Options("libplatform TaskQueueThread")), queue_(queue) {}
- virtual void Run() OVERRIDE { EXPECT_THAT(queue_->GetNext(), IsNull()); }
+ void Run() override { EXPECT_THAT(queue_->GetNext(), IsNull()); }
private:
TaskQueue* queue_;
diff --git a/test/unittests/locked-queue-unittest.cc b/test/unittests/locked-queue-unittest.cc
new file mode 100644
index 0000000..cc176d9
--- /dev/null
+++ b/test/unittests/locked-queue-unittest.cc
@@ -0,0 +1,90 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/locked-queue-inl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace {
+
+typedef int Record;
+
+} // namespace
+
+namespace v8 {
+namespace internal {
+
+TEST(LockedQueue, ConstructorEmpty) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+
+TEST(LockedQueue, SingleRecordEnqueueDequeue) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ queue.Enqueue(1);
+ EXPECT_FALSE(queue.IsEmpty());
+ Record a = -1;
+ bool success = queue.Dequeue(&a);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(a, 1);
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+
+TEST(LockedQueue, Peek) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ queue.Enqueue(1);
+ EXPECT_FALSE(queue.IsEmpty());
+ Record a = -1;
+ bool success = queue.Peek(&a);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(a, 1);
+ EXPECT_FALSE(queue.IsEmpty());
+ success = queue.Dequeue(&a);
+ EXPECT_TRUE(success);
+ EXPECT_EQ(a, 1);
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+
+TEST(LockedQueue, PeekOnEmpty) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ Record a = -1;
+ bool success = queue.Peek(&a);
+ EXPECT_FALSE(success);
+}
+
+
+TEST(LockedQueue, MultipleRecords) {
+ LockedQueue<Record> queue;
+ EXPECT_TRUE(queue.IsEmpty());
+ queue.Enqueue(1);
+ EXPECT_FALSE(queue.IsEmpty());
+ for (int i = 2; i <= 5; ++i) {
+ queue.Enqueue(i);
+ EXPECT_FALSE(queue.IsEmpty());
+ }
+ Record rec = 0;
+ for (int i = 1; i <= 4; ++i) {
+ EXPECT_FALSE(queue.IsEmpty());
+ queue.Dequeue(&rec);
+ EXPECT_EQ(i, rec);
+ }
+ for (int i = 6; i <= 12; ++i) {
+ queue.Enqueue(i);
+ EXPECT_FALSE(queue.IsEmpty());
+ }
+ for (int i = 5; i <= 12; ++i) {
+ EXPECT_FALSE(queue.IsEmpty());
+ queue.Dequeue(&rec);
+ EXPECT_EQ(i, rec);
+ }
+ EXPECT_TRUE(queue.IsEmpty());
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/run-all-unittests.cc b/test/unittests/run-all-unittests.cc
index 8c361dd..0b62dbf 100644
--- a/test/unittests/run-all-unittests.cc
+++ b/test/unittests/run-all-unittests.cc
@@ -9,12 +9,11 @@
namespace {
-class DefaultPlatformEnvironment FINAL : public ::testing::Environment {
+class DefaultPlatformEnvironment final : public ::testing::Environment {
public:
DefaultPlatformEnvironment() : platform_(NULL) {}
- ~DefaultPlatformEnvironment() {}
- virtual void SetUp() OVERRIDE {
+ void SetUp() override {
EXPECT_EQ(NULL, platform_);
platform_ = v8::platform::CreateDefaultPlatform();
ASSERT_TRUE(platform_ != NULL);
@@ -22,7 +21,7 @@
ASSERT_TRUE(v8::V8::Initialize());
}
- virtual void TearDown() OVERRIDE {
+ void TearDown() override {
ASSERT_TRUE(platform_ != NULL);
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
@@ -41,5 +40,6 @@
testing::InitGoogleMock(&argc, argv);
testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+ v8::V8::InitializeExternalStartupData(argv[0]);
return RUN_ALL_TESTS();
}
diff --git a/test/unittests/runtime/runtime-interpreter-unittest.cc b/test/unittests/runtime/runtime-interpreter-unittest.cc
new file mode 100644
index 0000000..97b7992
--- /dev/null
+++ b/test/unittests/runtime/runtime-interpreter-unittest.cc
@@ -0,0 +1,172 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/factory.h"
+#include "src/heap/heap.h"
+#include "src/heap/heap-inl.h"
+#include "src/runtime/runtime.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace interpreter {
+
+class RuntimeInterpreterTest : public TestWithIsolateAndZone {
+ public:
+ typedef Object* (*RuntimeMethod)(int, Object**, Isolate*);
+
+ RuntimeInterpreterTest() {}
+ ~RuntimeInterpreterTest() override {}
+
+ bool TestOperatorWithObjects(RuntimeMethod method, Handle<Object> lhs,
+ Handle<Object> rhs, bool expected);
+ bool TestOperator(RuntimeMethod method, int32_t lhs, int32_t rhs,
+ bool expected);
+ bool TestOperator(RuntimeMethod method, double lhs, double rhs,
+ bool expected);
+ bool TestOperator(RuntimeMethod method, const char* lhs, const char* rhs,
+ bool expected);
+};
+
+
+bool RuntimeInterpreterTest::TestOperatorWithObjects(RuntimeMethod method,
+ Handle<Object> lhs,
+ Handle<Object> rhs,
+ bool expected) {
+ Object* args_object[] = {*rhs, *lhs};
+ Handle<Object> result =
+ handle(method(2, &args_object[1], isolate()), isolate());
+ CHECK(result->IsTrue() || result->IsFalse());
+ return result->IsTrue() == expected;
+}
+
+
+bool RuntimeInterpreterTest::TestOperator(RuntimeMethod method, int32_t lhs,
+ int32_t rhs, bool expected) {
+ Handle<Object> x = isolate()->factory()->NewNumberFromInt(lhs);
+ Handle<Object> y = isolate()->factory()->NewNumberFromInt(rhs);
+ return TestOperatorWithObjects(method, x, y, expected);
+}
+
+
+bool RuntimeInterpreterTest::TestOperator(RuntimeMethod method, double lhs,
+ double rhs, bool expected) {
+ Handle<Object> x = isolate()->factory()->NewNumber(lhs);
+ Handle<Object> y = isolate()->factory()->NewNumber(rhs);
+ CHECK_EQ(HeapNumber::cast(*x)->value(), lhs);
+ CHECK_EQ(HeapNumber::cast(*y)->value(), rhs);
+ return TestOperatorWithObjects(method, x, y, expected);
+}
+
+
+bool RuntimeInterpreterTest::TestOperator(RuntimeMethod method, const char* lhs,
+ const char* rhs, bool expected) {
+ Handle<Object> x = isolate()->factory()->NewStringFromAsciiChecked(lhs);
+ Handle<Object> y = isolate()->factory()->NewStringFromAsciiChecked(rhs);
+ return TestOperatorWithObjects(method, x, y, expected);
+}
+
+
+TEST_F(RuntimeInterpreterTest, TestOperatorsWithIntegers) {
+ int32_t inputs[] = {kMinInt, Smi::kMinValue, -17, -1, 0, 1,
+ 991, Smi::kMaxValue, kMaxInt};
+ TRACED_FOREACH(int, lhs, inputs) {
+ TRACED_FOREACH(int, rhs, inputs) {
+#define INTEGER_OPERATOR_CHECK(r, op, x, y) \
+ CHECK(TestOperator(Runtime_Interpreter##r, x, y, x op y))
+ INTEGER_OPERATOR_CHECK(Equals, ==, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(NotEquals, !=, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(StrictEquals, ==, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(StrictNotEquals, !=, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(LessThan, <, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(GreaterThan, >, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(LessThanOrEqual, <=, lhs, rhs);
+ INTEGER_OPERATOR_CHECK(GreaterThanOrEqual, >=, lhs, rhs);
+#undef INTEGER_OPERATOR_CHECK
+ }
+ }
+}
+
+
+TEST_F(RuntimeInterpreterTest, TestOperatorsWithDoubles) {
+ double inputs[] = {std::numeric_limits<double>::min(),
+ std::numeric_limits<double>::max(),
+ -0.001,
+ 0.01,
+ 3.14,
+ -6.02214086e23};
+ TRACED_FOREACH(double, lhs, inputs) {
+ TRACED_FOREACH(double, rhs, inputs) {
+#define DOUBLE_OPERATOR_CHECK(r, op, x, y) \
+ CHECK(TestOperator(Runtime_Interpreter##r, x, y, x op y))
+ DOUBLE_OPERATOR_CHECK(Equals, ==, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(NotEquals, !=, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(StrictEquals, ==, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(StrictNotEquals, !=, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(LessThan, <, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(GreaterThan, >, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(LessThanOrEqual, <=, lhs, rhs);
+ DOUBLE_OPERATOR_CHECK(GreaterThanOrEqual, >=, lhs, rhs);
+#undef DOUBLE_OPERATOR_CHECK
+ }
+ }
+}
+
+
+TEST_F(RuntimeInterpreterTest, TestOperatorsWithString) {
+ const char* inputs[] = {"abc", "a", "def", "0"};
+ TRACED_FOREACH(const char*, lhs, inputs) {
+ TRACED_FOREACH(const char*, rhs, inputs) {
+#define STRING_OPERATOR_CHECK(r, op, x, y) \
+ CHECK(TestOperator(Runtime_Interpreter##r, x, y, \
+ std::string(x) op std::string(y)))
+ STRING_OPERATOR_CHECK(Equals, ==, lhs, rhs);
+ STRING_OPERATOR_CHECK(NotEquals, !=, lhs, rhs);
+ STRING_OPERATOR_CHECK(StrictEquals, ==, lhs, rhs);
+ STRING_OPERATOR_CHECK(StrictNotEquals, !=, lhs, rhs);
+ STRING_OPERATOR_CHECK(LessThan, <, lhs, rhs);
+ STRING_OPERATOR_CHECK(GreaterThan, >, lhs, rhs);
+ STRING_OPERATOR_CHECK(LessThanOrEqual, <=, lhs, rhs);
+ STRING_OPERATOR_CHECK(GreaterThanOrEqual, >=, lhs, rhs);
+#undef STRING_OPERATOR_CHECK
+ }
+ }
+}
+
+
+TEST_F(RuntimeInterpreterTest, ToBoolean) {
+ double quiet_nan = std::numeric_limits<double>::quiet_NaN();
+ std::pair<Handle<Object>, bool> cases[] = {
+ std::make_pair(isolate()->factory()->NewNumberFromInt(0), false),
+ std::make_pair(isolate()->factory()->NewNumberFromInt(1), true),
+ std::make_pair(isolate()->factory()->NewNumberFromInt(100), true),
+ std::make_pair(isolate()->factory()->NewNumberFromInt(-1), true),
+ std::make_pair(isolate()->factory()->NewNumber(7.7), true),
+ std::make_pair(isolate()->factory()->NewNumber(0.00001), true),
+ std::make_pair(isolate()->factory()->NewNumber(quiet_nan), false),
+ std::make_pair(isolate()->factory()->NewHeapNumber(0.0), false),
+ std::make_pair(isolate()->factory()->undefined_value(), false),
+ std::make_pair(isolate()->factory()->null_value(), false),
+ std::make_pair(isolate()->factory()->true_value(), true),
+ std::make_pair(isolate()->factory()->false_value(), false),
+ std::make_pair(isolate()->factory()->NewStringFromStaticChars(""), false),
+ std::make_pair(isolate()->factory()->NewStringFromStaticChars("_"), true),
+ };
+
+ for (size_t i = 0; i < arraysize(cases); i++) {
+ auto& value_expected_tuple = cases[i];
+ Object* args_object[] = {*value_expected_tuple.first};
+ Handle<Object> result = handle(
+ Runtime_InterpreterToBoolean(1, &args_object[0], isolate()), isolate());
+ CHECK(result->IsBoolean());
+ CHECK_EQ(result->IsTrue(), value_expected_tuple.second);
+ }
+}
+
+
+} // namespace interpreter
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/test-utils.cc b/test/unittests/test-utils.cc
index 31d724a..7d04215 100644
--- a/test/unittests/test-utils.cc
+++ b/test/unittests/test-utils.cc
@@ -4,39 +4,30 @@
#include "test/unittests/test-utils.h"
+#include "include/libplatform/libplatform.h"
#include "src/base/platform/time.h"
+#include "src/debug/debug.h"
#include "src/flags.h"
-#include "src/isolate-inl.h"
+#include "src/isolate.h"
+#include "src/v8.h"
namespace v8 {
-std::ostream& operator<<(std::ostream& os, ExternalArrayType type) {
- switch (type) {
- case kExternalInt8Array:
- return os << "ExternalInt8Array";
- case kExternalUint8Array:
- return os << "ExternalUint8Array";
- case kExternalInt16Array:
- return os << "ExternalInt16Array";
- case kExternalUint16Array:
- return os << "ExternalUint16Array";
- case kExternalInt32Array:
- return os << "ExternalInt32Array";
- case kExternalUint32Array:
- return os << "ExternalUint32Array";
- case kExternalFloat32Array:
- return os << "ExternalFloat32Array";
- case kExternalFloat64Array:
- return os << "ExternalFloat64Array";
- case kExternalUint8ClampedArray:
- return os << "ExternalUint8ClampedArray";
+class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ virtual void* Allocate(size_t length) {
+ void* data = AllocateUninitialized(length);
+ return data == NULL ? data : memset(data, 0, length);
}
- UNREACHABLE();
- return os;
-}
+ virtual void* AllocateUninitialized(size_t length) { return malloc(length); }
+ virtual void Free(void* data, size_t) { free(data); }
+};
// static
+ArrayBufferAllocator* TestWithIsolate::array_buffer_allocator_ = NULL;
+
+// static
Isolate* TestWithIsolate::isolate_ = NULL;
@@ -51,7 +42,10 @@
void TestWithIsolate::SetUpTestCase() {
Test::SetUpTestCase();
EXPECT_EQ(NULL, isolate_);
- isolate_ = v8::Isolate::New();
+ v8::Isolate::CreateParams create_params;
+ array_buffer_allocator_ = new ArrayBufferAllocator;
+ create_params.array_buffer_allocator = array_buffer_allocator_;
+ isolate_ = v8::Isolate::New(create_params);
EXPECT_TRUE(isolate_ != NULL);
}
@@ -59,8 +53,12 @@
// static
void TestWithIsolate::TearDownTestCase() {
ASSERT_TRUE(isolate_ != NULL);
+ v8::Platform* platform = internal::V8::GetCurrentPlatform();
+ ASSERT_TRUE(platform != NULL);
+ while (platform::PumpMessageLoop(platform, isolate_)) continue;
isolate_->Dispose();
isolate_ = NULL;
+ delete array_buffer_allocator_;
Test::TearDownTestCase();
}
@@ -82,7 +80,7 @@
} // namespace
TestWithRandomNumberGenerator::TestWithRandomNumberGenerator()
- : rng_(GetRandomSeedFromFlag(internal::FLAG_random_seed)) {}
+ : rng_(GetRandomSeedFromFlag(::v8::internal::FLAG_random_seed)) {}
TestWithRandomNumberGenerator::~TestWithRandomNumberGenerator() {}
@@ -94,6 +92,7 @@
TestWithIsolate::~TestWithIsolate() {}
+TestWithIsolateAndZone::~TestWithIsolateAndZone() {}
Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
diff --git a/test/unittests/test-utils.h b/test/unittests/test-utils.h
index 511e357..78283bf 100644
--- a/test/unittests/test-utils.h
+++ b/test/unittests/test-utils.h
@@ -13,10 +13,10 @@
namespace v8 {
-std::ostream& operator<<(std::ostream&, ExternalArrayType);
+class ArrayBufferAllocator;
-class TestWithIsolate : public ::testing::Test {
+class TestWithIsolate : public virtual ::testing::Test {
public:
TestWithIsolate();
virtual ~TestWithIsolate();
@@ -27,6 +27,7 @@
static void TearDownTestCase();
private:
+ static ArrayBufferAllocator* array_buffer_allocator_;
static Isolate* isolate_;
Isolate::Scope isolate_scope_;
HandleScope handle_scope_;
@@ -90,9 +91,9 @@
};
-class TestWithZone : public TestWithIsolate {
+class TestWithZone : public virtual ::testing::Test {
public:
- TestWithZone() : zone_(isolate()) {}
+ TestWithZone() {}
virtual ~TestWithZone();
Zone* zone() { return &zone_; }
@@ -103,6 +104,20 @@
DISALLOW_COPY_AND_ASSIGN(TestWithZone);
};
+
+class TestWithIsolateAndZone : public virtual TestWithIsolate {
+ public:
+ TestWithIsolateAndZone() {}
+ virtual ~TestWithIsolateAndZone();
+
+ Zone* zone() { return &zone_; }
+
+ private:
+ Zone zone_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestWithIsolateAndZone);
+};
+
} // namespace internal
} // namespace v8
diff --git a/test/unittests/unittests.gyp b/test/unittests/unittests.gyp
index 2ead44f..5339da3 100644
--- a/test/unittests/unittests.gyp
+++ b/test/unittests/unittests.gyp
@@ -23,11 +23,13 @@
'../..',
],
'sources': [ ### gcmole(all) ###
+ 'atomic-utils-unittest.cc',
'base/bits-unittest.cc',
'base/cpu-unittest.cc',
'base/division-by-constant-unittest.cc',
'base/flags-unittest.cc',
'base/functional-unittest.cc',
+ 'base/logging-unittest.cc',
'base/iterator-unittest.cc',
'base/platform/condition-variable-unittest.cc',
'base/platform/mutex-unittest.cc',
@@ -36,43 +38,84 @@
'base/platform/time-unittest.cc',
'base/sys-info-unittest.cc',
'base/utils/random-number-generator-unittest.cc',
+ 'cancelable-tasks-unittest.cc',
'char-predicates-unittest.cc',
+ 'compiler/branch-elimination-unittest.cc',
'compiler/change-lowering-unittest.cc',
+ 'compiler/coalesced-live-ranges-unittest.cc',
'compiler/common-operator-reducer-unittest.cc',
'compiler/common-operator-unittest.cc',
'compiler/compiler-test-utils.h',
'compiler/control-equivalence-unittest.cc',
+ 'compiler/control-flow-optimizer-unittest.cc',
+ 'compiler/dead-code-elimination-unittest.cc',
'compiler/diamond-unittest.cc',
+ 'compiler/escape-analysis-unittest.cc',
'compiler/graph-reducer-unittest.cc',
+ 'compiler/graph-reducer-unittest.h',
+ 'compiler/graph-trimmer-unittest.cc',
'compiler/graph-unittest.cc',
'compiler/graph-unittest.h',
'compiler/instruction-selector-unittest.cc',
'compiler/instruction-selector-unittest.h',
'compiler/instruction-sequence-unittest.cc',
'compiler/instruction-sequence-unittest.h',
+ 'compiler/interpreter-assembler-unittest.cc',
+ 'compiler/interpreter-assembler-unittest.h',
'compiler/js-builtin-reducer-unittest.cc',
+ 'compiler/js-context-relaxation-unittest.cc',
+ 'compiler/js-intrinsic-lowering-unittest.cc',
'compiler/js-operator-unittest.cc',
'compiler/js-typed-lowering-unittest.cc',
+ 'compiler/linkage-tail-call-unittest.cc',
+ 'compiler/liveness-analyzer-unittest.cc',
+ 'compiler/live-range-unittest.cc',
'compiler/load-elimination-unittest.cc',
+ 'compiler/loop-peeling-unittest.cc',
'compiler/machine-operator-reducer-unittest.cc',
'compiler/machine-operator-unittest.cc',
'compiler/move-optimizer-unittest.cc',
+ 'compiler/node-cache-unittest.cc',
'compiler/node-matchers-unittest.cc',
+ 'compiler/node-properties-unittest.cc',
'compiler/node-test-utils.cc',
'compiler/node-test-utils.h',
+ 'compiler/node-unittest.cc',
+ 'compiler/opcodes-unittest.cc',
'compiler/register-allocator-unittest.cc',
+ 'compiler/schedule-unittest.cc',
'compiler/select-lowering-unittest.cc',
+ 'compiler/scheduler-unittest.cc',
'compiler/simplified-operator-reducer-unittest.cc',
'compiler/simplified-operator-unittest.cc',
+ 'compiler/state-values-utils-unittest.cc',
+ 'compiler/tail-call-optimization-unittest.cc',
+ 'compiler/typer-unittest.cc',
'compiler/value-numbering-reducer-unittest.cc',
'compiler/zone-pool-unittest.cc',
+ 'counters-unittest.cc',
+ 'interpreter/bytecodes-unittest.cc',
+ 'interpreter/bytecode-array-builder-unittest.cc',
+ 'interpreter/bytecode-array-iterator-unittest.cc',
+ 'interpreter/bytecode-register-allocator-unittest.cc',
+ 'interpreter/constant-array-builder-unittest.cc',
'libplatform/default-platform-unittest.cc',
'libplatform/task-queue-unittest.cc',
'libplatform/worker-thread-unittest.cc',
+ 'heap/bitmap-unittest.cc',
'heap/gc-idle-time-handler-unittest.cc',
+ 'heap/memory-reducer-unittest.cc',
+ 'heap/heap-unittest.cc',
+ 'heap/scavenge-job-unittest.cc',
+ 'locked-queue-unittest.cc',
'run-all-unittests.cc',
+ 'runtime/runtime-interpreter-unittest.cc',
'test-utils.h',
'test-utils.cc',
+ 'wasm/ast-decoder-unittest.cc',
+ 'wasm/encoder-unittest.cc',
+ 'wasm/module-decoder-unittest.cc',
+ 'wasm/wasm-macro-gen-unittest.cc',
],
'conditions': [
['v8_target_arch=="arm"', {
@@ -105,20 +148,18 @@
'compiler/x64/instruction-selector-x64-unittest.cc',
],
}],
+ ['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
+ 'sources': [ ### gcmole(arch:ppc) ###
+ 'compiler/ppc/instruction-selector-ppc-unittest.cc',
+ ],
+ }],
+ ['OS=="aix"', {
+ 'ldflags': [ '-Wl,-bbigtoc' ],
+ }],
['component=="shared_library"', {
# compiler-unittests can't be built against a shared library, so we
# need to depend on the underlying static target in that case.
- 'conditions': [
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
- }],
- ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_external_snapshot'],
- }],
- ['v8_use_snapshot!="true"', {
- 'dependencies': ['../../tools/gyp/v8.gyp:v8_nosnapshot'],
- }],
- ],
+ 'dependencies': ['../../tools/gyp/v8.gyp:v8_maybe_snapshot'],
}, {
'dependencies': ['../../tools/gyp/v8.gyp:v8'],
}],
@@ -137,4 +178,23 @@
],
},
],
+ 'conditions': [
+ ['test_isolation_mode != "noop"', {
+ 'targets': [
+ {
+ 'target_name': 'unittests_run',
+ 'type': 'none',
+ 'dependencies': [
+ 'unittests',
+ ],
+ 'includes': [
+ '../../build/isolate.gypi',
+ ],
+ 'sources': [
+ 'unittests.isolate',
+ ],
+ },
+ ],
+ }],
+ ],
}
diff --git a/test/unittests/unittests.isolate b/test/unittests/unittests.isolate
new file mode 100644
index 0000000..ae503bf
--- /dev/null
+++ b/test/unittests/unittests.isolate
@@ -0,0 +1,15 @@
+# Copyright 2015 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'variables': {
+ 'files': [
+ '<(PRODUCT_DIR)/unittests<(EXECUTABLE_SUFFIX)',
+ './unittests.status',
+ ],
+ },
+ 'includes': [
+ '../../src/base.isolate',
+ '../../tools/testrunner/testrunner.isolate',
+ ],
+}
\ No newline at end of file
diff --git a/test/unittests/unittests.status b/test/unittests/unittests.status
index d439913..18201cd 100644
--- a/test/unittests/unittests.status
+++ b/test/unittests/unittests.status
@@ -3,4 +3,12 @@
# found in the LICENSE file.
[
+['byteorder == big', {
+ # TODO(mips-team): Fix Wasm for big-endian.
+ 'WasmModuleVerifyTest*': [SKIP],
+ 'WasmFunctionVerifyTest*': [SKIP],
+ 'WasmDecoderTest.TableSwitch*': [SKIP],
+ 'WasmDecoderTest.AllLoadMemCombinations': [SKIP],
+}], # 'byteorder == big'
+
]
diff --git a/test/unittests/wasm/OWNERS b/test/unittests/wasm/OWNERS
new file mode 100644
index 0000000..c2abc8a
--- /dev/null
+++ b/test/unittests/wasm/OWNERS
@@ -0,0 +1,3 @@
+titzer@chromium.org
+bradnelson@chromium.org
+ahaas@chromium.org
diff --git a/test/unittests/wasm/ast-decoder-unittest.cc b/test/unittests/wasm/ast-decoder-unittest.cc
new file mode 100644
index 0000000..923c554
--- /dev/null
+++ b/test/unittests/wasm/ast-decoder-unittest.cc
@@ -0,0 +1,2439 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/v8.h"
+
+#include "test/cctest/wasm/test-signatures.h"
+
+#include "src/objects.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/wasm-macro-gen.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+static const byte kCodeGetLocal0[] = {kExprGetLocal, 0};
+static const byte kCodeGetLocal1[] = {kExprGetLocal, 1};
+static const byte kCodeSetLocal0[] = {kExprSetLocal, 0, kExprI8Const, 0};
+
+static const LocalType kLocalTypes[] = {kAstI32, kAstI64, kAstF32, kAstF64};
+static const MachineType machineTypes[] = {
+ MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+ MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+ MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
+ MachineType::Float64()};
+
+static const WasmOpcode kInt32BinopOpcodes[] = {
+ kExprI32Add, kExprI32Sub, kExprI32Mul, kExprI32DivS, kExprI32DivU,
+ kExprI32RemS, kExprI32RemU, kExprI32And, kExprI32Ior, kExprI32Xor,
+ kExprI32Shl, kExprI32ShrU, kExprI32ShrS, kExprI32Eq, kExprI32LtS,
+ kExprI32LeS, kExprI32LtU, kExprI32LeU};
+
+
+#define EXPECT_VERIFIES(env, x) Verify(kSuccess, env, x, x + arraysize(x))
+
+#define EXPECT_FAILURE(env, x) Verify(kError, env, x, x + arraysize(x))
+
+#define EXPECT_VERIFIES_INLINE(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(kSuccess, env, code, code + arraysize(code)); \
+ } while (false)
+
+
+#define EXPECT_FAILURE_INLINE(env, ...) \
+ do { \
+ static byte code[] = {__VA_ARGS__}; \
+ Verify(kError, env, code, code + arraysize(code)); \
+ } while (false)
+
+#define VERIFY(...) \
+ do { \
+ static const byte code[] = {__VA_ARGS__}; \
+ Verify(kSuccess, &env_v_i, code, code + sizeof(code)); \
+ } while (false)
+
+
+class WasmDecoderTest : public TestWithZone {
+ public:
+ WasmDecoderTest() : TestWithZone(), sigs() {
+ init_env(&env_i_i, sigs.i_i());
+ init_env(&env_v_v, sigs.v_v());
+ init_env(&env_v_i, sigs.v_i());
+ init_env(&env_i_f, sigs.i_f());
+ init_env(&env_i_d, sigs.i_d());
+ init_env(&env_l_l, sigs.l_l());
+ init_env(&env_f_ff, sigs.f_ff());
+ init_env(&env_d_dd, sigs.d_dd());
+ }
+
+ TestSignatures sigs;
+
+ FunctionEnv env_i_i;
+ FunctionEnv env_v_v;
+ FunctionEnv env_v_i;
+ FunctionEnv env_i_f;
+ FunctionEnv env_i_d;
+ FunctionEnv env_l_l;
+ FunctionEnv env_f_ff;
+ FunctionEnv env_d_dd;
+
+ static void init_env(FunctionEnv* env, FunctionSig* sig) {
+ env->module = nullptr;
+ env->sig = sig;
+ env->local_int32_count = 0;
+ env->local_int64_count = 0;
+ env->local_float32_count = 0;
+ env->local_float64_count = 0;
+ env->SumLocals();
+ }
+
+ // A wrapper around VerifyWasmCode() that renders a nice failure message.
+ void Verify(ErrorCode expected, FunctionEnv* env, const byte* start,
+ const byte* end) {
+ TreeResult result = VerifyWasmCode(env, start, end);
+ if (result.error_code != expected) {
+ ptrdiff_t pc = result.error_pc - result.start;
+ ptrdiff_t pt = result.error_pt - result.start;
+ std::ostringstream str;
+ if (expected == kSuccess) {
+ str << "Verification failed: " << result.error_code << " pc = +" << pc;
+ if (result.error_pt) str << ", pt = +" << pt;
+ str << ", msg = " << result.error_msg.get();
+ } else {
+ str << "Verification expected: " << expected << ", but got "
+ << result.error_code;
+ if (result.error_code != kSuccess) {
+ str << " pc = +" << pc;
+ if (result.error_pt) str << ", pt = +" << pt;
+ }
+ }
+ FATAL(str.str().c_str());
+ }
+ }
+
+ void TestBinop(WasmOpcode opcode, FunctionSig* success) {
+ // op(local[0], local[1])
+ byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0, kExprGetLocal,
+ 1};
+ FunctionEnv env;
+ init_env(&env, success);
+ EXPECT_VERIFIES(&env, code);
+
+ // Try all combinations of return and parameter types.
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ for (size_t k = 0; k < arraysize(kLocalTypes); k++) {
+ LocalType types[] = {kLocalTypes[i], kLocalTypes[j], kLocalTypes[k]};
+ if (types[0] != success->GetReturn(0) ||
+ types[1] != success->GetParam(0) ||
+ types[2] != success->GetParam(1)) {
+ // Test signature mismatch.
+ FunctionSig sig(1, 2, types);
+ init_env(&env, &sig);
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+ }
+ }
+ }
+
+ void TestUnop(WasmOpcode opcode, FunctionSig* success) {
+ TestUnop(opcode, success->GetReturn(), success->GetParam(0));
+ }
+
+ void TestUnop(WasmOpcode opcode, LocalType ret_type, LocalType param_type) {
+ // Return(op(local[0]))
+ byte code[] = {static_cast<byte>(opcode), kExprGetLocal, 0};
+ FunctionEnv env;
+ {
+ LocalType types[] = {ret_type, param_type};
+ FunctionSig sig(1, 1, types);
+ init_env(&env, &sig);
+ EXPECT_VERIFIES(&env, code);
+ }
+
+ // Try all combinations of return and parameter types.
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalType types[] = {kLocalTypes[i], kLocalTypes[j]};
+ if (types[0] != ret_type || types[1] != param_type) {
+ // Test signature mismatch.
+ FunctionSig sig(1, 1, types);
+ init_env(&env, &sig);
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+ }
+ }
+};
+
+
+static FunctionEnv CreateInt32FunctionEnv(FunctionSig* sig, int count) {
+ FunctionEnv env;
+ env.module = nullptr;
+ env.sig = sig;
+ env.local_int32_count = count;
+ env.local_float64_count = 0;
+ env.local_float32_count = 0;
+ env.total_locals = static_cast<unsigned>(count + sig->parameter_count());
+ return env;
+}
+
+
+TEST_F(WasmDecoderTest, Int8Const) {
+ byte code[] = {kExprI8Const, 0};
+ for (int i = -128; i < 128; i++) {
+ code[1] = static_cast<byte>(i);
+ EXPECT_VERIFIES(&env_i_i, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, EmptyFunction) {
+ byte code[] = {0};
+ Verify(kSuccess, &env_v_v, code, code);
+ Verify(kError, &env_i_i, code, code);
+}
+
+
+TEST_F(WasmDecoderTest, IncompleteIf1) {
+ byte code[] = {kExprIf};
+ EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, IncompleteIf2) {
+ byte code[] = {kExprIf, kExprI8Const, 0};
+ EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Int8Const_fallthru) {
+ byte code[] = {kExprI8Const, 0, kExprI8Const, 1};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Int32Const) {
+ byte code[] = {kExprI32Const, 0, 0, 0, 0};
+ int32_t* ptr = reinterpret_cast<int32_t*>(code + 1);
+ const int kInc = 4498211;
+ for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
+ *ptr = i;
+ EXPECT_VERIFIES(&env_i_i, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Int8Const_fallthru2) {
+ byte code[] = {kExprI8Const, 0, kExprI32Const, 1, 2, 3, 4};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Int64Const) {
+ byte code[] = {kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0};
+ int64_t* ptr = reinterpret_cast<int64_t*>(code + 1);
+ const int kInc = 4498211;
+ for (int32_t i = kMinInt; i < kMaxInt - kInc; i = i + kInc) {
+ *ptr = (static_cast<int64_t>(i) << 32) | i;
+ EXPECT_VERIFIES(&env_l_l, code);
+ }
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, Float32Const) {
+ byte code[] = {kExprF32Const, 0, 0, 0, 0};
+ float* ptr = reinterpret_cast<float*>(code + 1);
+ for (int i = 0; i < 30; i++) {
+ *ptr = i * -7.75f;
+ EXPECT_VERIFIES(&env_f_ff, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Float64Const) {
+ byte code[] = {kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0};
+ double* ptr = reinterpret_cast<double*>(code + 1);
+ for (int i = 0; i < 30; i++) {
+ *ptr = i * 33.45;
+ EXPECT_VERIFIES(&env_d_dd, code);
+ }
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, Int32Const_off_end) {
+ byte code[] = {kExprI32Const, 0xaa, 0xbb, 0xcc, 0x44};
+
+ for (int size = 1; size <= 4; size++) {
+ Verify(kError, &env_i_i, code, code + size);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_param) {
+ EXPECT_VERIFIES(&env_i_i, kCodeGetLocal0);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_local) {
+ FunctionEnv env;
+ init_env(&env, sigs.i_v());
+ env.AddLocals(kAstI32, 1);
+ EXPECT_VERIFIES(&env, kCodeGetLocal0);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_param_n) {
+ FunctionSig* array[] = {sigs.i_i(), sigs.i_ii(), sigs.i_iii()};
+
+ for (size_t i = 0; i < arraysize(array); i++) {
+ FunctionEnv env = CreateInt32FunctionEnv(array[i], 0);
+ EXPECT_VERIFIES(&env, kCodeGetLocal0);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, GetLocalN_local) {
+ for (byte i = 1; i < 8; i++) {
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), i);
+ for (byte j = 0; j < i; j++) {
+ byte code[] = {kExprGetLocal, j};
+ EXPECT_VERIFIES(&env, code);
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal0_fail_no_params) {
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), 0);
+
+ EXPECT_FAILURE(&env, kCodeGetLocal0);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal1_fail_no_locals) {
+ EXPECT_FAILURE(&env_i_i, kCodeGetLocal1);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal_off_end) {
+ static const byte code[] = {kExprGetLocal};
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, GetLocal_varint) {
+ env_i_i.local_int32_count = 1000000000;
+ env_i_i.total_locals += 1000000000;
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xFF, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xF0, 0x80, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xF2, 0x81, 0x82, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+
+ {
+ static const byte code[] = {kExprGetLocal, 0xF3, 0xA1, 0xB1, 0xC1, 0x01};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Binops_off_end) {
+ byte code1[] = {0}; // [opcode]
+ for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
+ code1[0] = kInt32BinopOpcodes[i];
+ EXPECT_FAILURE(&env_i_i, code1);
+ }
+
+ byte code3[] = {0, kExprGetLocal, 0}; // [opcode] [expr]
+ for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
+ code3[0] = kInt32BinopOpcodes[i];
+ EXPECT_FAILURE(&env_i_i, code3);
+ }
+
+ byte code4[] = {0, kExprGetLocal, 0, 0}; // [opcode] [expr] [opcode]
+ for (size_t i = 0; i < arraysize(kInt32BinopOpcodes); i++) {
+ code4[0] = kInt32BinopOpcodes[i];
+ code4[3] = kInt32BinopOpcodes[i];
+ EXPECT_FAILURE(&env_i_i, code4);
+ }
+}
+
+
+//===================================================================
+//== Statements
+//===================================================================
+TEST_F(WasmDecoderTest, Nop) {
+ static const byte code[] = {kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, SetLocal0_param) {
+ static const byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, SetLocal0_local) {
+ byte code[] = {kExprSetLocal, 0, kExprI8Const, 0};
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), 1);
+
+ EXPECT_VERIFIES(&env, code);
+}
+
+
+TEST_F(WasmDecoderTest, SetLocalN_local) {
+ for (byte i = 1; i < 8; i++) {
+ FunctionEnv env = CreateInt32FunctionEnv(sigs.i_v(), i);
+ for (byte j = 0; j < i; j++) {
+ byte code[] = {kExprSetLocal, j, kExprI8Const, i};
+ EXPECT_VERIFIES(&env, code);
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Block0) {
+ static const byte code[] = {kExprBlock, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block0_fallthru1) {
+ static const byte code[] = {kExprBlock, 0, kExprBlock, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block1) {
+ static const byte code[] = {kExprBlock, 1, kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block0_fallthru2) {
+ static const byte code[] = {kExprBlock, 0, kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2) {
+ static const byte code[] = {kExprBlock, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprSetLocal, 0, kExprI8Const, 0}; // --
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2_fallthru) {
+ static const byte code[] = {kExprBlock, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprI8Const, 11}; // --
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, BlockN) {
+ byte block[] = {kExprBlock, 2};
+
+ for (size_t i = 0; i < 10; i++) {
+ size_t total = sizeof(block) + sizeof(kCodeSetLocal0) * i;
+ byte* code = reinterpret_cast<byte*>(malloc(total));
+ memcpy(code, block, sizeof(block));
+ code[1] = static_cast<byte>(i);
+ for (size_t j = 0; j < i; j++) {
+ memcpy(code + sizeof(block) + j * sizeof(kCodeSetLocal0), kCodeSetLocal0,
+ sizeof(kCodeSetLocal0));
+ }
+ Verify(kSuccess, &env_v_i, code, code + total);
+ free(code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, BlockN_off_end) {
+ for (byte i = 2; i < 10; i++) {
+ byte code[] = {kExprBlock, i, kExprNop};
+ EXPECT_FAILURE(&env_v_v, code);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, Block1_break) {
+ static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2_break) {
+ static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block1_continue) {
+ static const byte code[] = {kExprBlock, 1, kExprBr, 1, kExprNop};
+ EXPECT_FAILURE(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, Block2_continue) {
+ static const byte code[] = {kExprBlock, 2, kExprNop, kExprBr, 1, kExprNop};
+ EXPECT_FAILURE(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprBlock0) {
+ static const byte code[] = {kExprBlock, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprBlock1a) {
+ static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, ExprBlock1b) {
+ static const byte code[] = {kExprBlock, 1, kExprI8Const, 0};
+ EXPECT_FAILURE(&env_f_ff, code);
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, ExprBlock1c) {
+ static const byte code[] = {kExprBlock, 1, kExprF32Const, 0, 0, 0, 0};
+ EXPECT_VERIFIES(&env_f_ff, code);
+}
+
+#endif
+
+
+// An if with an empty (nop) then-arm is a valid statement.
+TEST_F(WasmDecoderTest, IfEmpty) {
+ static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// if-else with a set_local in the then-arm and nop in the else-arm.
+TEST_F(WasmDecoderTest, IfSet) {
+ static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprSetLocal,
+ 0, kExprI8Const, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// if-else whose then-arm is a 1-statement block.
+TEST_F(WasmDecoderTest, IfBlock1) {
+ static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprBlock,
+ 1, kExprSetLocal, 0, kExprI8Const,
+ 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// if (no else) whose then-arm is a 2-statement block.
+TEST_F(WasmDecoderTest, IfBlock2) {
+ static const byte code[] = {kExprIf, kExprGetLocal, 0, kExprBlock,
+ 2, kExprSetLocal, 0, kExprI8Const,
+ 0, kExprSetLocal, 0, kExprI8Const,
+ 0};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// if-else with both arms empty (nop) is valid.
+TEST_F(WasmDecoderTest, IfElseEmpty) {
+ static const byte code[] = {kExprIfElse, kExprGetLocal, 0, kExprNop,
+ kExprNop};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// if-else with a set_local in each arm.
+TEST_F(WasmDecoderTest, IfElseSet) {
+ static const byte code[] = {kExprIfElse,
+ kExprGetLocal,
+ 0, // --
+ kExprSetLocal,
+ 0,
+ kExprI8Const,
+ 0, // --
+ kExprSetLocal,
+ 0,
+ kExprI8Const,
+ 1}; // --
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// When the then-arm is unreachable, the if-else takes the else-arm's type:
+// it verifies only when the local's type is i32 (the return type).
+TEST_F(WasmDecoderTest, IfElseUnreachable) {
+ static const byte code[] = {kExprIfElse, kExprI8Const, 0,
+ kExprUnreachable, kExprGetLocal, 0};
+
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType types[] = {kAstI32, kLocalTypes[i]};
+ FunctionEnv env;
+ FunctionSig sig(1, 1, types);
+ init_env(&env, &sig);
+
+ if (kLocalTypes[i] == kAstI32) {
+ EXPECT_VERIFIES(&env, code);
+ } else {
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+}
+
+
+// An empty loop is valid.
+TEST_F(WasmDecoderTest, Loop0) {
+ static const byte code[] = {kExprLoop, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+// A loop with a single set_local statement.
+TEST_F(WasmDecoderTest, Loop1) {
+ static const byte code[] = {kExprLoop, 1, kExprSetLocal, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// A loop with two set_local statements.
+TEST_F(WasmDecoderTest, Loop2) {
+ static const byte code[] = {kExprLoop, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprSetLocal, 0, kExprI8Const, 0}; // --
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// Inside a loop, br depth 0 continues the loop: valid.
+TEST_F(WasmDecoderTest, Loop1_continue) {
+ static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+// Inside a loop, br depth 1 breaks out of the loop: valid.
+TEST_F(WasmDecoderTest, Loop1_break) {
+ static const byte code[] = {kExprLoop, 1, kExprBr, 1, kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+// Continue (br 0) from the second statement of a 2-statement loop.
+TEST_F(WasmDecoderTest, Loop2_continue) {
+ static const byte code[] = {kExprLoop, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprBr, 0, kExprNop}; // --
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// Break (br 1) from the second statement of a 2-statement loop.
+TEST_F(WasmDecoderTest, Loop2_break) {
+ static const byte code[] = {kExprLoop, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprBr, 1, kExprNop}; // --
+ EXPECT_VERIFIES(&env_v_i, code);
+}
+
+
+// An empty loop used as an expression.
+TEST_F(WasmDecoderTest, ExprLoop0) {
+ static const byte code[] = {kExprLoop, 0};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+// A loop whose body continues and then yields an i32 value.
+TEST_F(WasmDecoderTest, ExprLoop1a) {
+ static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+// NOTE(review): identical byte sequence to ExprLoop1a above -- presumably
+// one of the two was meant to exercise a different encoding; confirm intent.
+TEST_F(WasmDecoderTest, ExprLoop1b) {
+ static const byte code[] = {kExprLoop, 1, kExprBr, 0, kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+// Code after a continue in the loop body is unreachable but still verifies.
+TEST_F(WasmDecoderTest, ExprLoop2_unreachable) {
+ static const byte code[] = {kExprLoop, 2, kExprBr, 0,
+ kExprI8Const, 0, kExprNop};
+ EXPECT_VERIFIES(&env_i_i, code);
+}
+
+
+// A bare nop satisfies a void signature but not one returning a value.
+TEST_F(WasmDecoderTest, ReturnVoid1) {
+ static const byte code[] = {kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+}
+
+
+// A block that breaks with no value is void-typed: same expectations.
+TEST_F(WasmDecoderTest, ReturnVoid2) {
+ static const byte code[] = {kExprBlock, 1, kExprBr, 0, kExprNop};
+ EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_f, code);
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// A value-producing expression may be used (and discarded) in a void
+// function, for every value type.
+TEST_F(WasmDecoderTest, ReturnVoid3) {
+ EXPECT_VERIFIES_INLINE(&env_v_v, kExprI8Const, 0);
+ EXPECT_VERIFIES_INLINE(&env_v_v, kExprI32Const, 0, 0, 0, 0);
+ EXPECT_VERIFIES_INLINE(&env_v_v, kExprI64Const, 0, 0, 0, 0, 0, 0, 0, 0);
+ EXPECT_VERIFIES_INLINE(&env_v_v, kExprF32Const, 0, 0, 0, 0);
+ EXPECT_VERIFIES_INLINE(&env_v_v, kExprF64Const, 0, 0, 0, 0, 0, 0, 0, 0);
+
+ EXPECT_VERIFIES_INLINE(&env_v_i, kExprGetLocal, 0);
+}
+
+#endif
+
+
+// kExprUnreachable (and an unconditional br) make following code
+// unreachable; the decoder must still accept such bodies.
+TEST_F(WasmDecoderTest, Unreachable1) {
+ EXPECT_VERIFIES_INLINE(&env_v_v, kExprUnreachable);
+ EXPECT_VERIFIES_INLINE(&env_v_v, kExprUnreachable, kExprUnreachable);
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(2, WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(2, WASM_BR(0), WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(2, WASM_UNREACHABLE, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(2, WASM_BR(0), WASM_ZERO));
+}
+
+
+// Sanity check of the VERIFY helper macro on a loop body.
+TEST_F(WasmDecoderTest, Codeiness) {
+ VERIFY(kExprLoop, 2, // --
+ kExprSetLocal, 0, kExprI8Const, 0, // --
+ kExprBr, 0, kExprNop); // --
+}
+
+
+// Value-producing if expressions with both arms of matching i32 type.
+TEST_F(WasmDecoderTest, ExprIf1) {
+ VERIFY(kExprIf, kExprGetLocal, 0, kExprI8Const, 0, kExprI8Const, 1);
+ VERIFY(kExprIf, kExprGetLocal, 0, kExprGetLocal, 0, kExprGetLocal, 0);
+ VERIFY(kExprIf, kExprGetLocal, 0, kExprI32Add, kExprGetLocal, 0,
+ kExprGetLocal, 0, kExprI8Const, 1);
+}
+
+
+// Every proper prefix of a valid if expression must fail to decode.
+TEST_F(WasmDecoderTest, ExprIf_off_end) {
+ static const byte kCode[] = {kExprIf, kExprGetLocal, 0, kExprGetLocal,
+ 0, kExprGetLocal, 0};
+ for (size_t len = 1; len < arraysize(kCode); len++) {
+ Verify(kError, &env_i_i, kCode, kCode + len);
+ }
+}
+
+
+// Type errors in if-else expressions: non-i32 condition, non-i32 arms in an
+// i32 context, and statement (void) arms where a value is required.
+TEST_F(WasmDecoderTest, ExprIf_type) {
+ {
+ // float|double ? 1 : 2
+ static const byte kCode[] = {kExprIfElse, kExprGetLocal, 0, kExprI8Const,
+ 1, kExprI8Const, 2};
+ EXPECT_FAILURE(&env_i_f, kCode);
+ EXPECT_FAILURE(&env_i_d, kCode);
+ }
+ {
+ // 1 ? float|double : 2
+ static const byte kCode[] = {kExprIfElse, kExprI8Const, 1, kExprGetLocal,
+ 0, kExprI8Const, 2};
+ EXPECT_FAILURE(&env_i_f, kCode);
+ EXPECT_FAILURE(&env_i_d, kCode);
+ }
+ {
+ // stmt ? 0 : 1
+ static const byte kCode[] = {kExprIfElse, kExprNop, kExprI8Const,
+ 0, kExprI8Const, 1};
+ EXPECT_FAILURE(&env_i_i, kCode);
+ }
+ {
+ // 0 ? stmt : 1
+ static const byte kCode[] = {kExprIfElse, kExprI8Const, 0,
+ kExprNop, kExprI8Const, 1};
+ EXPECT_FAILURE(&env_i_i, kCode);
+ }
+ {
+ // 0 ? 1 : stmt
+ static const byte kCode[] = {kExprIfElse, kExprI8Const, 0, kExprI8Const, 1,
+ 0, kExprBlock};
+ EXPECT_FAILURE(&env_i_i, kCode);
+ }
+}
+
+
+// get_local on an i64 parameter type-checks against an i64 return.
+TEST_F(WasmDecoderTest, Int64Local_param) {
+ EXPECT_VERIFIES(&env_l_l, kCodeGetLocal0);
+}
+
+
+// Each of 1..7 added i64 locals is reachable via get_local.
+TEST_F(WasmDecoderTest, Int64Locals) {
+ for (byte i = 1; i < 8; i++) {
+ FunctionEnv env;
+ init_env(&env, sigs.l_v());
+ env.AddLocals(kAstI64, i);
+ for (byte j = 0; j < i; j++) {
+ byte code[] = {kExprGetLocal, j};
+ EXPECT_VERIFIES(&env, code);
+ }
+ }
+}
+
+
+// Each i32 binary operator type-checks against its (i32, i32) -> i32 sig.
+TEST_F(WasmDecoderTest, Int32Binops) {
+ TestBinop(kExprI32Add, sigs.i_ii());
+ TestBinop(kExprI32Sub, sigs.i_ii());
+ TestBinop(kExprI32Mul, sigs.i_ii());
+ TestBinop(kExprI32DivS, sigs.i_ii());
+ TestBinop(kExprI32DivU, sigs.i_ii());
+ TestBinop(kExprI32RemS, sigs.i_ii());
+ TestBinop(kExprI32RemU, sigs.i_ii());
+ TestBinop(kExprI32And, sigs.i_ii());
+ TestBinop(kExprI32Ior, sigs.i_ii());
+ TestBinop(kExprI32Xor, sigs.i_ii());
+ TestBinop(kExprI32Shl, sigs.i_ii());
+ TestBinop(kExprI32ShrU, sigs.i_ii());
+ TestBinop(kExprI32ShrS, sigs.i_ii());
+ TestBinop(kExprI32Eq, sigs.i_ii());
+ TestBinop(kExprI32LtS, sigs.i_ii());
+ TestBinop(kExprI32LeS, sigs.i_ii());
+ TestBinop(kExprI32LtU, sigs.i_ii());
+ TestBinop(kExprI32LeU, sigs.i_ii());
+}
+
+
+// f64 arithmetic yields f64; f64 comparisons yield i32.
+TEST_F(WasmDecoderTest, DoubleBinops) {
+ TestBinop(kExprF64Add, sigs.d_dd());
+ TestBinop(kExprF64Sub, sigs.d_dd());
+ TestBinop(kExprF64Mul, sigs.d_dd());
+ TestBinop(kExprF64Div, sigs.d_dd());
+
+ TestBinop(kExprF64Eq, sigs.i_dd());
+ TestBinop(kExprF64Lt, sigs.i_dd());
+ TestBinop(kExprF64Le, sigs.i_dd());
+}
+
+
+// f32 arithmetic yields f32; f32 comparisons yield i32.
+TEST_F(WasmDecoderTest, FloatBinops) {
+ TestBinop(kExprF32Add, sigs.f_ff());
+ TestBinop(kExprF32Sub, sigs.f_ff());
+ TestBinop(kExprF32Mul, sigs.f_ff());
+ TestBinop(kExprF32Div, sigs.f_ff());
+
+ TestBinop(kExprF32Eq, sigs.i_ff());
+ TestBinop(kExprF32Lt, sigs.i_ff());
+ TestBinop(kExprF32Le, sigs.i_ff());
+}
+
+
+// Each numeric conversion opcode maps its operand type to its result type.
+TEST_F(WasmDecoderTest, TypeConversions) {
+ TestUnop(kExprI32SConvertF32, kAstI32, kAstF32);
+ TestUnop(kExprI32SConvertF64, kAstI32, kAstF64);
+ TestUnop(kExprI32UConvertF32, kAstI32, kAstF32);
+ TestUnop(kExprI32UConvertF64, kAstI32, kAstF64);
+ TestUnop(kExprF64SConvertI32, kAstF64, kAstI32);
+ TestUnop(kExprF64UConvertI32, kAstF64, kAstI32);
+ TestUnop(kExprF64ConvertF32, kAstF64, kAstF32);
+ TestUnop(kExprF32SConvertI32, kAstF32, kAstI32);
+ TestUnop(kExprF32UConvertI32, kAstF32, kAstI32);
+ TestUnop(kExprF32ConvertF64, kAstF32, kAstF64);
+}
+
+
+// Statement-level convenience macros all produce decodable bytecode.
+TEST_F(WasmDecoderTest, MacrosStmt) {
+ VERIFY(WASM_SET_LOCAL(0, WASM_I32(87348)));
+ VERIFY(WASM_STORE_MEM(MachineType::Int32(), WASM_I8(24), WASM_I8(40)));
+ VERIFY(WASM_IF(WASM_GET_LOCAL(0), WASM_NOP));
+ VERIFY(WASM_IF_ELSE(WASM_GET_LOCAL(0), WASM_NOP, WASM_NOP));
+ VERIFY(WASM_NOP);
+ VERIFY(WASM_BLOCK(1, WASM_NOP));
+ VERIFY(WASM_LOOP(1, WASM_NOP));
+ VERIFY(WASM_LOOP(1, WASM_BREAK(0)));
+ VERIFY(WASM_LOOP(1, WASM_CONTINUE(0)));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// WASM_BREAK / WASM_BREAKV out of a loop, with the break value's type
+// matching each environment's return type.
+TEST_F(WasmDecoderTest, MacrosBreak) {
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BREAK(0)));
+
+ EXPECT_VERIFIES_INLINE(&env_i_i, WASM_LOOP(1, WASM_BREAKV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_l_l, WASM_LOOP(1, WASM_BREAKV(0, WASM_I64(0))));
+ EXPECT_VERIFIES_INLINE(&env_f_ff,
+ WASM_LOOP(1, WASM_BREAKV(0, WASM_F32(0.0))));
+ EXPECT_VERIFIES_INLINE(&env_d_dd,
+ WASM_LOOP(1, WASM_BREAKV(0, WASM_F64(0.0))));
+}
+
+#endif
+
+
+// WASM_CONTINUE inside a loop decodes correctly.
+TEST_F(WasmDecoderTest, MacrosContinue) {
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_CONTINUE(0)));
+}
+
+
+// Variadic block/loop macros with 2 and 3 statements.
+TEST_F(WasmDecoderTest, MacrosVariadic) {
+ VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_NOP));
+ VERIFY(WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
+ VERIFY(WASM_LOOP(2, WASM_NOP, WASM_NOP));
+ VERIFY(WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
+}
+
+
+// Blocks may nest, both sequentially and recursively.
+TEST_F(WasmDecoderTest, MacrosNestedBlocks) {
+ VERIFY(WASM_BLOCK(2, WASM_NOP, WASM_BLOCK(2, WASM_NOP, WASM_NOP)));
+ VERIFY(WASM_BLOCK(3, WASM_NOP, // --
+ WASM_BLOCK(2, WASM_NOP, WASM_NOP), // --
+ WASM_BLOCK(2, WASM_NOP, WASM_NOP))); // --
+ VERIFY(WASM_BLOCK(1, WASM_BLOCK(1, WASM_BLOCK(2, WASM_NOP, WASM_NOP))));
+}
+
+
+// An explicit return must supply exactly as many values as the signature
+// declares; too few is a decode failure.
+TEST_F(WasmDecoderTest, MultipleReturn) {
+ static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
+ FunctionSig sig_ii_v(2, 0, kIntTypes5);
+ FunctionEnv env_ii_v;
+ init_env(&env_ii_v, &sig_ii_v);
+ EXPECT_VERIFIES_INLINE(&env_ii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+ EXPECT_FAILURE_INLINE(&env_ii_v, WASM_RETURN(WASM_ZERO));
+
+ FunctionSig sig_iii_v(3, 0, kIntTypes5);
+ FunctionEnv env_iii_v;
+ init_env(&env_iii_v, &sig_iii_v);
+ EXPECT_VERIFIES_INLINE(&env_iii_v,
+ WASM_RETURN(WASM_ZERO, WASM_ONE, WASM_I8(44)));
+ EXPECT_FAILURE_INLINE(&env_iii_v, WASM_RETURN(WASM_ZERO, WASM_ONE));
+}
+
+
+// Same as above, but returning via fall-through instead of explicit return.
+TEST_F(WasmDecoderTest, MultipleReturn_fallthru) {
+ static LocalType kIntTypes5[] = {kAstI32, kAstI32, kAstI32, kAstI32, kAstI32};
+ FunctionSig sig_ii_v(2, 0, kIntTypes5);
+ FunctionEnv env_ii_v;
+ init_env(&env_ii_v, &sig_ii_v);
+
+ EXPECT_VERIFIES_INLINE(&env_ii_v, WASM_ZERO, WASM_ONE);
+ EXPECT_FAILURE_INLINE(&env_ii_v, WASM_ZERO);
+
+ FunctionSig sig_iii_v(3, 0, kIntTypes5);
+ FunctionEnv env_iii_v;
+ init_env(&env_iii_v, &sig_iii_v);
+ EXPECT_VERIFIES_INLINE(&env_iii_v, WASM_ZERO, WASM_ONE, WASM_I8(44));
+ EXPECT_FAILURE_INLINE(&env_iii_v, WASM_ZERO, WASM_ONE);
+}
+
+
+// Every i32 binop convenience macro decodes in an (i32) -> i32 context.
+TEST_F(WasmDecoderTest, MacrosInt32) {
+ VERIFY(WASM_I32_ADD(WASM_GET_LOCAL(0), WASM_I8(12)));
+ VERIFY(WASM_I32_SUB(WASM_GET_LOCAL(0), WASM_I8(13)));
+ VERIFY(WASM_I32_MUL(WASM_GET_LOCAL(0), WASM_I8(14)));
+ VERIFY(WASM_I32_DIVS(WASM_GET_LOCAL(0), WASM_I8(15)));
+ VERIFY(WASM_I32_DIVU(WASM_GET_LOCAL(0), WASM_I8(16)));
+ VERIFY(WASM_I32_REMS(WASM_GET_LOCAL(0), WASM_I8(17)));
+ VERIFY(WASM_I32_REMU(WASM_GET_LOCAL(0), WASM_I8(18)));
+ VERIFY(WASM_I32_AND(WASM_GET_LOCAL(0), WASM_I8(19)));
+ VERIFY(WASM_I32_IOR(WASM_GET_LOCAL(0), WASM_I8(20)));
+ VERIFY(WASM_I32_XOR(WASM_GET_LOCAL(0), WASM_I8(21)));
+ VERIFY(WASM_I32_SHL(WASM_GET_LOCAL(0), WASM_I8(22)));
+ VERIFY(WASM_I32_SHR(WASM_GET_LOCAL(0), WASM_I8(23)));
+ VERIFY(WASM_I32_SAR(WASM_GET_LOCAL(0), WASM_I8(24)));
+ VERIFY(WASM_I32_EQ(WASM_GET_LOCAL(0), WASM_I8(25)));
+ VERIFY(WASM_I32_NE(WASM_GET_LOCAL(0), WASM_I8(25)));
+
+ VERIFY(WASM_I32_LTS(WASM_GET_LOCAL(0), WASM_I8(26)));
+ VERIFY(WASM_I32_LES(WASM_GET_LOCAL(0), WASM_I8(27)));
+ VERIFY(WASM_I32_LTU(WASM_GET_LOCAL(0), WASM_I8(28)));
+ VERIFY(WASM_I32_LEU(WASM_GET_LOCAL(0), WASM_I8(29)));
+
+ VERIFY(WASM_I32_GTS(WASM_GET_LOCAL(0), WASM_I8(26)));
+ VERIFY(WASM_I32_GES(WASM_GET_LOCAL(0), WASM_I8(27)));
+ VERIFY(WASM_I32_GTU(WASM_GET_LOCAL(0), WASM_I8(28)));
+ VERIFY(WASM_I32_GEU(WASM_GET_LOCAL(0), WASM_I8(29)));
+}
+
+
+// Every i64 binop convenience macro decodes: arithmetic/bitwise ops yield
+// i64 (checked against an (i64, i64) -> i64 env), comparisons yield i32
+// (checked against an (i64, i64) -> i32 env).
+TEST_F(WasmDecoderTest, MacrosInt64) {
+ FunctionEnv env_i_ll;
+ FunctionEnv env_l_ll;
+ init_env(&env_i_ll, sigs.i_ll());
+ init_env(&env_l_ll, sigs.l_ll());
+
+// Local helper macros; undefined again at the end of this test.
+#define VERIFY_L_LL(...) EXPECT_VERIFIES_INLINE(&env_l_ll, __VA_ARGS__)
+#define VERIFY_I_LL(...) EXPECT_VERIFIES_INLINE(&env_i_ll, __VA_ARGS__)
+
+ VERIFY_L_LL(WASM_I64_ADD(WASM_GET_LOCAL(0), WASM_I64(12)));
+ VERIFY_L_LL(WASM_I64_SUB(WASM_GET_LOCAL(0), WASM_I64(13)));
+ VERIFY_L_LL(WASM_I64_MUL(WASM_GET_LOCAL(0), WASM_I64(14)));
+ VERIFY_L_LL(WASM_I64_DIVS(WASM_GET_LOCAL(0), WASM_I64(15)));
+ VERIFY_L_LL(WASM_I64_DIVU(WASM_GET_LOCAL(0), WASM_I64(16)));
+ VERIFY_L_LL(WASM_I64_REMS(WASM_GET_LOCAL(0), WASM_I64(17)));
+ VERIFY_L_LL(WASM_I64_REMU(WASM_GET_LOCAL(0), WASM_I64(18)));
+ VERIFY_L_LL(WASM_I64_AND(WASM_GET_LOCAL(0), WASM_I64(19)));
+ VERIFY_L_LL(WASM_I64_IOR(WASM_GET_LOCAL(0), WASM_I64(20)));
+ VERIFY_L_LL(WASM_I64_XOR(WASM_GET_LOCAL(0), WASM_I64(21)));
+
+ VERIFY_L_LL(WASM_I64_SHL(WASM_GET_LOCAL(0), WASM_I64(22)));
+ VERIFY_L_LL(WASM_I64_SHR(WASM_GET_LOCAL(0), WASM_I64(23)));
+ VERIFY_L_LL(WASM_I64_SAR(WASM_GET_LOCAL(0), WASM_I64(24)));
+
+ VERIFY_I_LL(WASM_I64_LTS(WASM_GET_LOCAL(0), WASM_I64(26)));
+ VERIFY_I_LL(WASM_I64_LES(WASM_GET_LOCAL(0), WASM_I64(27)));
+ VERIFY_I_LL(WASM_I64_LTU(WASM_GET_LOCAL(0), WASM_I64(28)));
+ VERIFY_I_LL(WASM_I64_LEU(WASM_GET_LOCAL(0), WASM_I64(29)));
+
+ VERIFY_I_LL(WASM_I64_GTS(WASM_GET_LOCAL(0), WASM_I64(26)));
+ VERIFY_I_LL(WASM_I64_GES(WASM_GET_LOCAL(0), WASM_I64(27)));
+ VERIFY_I_LL(WASM_I64_GTU(WASM_GET_LOCAL(0), WASM_I64(28)));
+ VERIFY_I_LL(WASM_I64_GEU(WASM_GET_LOCAL(0), WASM_I64(29)));
+
+ VERIFY_I_LL(WASM_I64_EQ(WASM_GET_LOCAL(0), WASM_I64(25)));
+ VERIFY_I_LL(WASM_I64_NE(WASM_GET_LOCAL(0), WASM_I64(25)));
+
+// Undefine the test-local helpers so they do not leak into the rest of
+// this translation unit (they were previously left defined, which risks
+// redefinition clashes in code below this test).
+#undef VERIFY_L_LL
+#undef VERIFY_I_LL
+}
+
+
+// Every simple (signature-described) opcode type-checks as a unop or binop
+// according to its declared parameter count.
+TEST_F(WasmDecoderTest, AllSimpleExpressions) {
+// Test all simple expressions which are described by a signature.
+#define DECODE_TEST(name, opcode, sig) \
+ { \
+ FunctionSig* sig = WasmOpcodes::Signature(kExpr##name); \
+ if (sig->parameter_count() == 1) { \
+ TestUnop(kExpr##name, sig); \
+ } else { \
+ TestBinop(kExpr##name, sig); \
+ } \
+ }
+
+ FOREACH_SIMPLE_OPCODE(DECODE_TEST);
+
+#undef DECODE_TEST
+}
+
+
+// memory_size yields i32; it must not satisfy a float signature.
+TEST_F(WasmDecoderTest, MemorySize) {
+ byte code[] = {kExprMemorySize};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_f_ff, code);
+}
+
+
+// grow_memory takes and yields i32.
+TEST_F(WasmDecoderTest, GrowMemory) {
+ byte code[] = {kExprGrowMemory, kExprGetLocal, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_FAILURE(&env_i_d, code);
+}
+
+
+// i32 loads with a range of single-byte immediate offsets all decode.
+TEST_F(WasmDecoderTest, LoadMemOffset) {
+ for (int offset = 0; offset < 128; offset += 7) {
+ byte code[] = {kExprI32LoadMem, WasmOpcodes::LoadStoreAccessOf(true),
+ static_cast<byte>(offset), kExprI8Const, 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+ }
+}
+
+
+// i32 stores with a range of single-byte immediate offsets all decode.
+TEST_F(WasmDecoderTest, StoreMemOffset) {
+ for (int offset = 0; offset < 128; offset += 7) {
+ byte code[] = {kExprI32StoreMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ static_cast<byte>(offset),
+ kExprI8Const,
+ 0,
+ kExprI8Const,
+ 0};
+ EXPECT_VERIFIES(&env_i_i, code);
+ }
+}
+
+
+// Load offsets are varint-encoded (LEB128-style, high bit = continuation):
+// 1-, 2-, 3- and 4-byte encodings must all decode.
+TEST_F(WasmDecoderTest, LoadMemOffset_varint) {
+ byte code1[] = {kExprI32LoadMem, WasmOpcodes::LoadStoreAccessOf(true), 0,
+ kExprI8Const, 0};
+ byte code2[] = {kExprI32LoadMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ 0x80,
+ 1,
+ kExprI8Const,
+ 0};
+ byte code3[] = {kExprI32LoadMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ 0x81,
+ 0x82,
+ 5,
+ kExprI8Const,
+ 0};
+ byte code4[] = {kExprI32LoadMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ 0x83,
+ 0x84,
+ 0x85,
+ 7,
+ kExprI8Const,
+ 0};
+
+ EXPECT_VERIFIES(&env_i_i, code1);
+ EXPECT_VERIFIES(&env_i_i, code2);
+ EXPECT_VERIFIES(&env_i_i, code3);
+ EXPECT_VERIFIES(&env_i_i, code4);
+}
+
+
+// Store offsets use the same varint encoding: 1- to 4-byte forms decode.
+TEST_F(WasmDecoderTest, StoreMemOffset_varint) {
+ byte code1[] = {kExprI32StoreMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ 0,
+ kExprI8Const,
+ 0,
+ kExprI8Const,
+ 0};
+ byte code2[] = {kExprI32StoreMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ 0x80,
+ 1,
+ kExprI8Const,
+ 0,
+ kExprI8Const,
+ 0};
+ byte code3[] = {kExprI32StoreMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ 0x81,
+ 0x82,
+ 5,
+ kExprI8Const,
+ 0,
+ kExprI8Const,
+ 0};
+ byte code4[] = {kExprI32StoreMem,
+ WasmOpcodes::LoadStoreAccessOf(true),
+ 0x83,
+ 0x84,
+ 0x85,
+ 7,
+ kExprI8Const,
+ 0,
+ kExprI8Const,
+ 0};
+
+ EXPECT_VERIFIES(&env_i_i, code1);
+ EXPECT_VERIFIES(&env_i_i, code2);
+ EXPECT_VERIFIES(&env_i_i, code3);
+ EXPECT_VERIFIES(&env_i_i, code4);
+}
+
+
+// Cross-product of local types x memory types for loads: verifies only when
+// the local type matches the memory type's corresponding local type.
+TEST_F(WasmDecoderTest, AllLoadMemCombinations) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType local_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(machineTypes); j++) {
+ MachineType mem_type = machineTypes[j];
+ byte code[] = {
+ static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, false)),
+ WasmOpcodes::LoadStoreAccessOf(false), kExprI8Const, 0};
+ FunctionEnv env;
+ FunctionSig sig(1, 0, &local_type);
+ init_env(&env, &sig);
+ if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+ EXPECT_VERIFIES(&env, code);
+ } else {
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+ }
+}
+
+
+// Same cross-product for stores (value taken from the single parameter).
+TEST_F(WasmDecoderTest, AllStoreMemCombinations) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType local_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(machineTypes); j++) {
+ MachineType mem_type = machineTypes[j];
+ byte code[] = {
+ static_cast<byte>(WasmOpcodes::LoadStoreOpcodeOf(mem_type, true)),
+ WasmOpcodes::LoadStoreAccessOf(false),
+ kExprI8Const,
+ 0,
+ kExprGetLocal,
+ 0};
+ FunctionEnv env;
+ FunctionSig sig(0, 1, &local_type);
+ init_env(&env, &sig);
+ if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+ EXPECT_VERIFIES(&env, code);
+ } else {
+ EXPECT_FAILURE(&env, code);
+ }
+ }
+ }
+}
+
+
+namespace {
+// A helper for tests that require a module environment for functions and
+// globals.
+// NOTE(review): the vectors allocated with `new` in the constructor are not
+// freed by this class -- presumably tolerated for short-lived tests or
+// owned by WasmModule's destructor; confirm to rule out a leak.
+class TestModuleEnv : public ModuleEnv {
+ public:
+ TestModuleEnv() {
+ mem_start = 0;
+ mem_end = 0;
+ module = &mod;
+ linker = nullptr;
+ function_code = nullptr;
+ mod.globals = new std::vector<WasmGlobal>;
+ mod.signatures = new std::vector<FunctionSig*>;
+ mod.functions = new std::vector<WasmFunction>;
+ }
+ // Adds a global of the given type; returns its index.
+ // Indices are returned as byte, hence the <= 127 capacity checks below.
+ byte AddGlobal(MachineType mem_type) {
+ mod.globals->push_back({0, mem_type, 0, false});
+ CHECK(mod.globals->size() <= 127);
+ return static_cast<byte>(mod.globals->size() - 1);
+ }
+ // Adds a signature to the module's signature table; returns its index.
+ byte AddSignature(FunctionSig* sig) {
+ mod.signatures->push_back(sig);
+ CHECK(mod.signatures->size() <= 127);
+ return static_cast<byte>(mod.signatures->size() - 1);
+ }
+ // Adds a function with the given signature; returns its index.
+ byte AddFunction(FunctionSig* sig) {
+ mod.functions->push_back({sig, 0, 0, 0, 0, 0, 0, 0, false, false});
+ CHECK(mod.functions->size() <= 127);
+ return static_cast<byte>(mod.functions->size() - 1);
+ }
+
+ private:
+ WasmModule mod;
+};
+} // namespace
+
+
+// Direct calls with 0, 1 and 2 arguments matching the callee signatures.
+TEST_F(WasmDecoderTest, SimpleCalls) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddFunction(sigs.i_v());
+ module_env.AddFunction(sigs.i_i());
+ module_env.AddFunction(sigs.i_ii());
+
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(0));
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I8(27)));
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_FUNCTION(2, WASM_I8(37), WASM_I8(77)));
+}
+
+
+// Calls supplying fewer arguments than the callee's signature must fail.
+TEST_F(WasmDecoderTest, CallsWithTooFewArguments) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddFunction(sigs.i_i());
+ module_env.AddFunction(sigs.i_ii());
+ module_env.AddFunction(sigs.f_ff());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION0(0));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_ZERO));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(2, WASM_GET_LOCAL(0)));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// Extra ("spillover") arguments beyond the callee's arity: accepted when the
+// call's result type matches the caller, even if surplus args are consumed;
+// rejected when the result type mismatches or the surplus breaks typing.
+TEST_F(WasmDecoderTest, CallsWithSpilloverArgs) {
+ static LocalType a_i_ff[] = {kAstI32, kAstF32, kAstF32};
+ FunctionSig sig_i_ff(1, 2, a_i_ff);
+ FunctionEnv env_i_ff;
+ init_env(&env_i_ff, &sig_i_ff);
+
+ TestModuleEnv module_env;
+ env_i_ff.module = &module_env;
+ env_i_i.module = &module_env;
+ env_f_ff.module = &module_env;
+
+ module_env.AddFunction(&sig_i_ff);
+
+ EXPECT_VERIFIES_INLINE(&env_i_i,
+ WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
+
+ EXPECT_VERIFIES_INLINE(&env_i_ff,
+ WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
+
+ EXPECT_FAILURE_INLINE(&env_f_ff,
+ WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1)));
+
+ EXPECT_FAILURE_INLINE(
+ &env_i_i,
+ WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(0.2)));
+
+ EXPECT_VERIFIES_INLINE(
+ &env_f_ff,
+ WASM_CALL_FUNCTION(0, WASM_F32(0.1), WASM_F32(0.1), WASM_F32(11)));
+}
+
+
+// Direct calls whose argument type mismatches the callee parameter (i32
+// expected) must fail for i64/f32/f64 arguments.
+TEST_F(WasmDecoderTest, CallsWithMismatchedSigs2) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddFunction(sigs.i_i());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I64(17)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F32(17.1)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F64(17.1)));
+}
+
+
+// Same check against callees taking f32 and f64 parameters.
+TEST_F(WasmDecoderTest, CallsWithMismatchedSigs3) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddFunction(sigs.i_f());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_I64(27)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(0, WASM_F64(37.2)));
+
+ module_env.AddFunction(sigs.i_d());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_I64(16)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_FUNCTION(1, WASM_F32(17.6)));
+}
+
+#endif
+
+
+// Indirect calls: the first immediate is a signature index, the first
+// operand is the i32 table index, followed by the arguments.
+TEST_F(WasmDecoderTest, SimpleIndirectCalls) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ byte f0 = module_env.AddSignature(sigs.i_v());
+ byte f1 = module_env.AddSignature(sigs.i_i());
+ byte f2 = module_env.AddSignature(sigs.i_ii());
+
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_ZERO));
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(22)));
+ EXPECT_VERIFIES_INLINE(
+ env, WASM_CALL_INDIRECT(f2, WASM_ZERO, WASM_I8(32), WASM_I8(72)));
+}
+
+
+// A signature index beyond the table size must fail; adding the signature
+// afterwards makes the same call site valid.
+TEST_F(WasmDecoderTest, IndirectCallsOutOfBounds) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+ module_env.AddSignature(sigs.i_v());
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(22)));
+ module_env.AddSignature(sigs.i_i());
+ EXPECT_VERIFIES_INLINE(env, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_I8(27)));
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(2, WASM_ZERO, WASM_I8(27)));
+}
+
+
+// Indirect calls with mismatched argument types (or arity) must fail.
+// NOTE(review): this test registers via AddFunction, while
+// SimpleIndirectCalls uses AddSignature for indirect-call indices --
+// presumably still a failure either way here, but confirm which table the
+// decoder consults for CALL_INDIRECT.
+TEST_F(WasmDecoderTest, IndirectCallsWithMismatchedSigs3) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ byte f0 = module_env.AddFunction(sigs.i_f());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_I64(27)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f0, WASM_ZERO, WASM_F64(37.2)));
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_I8(17)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_I64(27)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT0(f0, WASM_F64(37.2)));
+
+ byte f1 = module_env.AddFunction(sigs.i_d());
+
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I8(16)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_I64(16)));
+ EXPECT_FAILURE_INLINE(env, WASM_CALL_INDIRECT(f1, WASM_ZERO, WASM_F32(17.6)));
+}
+
+
+// Globals of any memory type that maps to local type i32 (i8..u32) may be
+// loaded and stored in an i32 context.
+TEST_F(WasmDecoderTest, Int32Globals) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddGlobal(MachineType::Int8());
+ module_env.AddGlobal(MachineType::Uint8());
+ module_env.AddGlobal(MachineType::Int16());
+ module_env.AddGlobal(MachineType::Uint16());
+ module_env.AddGlobal(MachineType::Int32());
+ module_env.AddGlobal(MachineType::Uint32());
+
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(1));
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(2));
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(3));
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(4));
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(5));
+
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(4, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(5, WASM_GET_LOCAL(0)));
+}
+
+
+// Non-i32 globals (i64/u64/f32/f64) must not load/store in an i32 context.
+TEST_F(WasmDecoderTest, Int32Globals_fail) {
+ FunctionEnv* env = &env_i_i;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddGlobal(MachineType::Int64());
+ module_env.AddGlobal(MachineType::Uint64());
+ module_env.AddGlobal(MachineType::Float32());
+ module_env.AddGlobal(MachineType::Float64());
+
+ EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(0));
+ EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(1));
+ EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(2));
+ EXPECT_FAILURE_INLINE(env, WASM_LOAD_GLOBAL(3));
+
+ EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(2, WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(env, WASM_STORE_GLOBAL(3, WASM_GET_LOCAL(0)));
+}
+
+
+// i64/u64 globals load and store in an i64 context.
+TEST_F(WasmDecoderTest, Int64Globals) {
+ FunctionEnv* env = &env_l_l;
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddGlobal(MachineType::Int64());
+ module_env.AddGlobal(MachineType::Uint64());
+
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(1));
+
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(1, WASM_GET_LOCAL(0)));
+}
+
+
+// f32 globals load and store in an f32 context.
+// (A fresh local env is used here, shadowing the fixture's env_f_ff.)
+TEST_F(WasmDecoderTest, Float32Globals) {
+ FunctionEnv env_f_ff;
+ FunctionEnv* env = &env_f_ff;
+ init_env(env, sigs.f_ff());
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddGlobal(MachineType::Float32());
+
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+}
+
+
+// f64 globals load and store in an f64 context.
+TEST_F(WasmDecoderTest, Float64Globals) {
+ FunctionEnv env_d_dd;
+ FunctionEnv* env = &env_d_dd;
+ init_env(env, sigs.d_dd());
+ TestModuleEnv module_env;
+ env->module = &module_env;
+
+ module_env.AddGlobal(MachineType::Float64());
+
+ EXPECT_VERIFIES_INLINE(env, WASM_LOAD_GLOBAL(0));
+ EXPECT_VERIFIES_INLINE(env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+}
+
+
+// Cross-product of local types x global memory types for loads: verifies
+// only when the local type matches the global's corresponding local type.
+TEST_F(WasmDecoderTest, AllLoadGlobalCombinations) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType local_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(machineTypes); j++) {
+ MachineType mem_type = machineTypes[j];
+ FunctionEnv env;
+ FunctionSig sig(1, 0, &local_type);
+ TestModuleEnv module_env;
+ init_env(&env, &sig);
+ env.module = &module_env;
+ module_env.AddGlobal(mem_type);
+ if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+ EXPECT_VERIFIES_INLINE(&env, WASM_LOAD_GLOBAL(0));
+ } else {
+ EXPECT_FAILURE_INLINE(&env, WASM_LOAD_GLOBAL(0));
+ }
+ }
+ }
+}
+
+
+// Same cross-product for global stores.
+TEST_F(WasmDecoderTest, AllStoreGlobalCombinations) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType local_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(machineTypes); j++) {
+ MachineType mem_type = machineTypes[j];
+ FunctionEnv env;
+ FunctionSig sig(0, 1, &local_type);
+ TestModuleEnv module_env;
+ init_env(&env, &sig);
+ env.module = &module_env;
+ module_env.AddGlobal(mem_type);
+ if (local_type == WasmOpcodes::LocalTypeFor(mem_type)) {
+ EXPECT_VERIFIES_INLINE(&env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ } else {
+ EXPECT_FAILURE_INLINE(&env, WASM_STORE_GLOBAL(0, WASM_GET_LOCAL(0)));
+ }
+ }
+ }
+}
+
+
+// br depths within the static nesting depth verify; deeper depths fail.
+TEST_F(WasmDecoderTest, BreakNesting1) {
+ for (int i = 0; i < 5; i++) {
+ // (block[2] (loop[2] (if (get p) break[N]) (set p 1)) p)
+ byte code[] = {WASM_BLOCK(
+ 2, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BRV(i, WASM_ZERO)),
+ WASM_SET_LOCAL(0, WASM_I8(1))),
+ WASM_GET_LOCAL(0))};
+ if (i < 3) {
+ EXPECT_VERIFIES(&env_i_i, code);
+ } else {
+ EXPECT_FAILURE(&env_i_i, code);
+ }
+ }
+}
+
+
+// Block + loop: only depths 0 and 1 are in range for a plain break.
+TEST_F(WasmDecoderTest, BreakNesting2) {
+ env_v_v.AddLocals(kAstI32, 1);
+ for (int i = 0; i < 5; i++) {
+ // (block[1] (loop[2] (if (get p) break[N]) (set p 1))) (11)
+ byte code[] = {
+ WASM_BLOCK(1, WASM_LOOP(2, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(i)),
+ WASM_SET_LOCAL(0, WASM_I8(1)))),
+ WASM_I8(11)};
+ if (i < 2) {
+ EXPECT_VERIFIES(&env_v_v, code);
+ } else {
+ EXPECT_FAILURE(&env_v_v, code);
+ }
+ }
+}
+
+
+// Block + loop + block: depths 0..2 are in range.
+TEST_F(WasmDecoderTest, BreakNesting3) {
+ env_v_v.AddLocals(kAstI32, 1);
+ for (int i = 0; i < 5; i++) {
+ // (block[1] (loop[1] (block[1] (if (get p) break[N]))))
+ byte code[] = {WASM_BLOCK(
+ 1, WASM_LOOP(
+ 1, WASM_BLOCK(1, WASM_IF(WASM_GET_LOCAL(0), WASM_BREAK(i)))))};
+ if (i < 3) {
+ EXPECT_VERIFIES(&env_v_v, code);
+ } else {
+ EXPECT_FAILURE(&env_v_v, code);
+ }
+ }
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// Multiple breaks targeting the same block must agree on the break value
+// type; mixing i32 and f32 break values must fail.
+TEST_F(WasmDecoderTest, BreaksWithMultipleTypes) {
+ EXPECT_FAILURE_INLINE(
+ &env_i_i,
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(7)), WASM_F32(7.7)));
+ EXPECT_FAILURE_INLINE(&env_i_i,
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(7)),
+ WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7))));
+ EXPECT_FAILURE_INLINE(&env_i_i,
+ WASM_BLOCK(3, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(8)),
+ WASM_BRV_IF(0, WASM_ZERO, WASM_I8(0)),
+ WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7))));
+ EXPECT_FAILURE_INLINE(&env_i_i,
+ WASM_BLOCK(3, WASM_BRV_IF(0, WASM_ZERO, WASM_I8(9)),
+ WASM_BRV_IF(0, WASM_ZERO, WASM_F32(7.7)),
+ WASM_BRV_IF(0, WASM_ZERO, WASM_I8(11))));
+}
+
+#endif
+
+
+// Six nested constructs; each bit of `mask` turns one block into a loop
+// (which adds one more valid break depth). br depth i must verify iff it is
+// below the resulting total depth.
+TEST_F(WasmDecoderTest, BreakNesting_6_levels) {
+ for (int mask = 0; mask < 64; mask++) {
+ for (int i = 0; i < 14; i++) {
+ byte code[] = {
+ kExprBlock, 1, // --
+ kExprBlock, 1, // --
+ kExprBlock, 1, // --
+ kExprBlock, 1, // --
+ kExprBlock, 1, // --
+ kExprBlock, 1, // --
+ kExprBr, static_cast<byte>(i),
+ kExprNop // --
+ };
+
+ int depth = 6;
+ for (int l = 0; l < 6; l++) {
+ if (mask & (1 << l)) {
+ code[l * 2] = kExprLoop;
+ depth++;
+ }
+ }
+
+ if (i < depth) {
+ EXPECT_VERIFIES(&env_v_v, code);
+ } else {
+ EXPECT_FAILURE(&env_v_v, code);
+ }
+ }
+ }
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+// A block's break value must unify with the block's fall-through value.
+TEST_F(WasmDecoderTest, ExprBreak_TypeCheck) {
+ FunctionEnv* envs[] = {&env_i_i, &env_l_l, &env_f_ff, &env_d_dd};
+ for (size_t i = 0; i < arraysize(envs); i++) {
+ FunctionEnv* env = envs[i];
+ // unify X and X => OK
+ EXPECT_VERIFIES_INLINE(
+ env, WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(0)));
+ }
+
+ // unify i32 and f32 => fail
+ EXPECT_FAILURE_INLINE(
+ &env_i_i,
+ WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_ZERO)), WASM_F32(1.2)));
+
+ // unify f64 and f64 => OK
+ EXPECT_VERIFIES_INLINE(
+ &env_d_dd,
+ WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_F64(1.2)));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, ExprBreak_TypeCheckAll) {
+ byte code1[] = {WASM_BLOCK(2,
+ WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(1))};
+ byte code2[] = {WASM_BLOCK(
+ 2, WASM_IF(WASM_ZERO, WASM_BRV_IF(0, WASM_ZERO, WASM_GET_LOCAL(0))),
+ WASM_GET_LOCAL(1))};
+
+
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ FunctionEnv env;
+ LocalType storage[] = {kLocalTypes[i], kLocalTypes[i], kLocalTypes[j]};
+ FunctionSig sig(1, 2, storage);
+ init_env(&env, &sig);
+
+ if (i == j) {
+ EXPECT_VERIFIES(&env, code1);
+ EXPECT_VERIFIES(&env, code2);
+ } else {
+ EXPECT_FAILURE(&env, code1);
+ EXPECT_FAILURE(&env, code2);
+ }
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, ExprBr_Unify) {
+ FunctionEnv env;
+
+ for (int which = 0; which < 2; which++) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ LocalType storage[] = {kAstI32, kAstI32, type};
+ FunctionSig sig(1, 2, storage);
+ init_env(&env, &sig); // (i32, X) -> i32
+
+ byte code1[] = {
+ WASM_BLOCK(2, WASM_IF(WASM_ZERO, WASM_BRV(0, WASM_GET_LOCAL(which))),
+ WASM_GET_LOCAL(which ^ 1))};
+ byte code2[] = {
+ WASM_LOOP(2, WASM_IF(WASM_ZERO, WASM_BRV(1, WASM_GET_LOCAL(which))),
+ WASM_GET_LOCAL(which ^ 1))};
+
+
+ if (type == kAstI32) {
+ EXPECT_VERIFIES(&env, code1);
+ EXPECT_VERIFIES(&env, code2);
+ } else {
+ EXPECT_FAILURE(&env, code1);
+ EXPECT_FAILURE(&env, code2);
+ }
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, ExprBrIf_type) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i,
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0)));
+ EXPECT_FAILURE_INLINE(
+ &env_d_dd,
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0)));
+
+ FunctionEnv env;
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ LocalType storage[] = {kAstI32, kAstI32, type};
+ FunctionSig sig(1, 2, storage);
+ init_env(&env, &sig); // (i32, X) -> i32
+
+ byte code1[] = {
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(0), WASM_GET_LOCAL(1)),
+ WASM_GET_LOCAL(0))};
+
+ byte code2[] = {
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_GET_LOCAL(1), WASM_GET_LOCAL(0)),
+ WASM_GET_LOCAL(0))};
+ if (type == kAstI32) {
+ EXPECT_VERIFIES(&env, code1);
+ EXPECT_VERIFIES(&env, code2);
+ } else {
+ EXPECT_FAILURE(&env, code1);
+ EXPECT_FAILURE(&env, code2);
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, ExprBrIf_Unify) {
+ FunctionEnv env;
+
+ for (int which = 0; which < 2; which++) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalType type = kLocalTypes[i];
+ LocalType storage[] = {kAstI32, kAstI32, type};
+ FunctionSig sig(1, 2, storage);
+ init_env(&env, &sig); // (i32, X) -> i32
+
+ byte code1[] = {
+ WASM_BLOCK(2, WASM_BRV_IF(0, WASM_ZERO, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
+ byte code2[] = {
+ WASM_LOOP(2, WASM_BRV_IF(1, WASM_ZERO, WASM_GET_LOCAL(which)),
+ WASM_GET_LOCAL(which ^ 1))};
+
+
+ if (type == kAstI32) {
+ EXPECT_VERIFIES(&env, code1);
+ EXPECT_VERIFIES(&env, code2);
+ } else {
+ EXPECT_FAILURE(&env, code1);
+ EXPECT_FAILURE(&env, code2);
+ }
+ }
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch0) {
+ static byte code[] = {kExprTableSwitch, 0, 0, 0, 0};
+ EXPECT_FAILURE(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch0b) {
+ static byte code[] = {kExprTableSwitch, 0, 0, 0, 0, kExprI8Const, 11};
+ EXPECT_FAILURE(&env_v_v, code);
+ EXPECT_FAILURE(&env_i_i, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch0c) {
+ static byte code[] = {
+ WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)), WASM_I8(67))};
+ EXPECT_VERIFIES(&env_v_v, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch1) {
+ static byte code[] = {WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_I8(0), WASM_I8(9))};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_FAILURE(&env_f_ff, code);
+ EXPECT_FAILURE(&env_d_dd, code);
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch_off_end) {
+ static byte code[] = {WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_I8(0), WASM_I8(9))};
+ for (size_t len = arraysize(code) - 1; len > 0; len--) {
+ Verify(kError, &env_v_v, code, code + len);
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch2) {
+ static byte code[] = {
+ WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_I8(3), WASM_I8(10), WASM_I8(11))};
+ EXPECT_VERIFIES(&env_i_i, code);
+ EXPECT_VERIFIES(&env_v_v, code);
+ EXPECT_FAILURE(&env_f_ff, code);
+ EXPECT_FAILURE(&env_d_dd, code);
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, TableSwitch1b) {
+ EXPECT_VERIFIES_INLINE(&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_ZERO));
+
+ EXPECT_VERIFIES_INLINE(&env_f_ff, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_F32(0.0)));
+
+ EXPECT_VERIFIES_INLINE(&env_d_dd, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_F64(0.0)));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, TableSwitch_br) {
+ EXPECT_VERIFIES_INLINE(&env_i_i, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(0)),
+ WASM_GET_LOCAL(0));
+ for (int depth = 0; depth < 2; depth++) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
+ WASM_GET_LOCAL(0)));
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch_invalid_br) {
+ for (int depth = 1; depth < 4; depth++) {
+ EXPECT_FAILURE_INLINE(&env_i_i,
+ WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth)),
+ WASM_GET_LOCAL(0));
+ EXPECT_FAILURE_INLINE(
+ &env_i_i,
+ WASM_BLOCK(1, WASM_TABLESWITCH_OP(0, 1, WASM_CASE_BR(depth + 1)),
+ WASM_GET_LOCAL(0)));
+ }
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch_invalid_case_ref) {
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_TABLESWITCH_OP(0, 1, WASM_CASE(0)),
+ WASM_GET_LOCAL(0));
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_ZERO));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, TableSwitch1_br) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_TABLESWITCH_OP(1, 1, WASM_CASE(0)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_ZERO)));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, TableSwitch2_br) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(0)),
+ WASM_BRV(0, WASM_I8(1))));
+
+ EXPECT_FAILURE_INLINE(
+ &env_f_ff, WASM_TABLESWITCH_OP(2, 2, WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_ZERO, WASM_BRV(0, WASM_I8(3)),
+ WASM_BRV(0, WASM_I8(4))));
+}
+
+
+TEST_F(WasmDecoderTest, TableSwitch2x2) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i, WASM_TABLESWITCH_OP(2, 4, WASM_CASE(0), WASM_CASE(1),
+ WASM_CASE(0), WASM_CASE(1)),
+ WASM_TABLESWITCH_BODY(WASM_GET_LOCAL(0), WASM_BRV(0, WASM_I8(3)),
+ WASM_BRV(0, WASM_I8(4))));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, ExprBreakNesting1) {
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(&env_v_v,
+ WASM_BLOCK(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_BLOCK(1, WASM_BR_IF(0, WASM_ZERO)));
+
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BRV(0, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR(0)));
+ EXPECT_VERIFIES_INLINE(&env_v_v,
+ WASM_LOOP(1, WASM_BRV_IF(0, WASM_ZERO, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BRV(1, WASM_ZERO)));
+ EXPECT_VERIFIES_INLINE(&env_v_v, WASM_LOOP(1, WASM_BR(1)));
+}
+
+#endif
+
+
+TEST_F(WasmDecoderTest, Select) {
+ EXPECT_VERIFIES_INLINE(
+ &env_i_i,
+ WASM_SELECT(WASM_GET_LOCAL(0), WASM_GET_LOCAL(0), WASM_GET_LOCAL(0)));
+}
+
+
+// TODO(tizer): Fix on arm and reenable.
+#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64
+
+TEST_F(WasmDecoderTest, Select_TypeCheck) {
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0),
+ WASM_GET_LOCAL(0)));
+
+ EXPECT_FAILURE_INLINE(&env_i_i, WASM_SELECT(WASM_GET_LOCAL(0), WASM_F64(0.25),
+ WASM_GET_LOCAL(0)));
+
+ EXPECT_FAILURE_INLINE(
+ &env_i_i, WASM_SELECT(WASM_F32(9.9), WASM_GET_LOCAL(0), WASM_I64(0)));
+}
+
+#endif
+
+
+class WasmOpcodeLengthTest : public TestWithZone {
+ public:
+ WasmOpcodeLengthTest() : TestWithZone() {}
+};
+
+
+#define EXPECT_LENGTH(expected, opcode) \
+ { \
+ static const byte code[] = {opcode, 0, 0, 0, 0, 0, 0, 0, 0}; \
+ EXPECT_EQ(expected, OpcodeLength(code)); \
+ }
+
+
+TEST_F(WasmOpcodeLengthTest, Statements) {
+ EXPECT_LENGTH(1, kExprNop);
+ EXPECT_LENGTH(2, kExprBlock);
+ EXPECT_LENGTH(2, kExprLoop);
+ EXPECT_LENGTH(1, kExprIf);
+ EXPECT_LENGTH(1, kExprIfElse);
+ EXPECT_LENGTH(1, kExprSelect);
+ EXPECT_LENGTH(2, kExprBr);
+ EXPECT_LENGTH(2, kExprBrIf);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, MiscExpressions) {
+ EXPECT_LENGTH(2, kExprI8Const);
+ EXPECT_LENGTH(5, kExprI32Const);
+ EXPECT_LENGTH(5, kExprF32Const);
+ EXPECT_LENGTH(9, kExprI64Const);
+ EXPECT_LENGTH(9, kExprF64Const);
+ EXPECT_LENGTH(2, kExprGetLocal);
+ EXPECT_LENGTH(2, kExprSetLocal);
+ EXPECT_LENGTH(2, kExprLoadGlobal);
+ EXPECT_LENGTH(2, kExprStoreGlobal);
+ EXPECT_LENGTH(2, kExprCallFunction);
+ EXPECT_LENGTH(2, kExprCallIndirect);
+ EXPECT_LENGTH(1, kExprIf);
+ EXPECT_LENGTH(1, kExprIfElse);
+ EXPECT_LENGTH(2, kExprBlock);
+ EXPECT_LENGTH(2, kExprLoop);
+ EXPECT_LENGTH(2, kExprBr);
+ EXPECT_LENGTH(2, kExprBrIf);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, VariableLength) {
+ byte size2[] = {kExprLoadGlobal, 1};
+ byte size3[] = {kExprLoadGlobal, 1 | 0x80, 2};
+ byte size4[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3};
+ byte size5[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4};
+ byte size6[] = {kExprLoadGlobal, 1 | 0x80, 2 | 0x80, 3 | 0x80, 4 | 0x80, 5};
+
+ EXPECT_EQ(2, OpcodeLength(size2));
+ EXPECT_EQ(3, OpcodeLength(size3));
+ EXPECT_EQ(4, OpcodeLength(size4));
+ EXPECT_EQ(5, OpcodeLength(size5));
+ EXPECT_EQ(6, OpcodeLength(size6));
+}
+
+
+TEST_F(WasmOpcodeLengthTest, LoadsAndStores) {
+ EXPECT_LENGTH(2, kExprI32LoadMem8S);
+ EXPECT_LENGTH(2, kExprI32LoadMem8U);
+ EXPECT_LENGTH(2, kExprI32LoadMem16S);
+ EXPECT_LENGTH(2, kExprI32LoadMem16U);
+ EXPECT_LENGTH(2, kExprI32LoadMem);
+ EXPECT_LENGTH(2, kExprI64LoadMem8S);
+ EXPECT_LENGTH(2, kExprI64LoadMem8U);
+ EXPECT_LENGTH(2, kExprI64LoadMem16S);
+ EXPECT_LENGTH(2, kExprI64LoadMem16U);
+ EXPECT_LENGTH(2, kExprI64LoadMem32S);
+ EXPECT_LENGTH(2, kExprI64LoadMem32U);
+ EXPECT_LENGTH(2, kExprI64LoadMem);
+ EXPECT_LENGTH(2, kExprF32LoadMem);
+ EXPECT_LENGTH(2, kExprF64LoadMem);
+
+ EXPECT_LENGTH(2, kExprI32StoreMem8);
+ EXPECT_LENGTH(2, kExprI32StoreMem16);
+ EXPECT_LENGTH(2, kExprI32StoreMem);
+ EXPECT_LENGTH(2, kExprI64StoreMem8);
+ EXPECT_LENGTH(2, kExprI64StoreMem16);
+ EXPECT_LENGTH(2, kExprI64StoreMem32);
+ EXPECT_LENGTH(2, kExprI64StoreMem);
+ EXPECT_LENGTH(2, kExprF32StoreMem);
+ EXPECT_LENGTH(2, kExprF64StoreMem);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, MiscMemExpressions) {
+ EXPECT_LENGTH(1, kExprMemorySize);
+ EXPECT_LENGTH(1, kExprGrowMemory);
+}
+
+
+TEST_F(WasmOpcodeLengthTest, SimpleExpressions) {
+ EXPECT_LENGTH(1, kExprI32Add);
+ EXPECT_LENGTH(1, kExprI32Sub);
+ EXPECT_LENGTH(1, kExprI32Mul);
+ EXPECT_LENGTH(1, kExprI32DivS);
+ EXPECT_LENGTH(1, kExprI32DivU);
+ EXPECT_LENGTH(1, kExprI32RemS);
+ EXPECT_LENGTH(1, kExprI32RemU);
+ EXPECT_LENGTH(1, kExprI32And);
+ EXPECT_LENGTH(1, kExprI32Ior);
+ EXPECT_LENGTH(1, kExprI32Xor);
+ EXPECT_LENGTH(1, kExprI32Shl);
+ EXPECT_LENGTH(1, kExprI32ShrU);
+ EXPECT_LENGTH(1, kExprI32ShrS);
+ EXPECT_LENGTH(1, kExprI32Eq);
+ EXPECT_LENGTH(1, kExprI32Ne);
+ EXPECT_LENGTH(1, kExprI32LtS);
+ EXPECT_LENGTH(1, kExprI32LeS);
+ EXPECT_LENGTH(1, kExprI32LtU);
+ EXPECT_LENGTH(1, kExprI32LeU);
+ EXPECT_LENGTH(1, kExprI32GtS);
+ EXPECT_LENGTH(1, kExprI32GeS);
+ EXPECT_LENGTH(1, kExprI32GtU);
+ EXPECT_LENGTH(1, kExprI32GeU);
+ EXPECT_LENGTH(1, kExprI32Clz);
+ EXPECT_LENGTH(1, kExprI32Ctz);
+ EXPECT_LENGTH(1, kExprI32Popcnt);
+ EXPECT_LENGTH(1, kExprBoolNot);
+ EXPECT_LENGTH(1, kExprI64Add);
+ EXPECT_LENGTH(1, kExprI64Sub);
+ EXPECT_LENGTH(1, kExprI64Mul);
+ EXPECT_LENGTH(1, kExprI64DivS);
+ EXPECT_LENGTH(1, kExprI64DivU);
+ EXPECT_LENGTH(1, kExprI64RemS);
+ EXPECT_LENGTH(1, kExprI64RemU);
+ EXPECT_LENGTH(1, kExprI64And);
+ EXPECT_LENGTH(1, kExprI64Ior);
+ EXPECT_LENGTH(1, kExprI64Xor);
+ EXPECT_LENGTH(1, kExprI64Shl);
+ EXPECT_LENGTH(1, kExprI64ShrU);
+ EXPECT_LENGTH(1, kExprI64ShrS);
+ EXPECT_LENGTH(1, kExprI64Eq);
+ EXPECT_LENGTH(1, kExprI64Ne);
+ EXPECT_LENGTH(1, kExprI64LtS);
+ EXPECT_LENGTH(1, kExprI64LeS);
+ EXPECT_LENGTH(1, kExprI64LtU);
+ EXPECT_LENGTH(1, kExprI64LeU);
+ EXPECT_LENGTH(1, kExprI64GtS);
+ EXPECT_LENGTH(1, kExprI64GeS);
+ EXPECT_LENGTH(1, kExprI64GtU);
+ EXPECT_LENGTH(1, kExprI64GeU);
+ EXPECT_LENGTH(1, kExprI64Clz);
+ EXPECT_LENGTH(1, kExprI64Ctz);
+ EXPECT_LENGTH(1, kExprI64Popcnt);
+ EXPECT_LENGTH(1, kExprF32Add);
+ EXPECT_LENGTH(1, kExprF32Sub);
+ EXPECT_LENGTH(1, kExprF32Mul);
+ EXPECT_LENGTH(1, kExprF32Div);
+ EXPECT_LENGTH(1, kExprF32Min);
+ EXPECT_LENGTH(1, kExprF32Max);
+ EXPECT_LENGTH(1, kExprF32Abs);
+ EXPECT_LENGTH(1, kExprF32Neg);
+ EXPECT_LENGTH(1, kExprF32CopySign);
+ EXPECT_LENGTH(1, kExprF32Ceil);
+ EXPECT_LENGTH(1, kExprF32Floor);
+ EXPECT_LENGTH(1, kExprF32Trunc);
+ EXPECT_LENGTH(1, kExprF32NearestInt);
+ EXPECT_LENGTH(1, kExprF32Sqrt);
+ EXPECT_LENGTH(1, kExprF32Eq);
+ EXPECT_LENGTH(1, kExprF32Ne);
+ EXPECT_LENGTH(1, kExprF32Lt);
+ EXPECT_LENGTH(1, kExprF32Le);
+ EXPECT_LENGTH(1, kExprF32Gt);
+ EXPECT_LENGTH(1, kExprF32Ge);
+ EXPECT_LENGTH(1, kExprF64Add);
+ EXPECT_LENGTH(1, kExprF64Sub);
+ EXPECT_LENGTH(1, kExprF64Mul);
+ EXPECT_LENGTH(1, kExprF64Div);
+ EXPECT_LENGTH(1, kExprF64Min);
+ EXPECT_LENGTH(1, kExprF64Max);
+ EXPECT_LENGTH(1, kExprF64Abs);
+ EXPECT_LENGTH(1, kExprF64Neg);
+ EXPECT_LENGTH(1, kExprF64CopySign);
+ EXPECT_LENGTH(1, kExprF64Ceil);
+ EXPECT_LENGTH(1, kExprF64Floor);
+ EXPECT_LENGTH(1, kExprF64Trunc);
+ EXPECT_LENGTH(1, kExprF64NearestInt);
+ EXPECT_LENGTH(1, kExprF64Sqrt);
+ EXPECT_LENGTH(1, kExprF64Eq);
+ EXPECT_LENGTH(1, kExprF64Ne);
+ EXPECT_LENGTH(1, kExprF64Lt);
+ EXPECT_LENGTH(1, kExprF64Le);
+ EXPECT_LENGTH(1, kExprF64Gt);
+ EXPECT_LENGTH(1, kExprF64Ge);
+ EXPECT_LENGTH(1, kExprI32SConvertF32);
+ EXPECT_LENGTH(1, kExprI32SConvertF64);
+ EXPECT_LENGTH(1, kExprI32UConvertF32);
+ EXPECT_LENGTH(1, kExprI32UConvertF64);
+ EXPECT_LENGTH(1, kExprI32ConvertI64);
+ EXPECT_LENGTH(1, kExprI64SConvertF32);
+ EXPECT_LENGTH(1, kExprI64SConvertF64);
+ EXPECT_LENGTH(1, kExprI64UConvertF32);
+ EXPECT_LENGTH(1, kExprI64UConvertF64);
+ EXPECT_LENGTH(1, kExprI64SConvertI32);
+ EXPECT_LENGTH(1, kExprI64UConvertI32);
+ EXPECT_LENGTH(1, kExprF32SConvertI32);
+ EXPECT_LENGTH(1, kExprF32UConvertI32);
+ EXPECT_LENGTH(1, kExprF32SConvertI64);
+ EXPECT_LENGTH(1, kExprF32UConvertI64);
+ EXPECT_LENGTH(1, kExprF32ConvertF64);
+ EXPECT_LENGTH(1, kExprF32ReinterpretI32);
+ EXPECT_LENGTH(1, kExprF64SConvertI32);
+ EXPECT_LENGTH(1, kExprF64UConvertI32);
+ EXPECT_LENGTH(1, kExprF64SConvertI64);
+ EXPECT_LENGTH(1, kExprF64UConvertI64);
+ EXPECT_LENGTH(1, kExprF64ConvertF32);
+ EXPECT_LENGTH(1, kExprF64ReinterpretI64);
+ EXPECT_LENGTH(1, kExprI32ReinterpretF32);
+ EXPECT_LENGTH(1, kExprI64ReinterpretF64);
+}
+
+
+class WasmOpcodeArityTest : public TestWithZone {
+ public:
+ WasmOpcodeArityTest() : TestWithZone() {}
+};
+
+
+#define EXPECT_ARITY(expected, ...) \
+ { \
+ static const byte code[] = {__VA_ARGS__}; \
+ EXPECT_EQ(expected, OpcodeArity(&env, code)); \
+ }
+
+
+TEST_F(WasmOpcodeArityTest, Control) {
+ FunctionEnv env;
+ EXPECT_ARITY(0, kExprNop);
+
+ EXPECT_ARITY(0, kExprBlock, 0);
+ EXPECT_ARITY(1, kExprBlock, 1);
+ EXPECT_ARITY(2, kExprBlock, 2);
+ EXPECT_ARITY(5, kExprBlock, 5);
+ EXPECT_ARITY(10, kExprBlock, 10);
+
+ EXPECT_ARITY(0, kExprLoop, 0);
+ EXPECT_ARITY(1, kExprLoop, 1);
+ EXPECT_ARITY(2, kExprLoop, 2);
+ EXPECT_ARITY(7, kExprLoop, 7);
+ EXPECT_ARITY(11, kExprLoop, 11);
+
+ EXPECT_ARITY(2, kExprIf);
+ EXPECT_ARITY(3, kExprIfElse);
+ EXPECT_ARITY(3, kExprSelect);
+
+ EXPECT_ARITY(1, kExprBr);
+ EXPECT_ARITY(2, kExprBrIf);
+
+ {
+ TestSignatures sigs;
+ FunctionEnv env;
+ WasmDecoderTest::init_env(&env, sigs.v_v());
+ EXPECT_ARITY(0, kExprReturn);
+ WasmDecoderTest::init_env(&env, sigs.i_i());
+ EXPECT_ARITY(1, kExprReturn);
+ }
+}
+
+
+TEST_F(WasmOpcodeArityTest, Misc) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(0, kExprI8Const);
+ EXPECT_ARITY(0, kExprI32Const);
+ EXPECT_ARITY(0, kExprF32Const);
+ EXPECT_ARITY(0, kExprI64Const);
+ EXPECT_ARITY(0, kExprF64Const);
+ EXPECT_ARITY(0, kExprGetLocal);
+ EXPECT_ARITY(1, kExprSetLocal);
+ EXPECT_ARITY(0, kExprLoadGlobal);
+ EXPECT_ARITY(1, kExprStoreGlobal);
+}
+
+
+TEST_F(WasmOpcodeArityTest, Calls) {
+ TestSignatures sigs;
+ TestModuleEnv module;
+ module.AddFunction(sigs.i_ii());
+ module.AddFunction(sigs.i_i());
+
+ module.AddSignature(sigs.f_ff());
+ module.AddSignature(sigs.i_d());
+
+ {
+ FunctionEnv env;
+ WasmDecoderTest::init_env(&env, sigs.i_ii());
+ env.module = &module;
+
+ EXPECT_ARITY(2, kExprCallFunction, 0);
+ EXPECT_ARITY(3, kExprCallIndirect, 0);
+ EXPECT_ARITY(1, kExprBr);
+ EXPECT_ARITY(2, kExprBrIf);
+ }
+
+ {
+ FunctionEnv env;
+ WasmDecoderTest::init_env(&env, sigs.v_v());
+ env.module = &module;
+
+ EXPECT_ARITY(1, kExprCallFunction, 1);
+ EXPECT_ARITY(2, kExprCallIndirect, 1);
+ EXPECT_ARITY(1, kExprBr);
+ EXPECT_ARITY(2, kExprBrIf);
+ }
+}
+
+
+TEST_F(WasmOpcodeArityTest, LoadsAndStores) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(1, kExprI32LoadMem8S);
+ EXPECT_ARITY(1, kExprI32LoadMem8U);
+ EXPECT_ARITY(1, kExprI32LoadMem16S);
+ EXPECT_ARITY(1, kExprI32LoadMem16U);
+ EXPECT_ARITY(1, kExprI32LoadMem);
+
+ EXPECT_ARITY(1, kExprI64LoadMem8S);
+ EXPECT_ARITY(1, kExprI64LoadMem8U);
+ EXPECT_ARITY(1, kExprI64LoadMem16S);
+ EXPECT_ARITY(1, kExprI64LoadMem16U);
+ EXPECT_ARITY(1, kExprI64LoadMem32S);
+ EXPECT_ARITY(1, kExprI64LoadMem32U);
+ EXPECT_ARITY(1, kExprI64LoadMem);
+ EXPECT_ARITY(1, kExprF32LoadMem);
+ EXPECT_ARITY(1, kExprF64LoadMem);
+
+ EXPECT_ARITY(2, kExprI32StoreMem8);
+ EXPECT_ARITY(2, kExprI32StoreMem16);
+ EXPECT_ARITY(2, kExprI32StoreMem);
+ EXPECT_ARITY(2, kExprI64StoreMem8);
+ EXPECT_ARITY(2, kExprI64StoreMem16);
+ EXPECT_ARITY(2, kExprI64StoreMem32);
+ EXPECT_ARITY(2, kExprI64StoreMem);
+ EXPECT_ARITY(2, kExprF32StoreMem);
+ EXPECT_ARITY(2, kExprF64StoreMem);
+}
+
+
+TEST_F(WasmOpcodeArityTest, MiscMemExpressions) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(0, kExprMemorySize);
+ EXPECT_ARITY(1, kExprGrowMemory);
+}
+
+
+TEST_F(WasmOpcodeArityTest, SimpleExpressions) {
+ FunctionEnv env;
+
+ EXPECT_ARITY(2, kExprI32Add);
+ EXPECT_ARITY(2, kExprI32Sub);
+ EXPECT_ARITY(2, kExprI32Mul);
+ EXPECT_ARITY(2, kExprI32DivS);
+ EXPECT_ARITY(2, kExprI32DivU);
+ EXPECT_ARITY(2, kExprI32RemS);
+ EXPECT_ARITY(2, kExprI32RemU);
+ EXPECT_ARITY(2, kExprI32And);
+ EXPECT_ARITY(2, kExprI32Ior);
+ EXPECT_ARITY(2, kExprI32Xor);
+ EXPECT_ARITY(2, kExprI32Shl);
+ EXPECT_ARITY(2, kExprI32ShrU);
+ EXPECT_ARITY(2, kExprI32ShrS);
+ EXPECT_ARITY(2, kExprI32Eq);
+ EXPECT_ARITY(2, kExprI32Ne);
+ EXPECT_ARITY(2, kExprI32LtS);
+ EXPECT_ARITY(2, kExprI32LeS);
+ EXPECT_ARITY(2, kExprI32LtU);
+ EXPECT_ARITY(2, kExprI32LeU);
+ EXPECT_ARITY(2, kExprI32GtS);
+ EXPECT_ARITY(2, kExprI32GeS);
+ EXPECT_ARITY(2, kExprI32GtU);
+ EXPECT_ARITY(2, kExprI32GeU);
+ EXPECT_ARITY(1, kExprI32Clz);
+ EXPECT_ARITY(1, kExprI32Ctz);
+ EXPECT_ARITY(1, kExprI32Popcnt);
+ EXPECT_ARITY(1, kExprBoolNot);
+ EXPECT_ARITY(2, kExprI64Add);
+ EXPECT_ARITY(2, kExprI64Sub);
+ EXPECT_ARITY(2, kExprI64Mul);
+ EXPECT_ARITY(2, kExprI64DivS);
+ EXPECT_ARITY(2, kExprI64DivU);
+ EXPECT_ARITY(2, kExprI64RemS);
+ EXPECT_ARITY(2, kExprI64RemU);
+ EXPECT_ARITY(2, kExprI64And);
+ EXPECT_ARITY(2, kExprI64Ior);
+ EXPECT_ARITY(2, kExprI64Xor);
+ EXPECT_ARITY(2, kExprI64Shl);
+ EXPECT_ARITY(2, kExprI64ShrU);
+ EXPECT_ARITY(2, kExprI64ShrS);
+ EXPECT_ARITY(2, kExprI64Eq);
+ EXPECT_ARITY(2, kExprI64Ne);
+ EXPECT_ARITY(2, kExprI64LtS);
+ EXPECT_ARITY(2, kExprI64LeS);
+ EXPECT_ARITY(2, kExprI64LtU);
+ EXPECT_ARITY(2, kExprI64LeU);
+ EXPECT_ARITY(2, kExprI64GtS);
+ EXPECT_ARITY(2, kExprI64GeS);
+ EXPECT_ARITY(2, kExprI64GtU);
+ EXPECT_ARITY(2, kExprI64GeU);
+ EXPECT_ARITY(1, kExprI64Clz);
+ EXPECT_ARITY(1, kExprI64Ctz);
+ EXPECT_ARITY(1, kExprI64Popcnt);
+ EXPECT_ARITY(2, kExprF32Add);
+ EXPECT_ARITY(2, kExprF32Sub);
+ EXPECT_ARITY(2, kExprF32Mul);
+ EXPECT_ARITY(2, kExprF32Div);
+ EXPECT_ARITY(2, kExprF32Min);
+ EXPECT_ARITY(2, kExprF32Max);
+ EXPECT_ARITY(1, kExprF32Abs);
+ EXPECT_ARITY(1, kExprF32Neg);
+ EXPECT_ARITY(2, kExprF32CopySign);
+ EXPECT_ARITY(1, kExprF32Ceil);
+ EXPECT_ARITY(1, kExprF32Floor);
+ EXPECT_ARITY(1, kExprF32Trunc);
+ EXPECT_ARITY(1, kExprF32NearestInt);
+ EXPECT_ARITY(1, kExprF32Sqrt);
+ EXPECT_ARITY(2, kExprF32Eq);
+ EXPECT_ARITY(2, kExprF32Ne);
+ EXPECT_ARITY(2, kExprF32Lt);
+ EXPECT_ARITY(2, kExprF32Le);
+ EXPECT_ARITY(2, kExprF32Gt);
+ EXPECT_ARITY(2, kExprF32Ge);
+ EXPECT_ARITY(2, kExprF64Add);
+ EXPECT_ARITY(2, kExprF64Sub);
+ EXPECT_ARITY(2, kExprF64Mul);
+ EXPECT_ARITY(2, kExprF64Div);
+ EXPECT_ARITY(2, kExprF64Min);
+ EXPECT_ARITY(2, kExprF64Max);
+ EXPECT_ARITY(1, kExprF64Abs);
+ EXPECT_ARITY(1, kExprF64Neg);
+ EXPECT_ARITY(2, kExprF64CopySign);
+ EXPECT_ARITY(1, kExprF64Ceil);
+ EXPECT_ARITY(1, kExprF64Floor);
+ EXPECT_ARITY(1, kExprF64Trunc);
+ EXPECT_ARITY(1, kExprF64NearestInt);
+ EXPECT_ARITY(1, kExprF64Sqrt);
+ EXPECT_ARITY(2, kExprF64Eq);
+ EXPECT_ARITY(2, kExprF64Ne);
+ EXPECT_ARITY(2, kExprF64Lt);
+ EXPECT_ARITY(2, kExprF64Le);
+ EXPECT_ARITY(2, kExprF64Gt);
+ EXPECT_ARITY(2, kExprF64Ge);
+ EXPECT_ARITY(1, kExprI32SConvertF32);
+ EXPECT_ARITY(1, kExprI32SConvertF64);
+ EXPECT_ARITY(1, kExprI32UConvertF32);
+ EXPECT_ARITY(1, kExprI32UConvertF64);
+ EXPECT_ARITY(1, kExprI32ConvertI64);
+ EXPECT_ARITY(1, kExprI64SConvertF32);
+ EXPECT_ARITY(1, kExprI64SConvertF64);
+ EXPECT_ARITY(1, kExprI64UConvertF32);
+ EXPECT_ARITY(1, kExprI64UConvertF64);
+ EXPECT_ARITY(1, kExprI64SConvertI32);
+ EXPECT_ARITY(1, kExprI64UConvertI32);
+ EXPECT_ARITY(1, kExprF32SConvertI32);
+ EXPECT_ARITY(1, kExprF32UConvertI32);
+ EXPECT_ARITY(1, kExprF32SConvertI64);
+ EXPECT_ARITY(1, kExprF32UConvertI64);
+ EXPECT_ARITY(1, kExprF32ConvertF64);
+ EXPECT_ARITY(1, kExprF32ReinterpretI32);
+ EXPECT_ARITY(1, kExprF64SConvertI32);
+ EXPECT_ARITY(1, kExprF64UConvertI32);
+ EXPECT_ARITY(1, kExprF64SConvertI64);
+ EXPECT_ARITY(1, kExprF64UConvertI64);
+ EXPECT_ARITY(1, kExprF64ConvertF32);
+ EXPECT_ARITY(1, kExprF64ReinterpretI64);
+ EXPECT_ARITY(1, kExprI32ReinterpretF32);
+ EXPECT_ARITY(1, kExprI64ReinterpretF64);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/wasm/encoder-unittest.cc b/test/unittests/wasm/encoder-unittest.cc
new file mode 100644
index 0000000..156cf6b
--- /dev/null
+++ b/test/unittests/wasm/encoder-unittest.cc
@@ -0,0 +1,151 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/v8.h"
+
+#include "src/wasm/ast-decoder.h"
+#include "src/wasm/encoder.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class EncoderTest : public TestWithZone {
+ protected:
+ void AddLocal(WasmFunctionBuilder* f, LocalType type) {
+ uint16_t index = f->AddLocal(type);
+ const std::vector<uint8_t>& out_index = UnsignedLEB128From(index);
+ std::vector<uint8_t> code;
+ code.push_back(kExprGetLocal);
+ for (size_t i = 0; i < out_index.size(); i++) {
+ code.push_back(out_index.at(i));
+ }
+ uint32_t local_indices[] = {1};
+ f->EmitCode(&code[0], static_cast<uint32_t>(code.size()), local_indices, 1);
+ }
+
+ void CheckReadValue(uint8_t* leb_value, uint32_t expected_result,
+ int expected_length,
+ ReadUnsignedLEB128ErrorCode expected_error_code) {
+ int length;
+ uint32_t result;
+ ReadUnsignedLEB128ErrorCode error_code =
+ ReadUnsignedLEB128Operand(leb_value, leb_value + 5, &length, &result);
+ CHECK_EQ(error_code, expected_error_code);
+ if (error_code == 0) {
+ CHECK_EQ(result, expected_result);
+ CHECK_EQ(length, expected_length);
+ }
+ }
+
+ void CheckWriteValue(uint32_t input, int length, uint8_t* vals) {
+ const std::vector<uint8_t> result = UnsignedLEB128From(input);
+ CHECK_EQ(result.size(), length);
+ for (int i = 0; i < length; i++) {
+ CHECK_EQ(result.at(i), vals[i]);
+ }
+ }
+};
+
+
+TEST_F(EncoderTest, Function_Builder_Variable_Indexing) {
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* function = builder->FunctionAt(f_index);
+ uint16_t local_float32 = function->AddLocal(kAstF32);
+ uint16_t param_float32 = function->AddParam(kAstF32);
+ uint16_t local_int32 = function->AddLocal(kAstI32);
+ uint16_t local_float64 = function->AddLocal(kAstF64);
+ uint16_t local_int64 = function->AddLocal(kAstI64);
+ uint16_t param_int32 = function->AddParam(kAstI32);
+ uint16_t local_int32_2 = function->AddLocal(kAstI32);
+
+ byte code[] = {kExprGetLocal, static_cast<uint8_t>(param_float32)};
+ uint32_t local_indices[] = {1};
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(param_int32);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_int32);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_int32_2);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_int64);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_float32);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+ code[1] = static_cast<uint8_t>(local_float64);
+ function->EmitCode(code, sizeof(code), local_indices, 1);
+
+ WasmFunctionEncoder* f = function->Build(&zone, builder);
+ ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
+ byte* buffer = &buffer_vector[0];
+ byte* header = buffer;
+ byte* body = buffer + f->HeaderSize();
+ f->Serialize(buffer, &header, &body);
+ for (size_t i = 0; i < 7; i++) {
+ CHECK_EQ(i, static_cast<size_t>(*(buffer + 2 * i + f->HeaderSize() + 1)));
+ }
+}
+
+
+TEST_F(EncoderTest, Function_Builder_Indexing_Variable_Width) {
+ Zone zone;
+ WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
+ uint16_t f_index = builder->AddFunction();
+ WasmFunctionBuilder* function = builder->FunctionAt(f_index);
+ for (size_t i = 0; i < 128; i++) {
+ AddLocal(function, kAstF32);
+ }
+ AddLocal(function, kAstI32);
+
+ WasmFunctionEncoder* f = function->Build(&zone, builder);
+ ZoneVector<uint8_t> buffer_vector(f->HeaderSize() + f->BodySize(), &zone);
+ byte* buffer = &buffer_vector[0];
+ byte* header = buffer;
+ byte* body = buffer + f->HeaderSize();
+ f->Serialize(buffer, &header, &body);
+ body = buffer + f->HeaderSize();
+ for (size_t i = 0; i < 127; i++) {
+ CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * i)));
+ CHECK_EQ(i + 1, static_cast<size_t>(*(body + 2 * i + 1)));
+ }
+ CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * 127)));
+ CHECK_EQ(0x80, static_cast<size_t>(*(body + 2 * 127 + 1)));
+ CHECK_EQ(0x01, static_cast<size_t>(*(body + 2 * 127 + 2)));
+ CHECK_EQ(kExprGetLocal, static_cast<size_t>(*(body + 2 * 127 + 3)));
+ CHECK_EQ(0x00, static_cast<size_t>(*(body + 2 * 127 + 4)));
+}
+
+
+TEST_F(EncoderTest, LEB_Functions) {
+ byte leb_value[5] = {0, 0, 0, 0, 0};
+ CheckReadValue(leb_value, 0, 1, kNoError);
+ CheckWriteValue(0, 1, leb_value);
+ leb_value[0] = 23;
+ CheckReadValue(leb_value, 23, 1, kNoError);
+ CheckWriteValue(23, 1, leb_value);
+ leb_value[0] = 0x80;
+ leb_value[1] = 0x01;
+ CheckReadValue(leb_value, 128, 2, kNoError);
+ CheckWriteValue(128, 2, leb_value);
+ leb_value[0] = 0x80;
+ leb_value[1] = 0x80;
+ leb_value[2] = 0x80;
+ leb_value[3] = 0x80;
+ leb_value[4] = 0x01;
+ CheckReadValue(leb_value, 0x10000000, 5, kNoError);
+ CheckWriteValue(0x10000000, 5, leb_value);
+ leb_value[0] = 0x80;
+ leb_value[1] = 0x80;
+ leb_value[2] = 0x80;
+ leb_value[3] = 0x80;
+ leb_value[4] = 0x80;
+ CheckReadValue(leb_value, -1, -1, kInvalidLEB128);
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/wasm/module-decoder-unittest.cc b/test/unittests/wasm/module-decoder-unittest.cc
new file mode 100644
index 0000000..0738b59
--- /dev/null
+++ b/test/unittests/wasm/module-decoder-unittest.cc
@@ -0,0 +1,957 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmModuleVerifyTest : public TestWithZone {
+ public:
+ ModuleResult DecodeModule(const byte* module_start, const byte* module_end) {
+ return DecodeWasmModule(nullptr, zone(), module_start, module_end, false,
+ false);
+ }
+};
+
+
+#define EXPECT_VERIFIES(data) \
+ do { \
+ ModuleResult result = DecodeModule(data, data + arraysize(data)); \
+ EXPECT_TRUE(result.ok()); \
+ if (result.val) delete result.val; \
+ } while (false)
+
+
+#define EXPECT_FAILURE(data) \
+ do { \
+ ModuleResult result = DecodeModule(data, data + arraysize(data)); \
+ EXPECT_FALSE(result.ok()); \
+ if (result.val) delete result.val; \
+ } while (false)
+
+
+struct LocalTypePair {
+ uint8_t code;
+ LocalType type;
+} kLocalTypes[] = {{kLocalI32, kAstI32},
+ {kLocalI64, kAstI64},
+ {kLocalF32, kAstF32},
+ {kLocalF64, kAstF64}};
+
+
+TEST_F(WasmModuleVerifyTest, DecodeEmpty) {
+ static const byte data[1]{kDeclEnd};
+ {
+ ModuleResult result = DecodeModule(data, data);
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+ }
+ {
+ ModuleResult result = DecodeModule(data, data + 1);
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneGlobal) {
+ const byte data[] = {
+ kDeclGlobals,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0, // name offset
+ kMemI32, // memory type
+ 0, // exported
+ };
+
+ {
+ // Should decode to exactly one global.
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(0, result.val->data_segments->size());
+
+ WasmGlobal* global = &result.val->globals->back();
+
+ EXPECT_EQ(0, global->name_offset);
+ EXPECT_EQ(MachineType::Int32(), global->type);
+ EXPECT_EQ(0, global->offset);
+ EXPECT_FALSE(global->exported);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, ZeroGlobals) {
+ const byte data[] = {
+ kDeclGlobals, 0, // declare 0 globals
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+static void AppendUint32v(std::vector<byte>& buffer, uint32_t val) {
+ while (true) {
+ uint32_t next = val >> 7;
+ uint32_t out = val & 0x7f;
+ if (next) {
+ buffer.push_back(static_cast<byte>(0x80 | out));
+ val = next;
+ } else {
+ buffer.push_back(static_cast<byte>(out));
+ break;
+ }
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, NGlobals) {
+ const byte data[] = {
+ 0, 0, 0, 0, // name offset
+ kMemI32, // memory type
+ 0, // exported
+ };
+ for (uint32_t i = 0; i < 1000000; i = i * 7 + 1) {
+ std::vector<byte> buffer;
+ buffer.push_back(kDeclGlobals);
+ AppendUint32v(buffer, i);
+ for (uint32_t j = 0; j < i; j++) {
+ buffer.insert(buffer.end(), data, data + arraysize(data));
+ }
+
+ ModuleResult result = DecodeModule(&buffer[0], &buffer[0] + buffer.size());
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, GlobalWithInvalidNameOffset) {
+ const byte data[] = {
+ kDeclGlobals,
+ 1, // declare one global
+ 0,
+ 3,
+ 0,
+ 0, // name offset
+ kMemI32, // memory type
+ 0, // exported
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, GlobalWithInvalidMemoryType) {
+ const byte data[] = {
+ kDeclGlobals,
+ 1, // declare one global
+ 0,
+ 0,
+ 0,
+ 0, // name offset
+ 33, // memory type
+ 0, // exported
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, TwoGlobals) {
+ const byte data[] = {
+ kDeclGlobals,
+ 2,
+ 0,
+ 0,
+ 0,
+ 0, // #0: name offset
+ kMemF32, // memory type
+ 0, // exported
+ 0,
+ 0,
+ 0,
+ 0, // #1: name offset
+ kMemF64, // memory type
+ 1, // exported
+ };
+
+ {
+ // Should decode to exactly two globals.
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(2, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(0, result.val->data_segments->size());
+
+ WasmGlobal* g0 = &result.val->globals->at(0);
+ WasmGlobal* g1 = &result.val->globals->at(1);
+
+ EXPECT_EQ(0, g0->name_offset);
+ EXPECT_EQ(MachineType::Float32(), g0->type);
+ EXPECT_EQ(0, g0->offset);
+ EXPECT_FALSE(g0->exported);
+
+ EXPECT_EQ(0, g1->name_offset);
+ EXPECT_EQ(MachineType::Float64(), g1->type);
+ EXPECT_EQ(0, g1->offset);
+ EXPECT_TRUE(g1->exported);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneSignature) {
+ static const byte data[] = {
+ kDeclSignatures, 1, 0, kLocalVoid // void -> void
+ };
+ EXPECT_VERIFIES(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, MultipleSignatures) {
+ static const byte data[] = {
+ kDeclSignatures,
+ 3,
+ 0,
+ kLocalVoid, // void -> void
+ 1,
+ kLocalI32,
+ kLocalF32, // f32 -> i32
+ 2,
+ kLocalI32,
+ kLocalF64,
+ kLocalF64, // (f64,f64) -> i32
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(3, result.val->signatures->size());
+ if (result.val->signatures->size() == 3) {
+ EXPECT_EQ(0, result.val->signatures->at(0)->return_count());
+ EXPECT_EQ(1, result.val->signatures->at(1)->return_count());
+ EXPECT_EQ(1, result.val->signatures->at(2)->return_count());
+
+ EXPECT_EQ(0, result.val->signatures->at(0)->parameter_count());
+ EXPECT_EQ(1, result.val->signatures->at(1)->parameter_count());
+ EXPECT_EQ(2, result.val->signatures->at(2)->parameter_count());
+ }
+ if (result.val) delete result.val;
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ ModuleResult result = DecodeModule(data, data + size);
+ // Should fall off the end of module bytes.
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, FunctionWithoutSig) {
+ static const byte data[] = {
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ 0, 0, // signature index
+ 0, 0, 0, 0, // name offset
+ 0, 0, 0, 0, // code start offset
+ 0, 0, 0, 0, // code end offset
+ 1, 2, // local int32 count
+ 3, 4, // local int64 count
+ 5, 6, // local float32 count
+ 7, 8, // local float64 count
+ 0, // exported
+ 1 // external
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneEmptyVoidVoidFunction) {
+ const int kCodeStartOffset = 23;
+ const int kCodeEndOffset = kCodeStartOffset + 1;
+
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ // func#0 ------------------------------------------------------
+ kDeclFunctions, 1,
+ kDeclFunctionLocals | kDeclFunctionExport | kDeclFunctionName, 0,
+ 0, // signature index
+ 9, 0, 0, 0, // name offset
+ 11, 2, // local int32 count
+ 13, 4, // local int64 count
+ 15, 6, // local float32 count
+ 17, 8, // local float64 count
+ 1, 0, // size
+ kExprNop,
+ };
+
+ {
+ // Should decode to exactly one function.
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0, result.val->globals->size());
+ EXPECT_EQ(1, result.val->signatures->size());
+ EXPECT_EQ(1, result.val->functions->size());
+ EXPECT_EQ(0, result.val->data_segments->size());
+ EXPECT_EQ(0, result.val->function_table->size());
+
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(9, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_EQ(523, function->local_int32_count);
+ EXPECT_EQ(1037, function->local_int64_count);
+ EXPECT_EQ(1551, function->local_float32_count);
+ EXPECT_EQ(2065, function->local_float64_count);
+
+ EXPECT_TRUE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 5; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneFunctionImported) {
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ kDeclFunctionImport, // no name, no locals, imported
+ 0, 0, // signature index
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->functions->size());
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(0, function->code_start_offset);
+ EXPECT_EQ(0, function->code_end_offset);
+
+ EXPECT_EQ(0, function->local_int32_count);
+ EXPECT_EQ(0, function->local_int64_count);
+ EXPECT_EQ(0, function->local_float32_count);
+ EXPECT_EQ(0, function->local_float64_count);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_TRUE(function->external);
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody) {
+ static const byte kCodeStartOffset = 11;
+ static const byte kCodeEndOffset = kCodeStartOffset + 1;
+
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ 0, // no name, no locals
+ 0, 0, // signature index
+ 1, 0, // body size
+ kExprNop // body
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->functions->size());
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_EQ(0, function->local_int32_count);
+ EXPECT_EQ(0, function->local_int64_count);
+ EXPECT_EQ(0, function->local_float32_count);
+ EXPECT_EQ(0, function->local_float64_count);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneFunctionWithNopBody_WithLocals) {
+ static const byte kCodeStartOffset = 19;
+ static const byte kCodeEndOffset = kCodeStartOffset + 1;
+
+ static const byte data[] = {
+ kDeclSignatures, 1,
+ // sig#0 -------------------------------------------------------
+ 0, 0, // void -> void
+ kDeclFunctions, 1,
+ // func#0 ------------------------------------------------------
+ kDeclFunctionLocals, 0, 0, // signature index
+ 1, 2, // local int32 count
+ 3, 4, // local int64 count
+ 5, 6, // local float32 count
+ 7, 8, // local float64 count
+ 1, 0, // body size
+ kExprNop // body
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->functions->size());
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_EQ(513, function->local_int32_count);
+ EXPECT_EQ(1027, function->local_int64_count);
+ EXPECT_EQ(1541, function->local_float32_count);
+ EXPECT_EQ(2055, function->local_float64_count);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneGlobalOneFunctionWithNopBodyOneDataSegment) {
+ static const byte kCodeStartOffset = 2 + kDeclGlobalSize + 4 + 2 + 17;
+ static const byte kCodeEndOffset = kCodeStartOffset + 3;
+
+ static const byte data[] = {
+ // global#0 --------------------------------------------------
+ kDeclGlobals, 1, 0, 0, 0, 0, // name offset
+ kMemU8, // memory type
+ 0, // exported
+ // sig#0 -----------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // func#0 ----------------------------------------------------
+ kDeclFunctions, 1, kDeclFunctionLocals | kDeclFunctionName, 0,
+ 0, // signature index
+ 9, 0, 0, 0, // name offset
+ 1, 2, // local int32 count
+ 3, 4, // local int64 count
+ 5, 6, // local float32 count
+ 7, 8, // local float64 count
+ 3, 0, // body size
+ kExprNop, // func#0 body
+ kExprNop, // func#0 body
+ kExprNop, // func#0 body
+ // segment#0 -------------------------------------------------
+ kDeclDataSegments, 1, 0xae, 0xb3, 0x08, 0, // dest addr
+ 15, 0, 0, 0, // source offset
+ 5, 0, 0, 0, // source size
+ 1, // init
+ // rest ------------------------------------------------------
+ kDeclEnd,
+ };
+
+ {
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(1, result.val->globals->size());
+ EXPECT_EQ(1, result.val->functions->size());
+ EXPECT_EQ(1, result.val->data_segments->size());
+
+ WasmGlobal* global = &result.val->globals->back();
+
+ EXPECT_EQ(0, global->name_offset);
+ EXPECT_EQ(MachineType::Uint8(), global->type);
+ EXPECT_EQ(0, global->offset);
+ EXPECT_FALSE(global->exported);
+
+ WasmFunction* function = &result.val->functions->back();
+
+ EXPECT_EQ(9, function->name_offset);
+ EXPECT_EQ(kCodeStartOffset, function->code_start_offset);
+ EXPECT_EQ(kCodeEndOffset, function->code_end_offset);
+
+ EXPECT_FALSE(function->exported);
+ EXPECT_FALSE(function->external);
+
+ WasmDataSegment* segment = &result.val->data_segments->back();
+
+ EXPECT_EQ(0x8b3ae, segment->dest_addr);
+ EXPECT_EQ(15, segment->source_offset);
+ EXPECT_EQ(5, segment->source_size);
+ EXPECT_TRUE(segment->init);
+
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, OneDataSegment) {
+ const byte data[] = {
+ kDeclDataSegments,
+ 1,
+ 0xaa,
+ 0xbb,
+ 0x09,
+ 0, // dest addr
+ 11,
+ 0,
+ 0,
+ 0, // source offset
+ 3,
+ 0,
+ 0,
+ 0, // source size
+ 1, // init
+ };
+
+ {
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(1, result.val->data_segments->size());
+
+ WasmDataSegment* segment = &result.val->data_segments->back();
+
+ EXPECT_EQ(0x9bbaa, segment->dest_addr);
+ EXPECT_EQ(11, segment->source_offset);
+ EXPECT_EQ(3, segment->source_size);
+ EXPECT_TRUE(segment->init);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+TEST_F(WasmModuleVerifyTest, TwoDataSegments) {
+ const byte data[] = {
+ kDeclDataSegments,
+ 2,
+ 0xee,
+ 0xff,
+ 0x07,
+ 0, // dest addr
+ 9,
+ 0,
+ 0,
+ 0, // #0: source offset
+ 4,
+ 0,
+ 0,
+ 0, // source size
+ 0, // init
+ 0xcc,
+ 0xdd,
+ 0x06,
+ 0, // #1: dest addr
+ 6,
+ 0,
+ 0,
+ 0, // source offset
+ 10,
+ 0,
+ 0,
+ 0, // source size
+ 1, // init
+ };
+
+ {
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ EXPECT_EQ(0, result.val->globals->size());
+ EXPECT_EQ(0, result.val->functions->size());
+ EXPECT_EQ(2, result.val->data_segments->size());
+
+ WasmDataSegment* s0 = &result.val->data_segments->at(0);
+ WasmDataSegment* s1 = &result.val->data_segments->at(1);
+
+ EXPECT_EQ(0x7ffee, s0->dest_addr);
+ EXPECT_EQ(9, s0->source_offset);
+ EXPECT_EQ(4, s0->source_size);
+ EXPECT_FALSE(s0->init);
+
+ EXPECT_EQ(0x6ddcc, s1->dest_addr);
+ EXPECT_EQ(6, s1->source_offset);
+ EXPECT_EQ(10, s1->source_size);
+ EXPECT_TRUE(s1->init);
+
+ if (result.val) delete result.val;
+ }
+
+ for (size_t size = 1; size < arraysize(data); size++) {
+ // Should fall off end of module bytes.
+ ModuleResult result = DecodeModule(data, data + size);
+ EXPECT_FALSE(result.ok());
+ if (result.val) delete result.val;
+ }
+}
+
+
+// Shorthand to make the indirect-call tests below much shorter.
+#define FUNCTION(sig_index, external) \
+ kDeclFunctionImport, static_cast<byte>(sig_index), \
+ static_cast<byte>(sig_index >> 8)
+
+
+TEST_F(WasmModuleVerifyTest, OneIndirectFunction) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // func#0 ------------------------------------------------------
+ kDeclFunctions, 1, FUNCTION(0, 0),
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 1, 0, 0};
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.ok()) {
+ EXPECT_EQ(1, result.val->signatures->size());
+ EXPECT_EQ(1, result.val->functions->size());
+ EXPECT_EQ(1, result.val->function_table->size());
+ EXPECT_EQ(0, result.val->function_table->at(0));
+ }
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, MultipleIndirectFunctions) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 2, 0, 0, // void -> void
+ 0, kLocalI32, // void -> i32
+ // func#0 ------------------------------------------------------
+ kDeclFunctions, 4, FUNCTION(0, 1), FUNCTION(1, 1), FUNCTION(0, 1),
+ FUNCTION(1, 1),
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 8, 0, 0, 1, 0, 2, 0, 3, 0, 0, 0, 1, 0, 2, 0, 3, 0,
+ };
+
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.ok()) {
+ EXPECT_EQ(2, result.val->signatures->size());
+ EXPECT_EQ(4, result.val->functions->size());
+ EXPECT_EQ(8, result.val->function_table->size());
+ for (int i = 0; i < 8; i++) {
+ EXPECT_EQ(i & 3, result.val->function_table->at(i));
+ }
+ }
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, IndirectFunctionNoFunctions) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 1, 0, 0,
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, IndirectFunctionInvalidIndex) {
+ static const byte data[] = {
+ // sig#0 -------------------------------------------------------
+ kDeclSignatures, 1, 0, 0, // void -> void
+ // functions ---------------------------------------------------
+ kDeclFunctions, 1, FUNCTION(0, 1),
+ // indirect table ----------------------------------------------
+ kDeclFunctionTable, 1, 1, 0,
+ };
+
+ EXPECT_FAILURE(data);
+}
+
+
+class WasmSignatureDecodeTest : public TestWithZone {};
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_v_v) {
+ static const byte data[] = {0, 0};
+ Zone zone;
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(&zone, data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(0, sig->parameter_count());
+ EXPECT_EQ(0, sig->return_count());
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_t_v) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair ret_type = kLocalTypes[i];
+ const byte data[] = {0, ret_type.code};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(0, sig->parameter_count());
+ EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(ret_type.type, sig->GetReturn());
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_v_t) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair param_type = kLocalTypes[i];
+ const byte data[] = {1, 0, param_type.code};
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(1, sig->parameter_count());
+ EXPECT_EQ(0, sig->return_count());
+ EXPECT_EQ(param_type.type, sig->GetParam(0));
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_t_t) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair ret_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalTypePair param_type = kLocalTypes[j];
+ const byte data[] = {1, // param count
+ ret_type.code, // ret
+ param_type.code}; // param
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(1, sig->parameter_count());
+ EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(param_type.type, sig->GetParam(0));
+ EXPECT_EQ(ret_type.type, sig->GetReturn());
+ }
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Ok_i_tt) {
+ for (size_t i = 0; i < arraysize(kLocalTypes); i++) {
+ LocalTypePair p0_type = kLocalTypes[i];
+ for (size_t j = 0; j < arraysize(kLocalTypes); j++) {
+ LocalTypePair p1_type = kLocalTypes[j];
+ const byte data[] = {2, // param count
+ kLocalI32, // ret
+ p0_type.code, // p0
+ p1_type.code}; // p1
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+
+ EXPECT_TRUE(sig != nullptr);
+ EXPECT_EQ(2, sig->parameter_count());
+ EXPECT_EQ(1, sig->return_count());
+ EXPECT_EQ(p0_type.type, sig->GetParam(0));
+ EXPECT_EQ(p1_type.type, sig->GetParam(1));
+ }
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Fail_off_end) {
+ byte data[256];
+ for (int p = 0; p <= 255; p = p + 1 + p * 3) {
+ for (int i = 0; i <= p; i++) data[i] = kLocalI32;
+ data[0] = static_cast<byte>(p);
+
+ for (int i = 0; i < p + 1; i++) {
+ // Should fall off the end for all signatures.
+ FunctionSig* sig = DecodeWasmSignatureForTesting(zone(), data, data + i);
+ EXPECT_EQ(nullptr, sig);
+ }
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_type) {
+ byte kInvalidType = 76;
+ for (int i = 1; i < 3; i++) {
+ byte data[] = {2, kLocalI32, kLocalI32, kLocalI32};
+ data[i] = kInvalidType;
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+ }
+}
+
+
+TEST_F(WasmSignatureDecodeTest, Fail_invalid_param_type) {
+ static const int kParamCount = 3;
+ for (int i = 0; i < kParamCount; i++) {
+ byte data[] = {kParamCount, kLocalI32, kLocalI32, kLocalI32, kLocalI32};
+ data[i + 2] = kLocalVoid;
+ FunctionSig* sig =
+ DecodeWasmSignatureForTesting(zone(), data, data + arraysize(data));
+ EXPECT_EQ(nullptr, sig);
+ }
+}
+
+
+class WasmFunctionVerifyTest : public TestWithZone {};
+
+
+TEST_F(WasmFunctionVerifyTest, Ok_v_v_empty) {
+ byte data[] = {
+ 0, kLocalVoid, // signature
+ 3, 0, // local int32 count
+ 4, 0, // local int64 count
+ 5, 0, // local float32 count
+ 6, 0, // local float64 count
+ kExprNop // body
+ };
+
+ FunctionResult result = DecodeWasmFunction(nullptr, zone(), nullptr, data,
+ data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+
+ if (result.val && result.ok()) {
+ WasmFunction* function = result.val;
+ EXPECT_EQ(0, function->sig->parameter_count());
+ EXPECT_EQ(0, function->sig->return_count());
+ EXPECT_EQ(0, function->name_offset);
+ EXPECT_EQ(arraysize(data) - 1, function->code_start_offset);
+ EXPECT_EQ(arraysize(data), function->code_end_offset);
+ EXPECT_EQ(3, function->local_int32_count);
+ EXPECT_EQ(4, function->local_int64_count);
+ EXPECT_EQ(5, function->local_float32_count);
+ EXPECT_EQ(6, function->local_float64_count);
+ EXPECT_FALSE(function->external);
+ EXPECT_FALSE(function->exported);
+ }
+
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionNoLen) {
+ const byte data[] = {
+ kDeclWLL, // section without length.
+ };
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionEmpty) {
+ const byte data[] = {
+ kDeclWLL, 0, // empty section
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionOne) {
+ const byte data[] = {
+ kDeclWLL,
+ 1, // LEB128 1
+ 0, // one byte section
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionTen) {
+ const byte data[] = {
+ kDeclWLL,
+ 10, // LEB128 10
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ };
+ ModuleResult result = DecodeModule(data, data + arraysize(data));
+ EXPECT_TRUE(result.ok());
+ if (result.val) delete result.val;
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionOverflow) {
+ const byte data[] = {
+ kDeclWLL,
+ 11, // LEB128 11
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // 10 byte section
+ };
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionUnderflow) {
+ const byte data[] = {
+ kDeclWLL,
+ 0xff, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xffffffff
+ 1, 2, 3, 4, // 4 byte section
+ };
+ EXPECT_FAILURE(data);
+}
+
+
+TEST_F(WasmModuleVerifyTest, WLLSectionLoop) {
+  // Decoding would loop forever if length wrap-around were allowed.
+ const byte data[] = {
+ kDeclWLL,
+ 0xfa, 0xff, 0xff, 0xff, 0x0f, // LEB128 0xfffffffa
+ 1, 2, 3, 4, // 4 byte section
+ };
+ EXPECT_FAILURE(data);
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/test/unittests/wasm/wasm-macro-gen-unittest.cc b/test/unittests/wasm/wasm-macro-gen-unittest.cc
new file mode 100644
index 0000000..c5bb5ec
--- /dev/null
+++ b/test/unittests/wasm/wasm-macro-gen-unittest.cc
@@ -0,0 +1,319 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/test-utils.h"
+
+#include "src/wasm/wasm-macro-gen.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+class WasmMacroGenTest : public TestWithZone {};  // Zone-backed fixture; the size checks below keep no state.
+
+#define EXPECT_SIZE(size, ...) \
+  do { \
+    byte code[] = {__VA_ARGS__}; \
+    EXPECT_EQ(size, sizeof(code)); \
+  } while (false)  // Asserts that the macro expansion emits exactly `size` bytes.
+
+
+TEST_F(WasmMacroGenTest, Constants) {
+  EXPECT_SIZE(2, WASM_ONE);  // 1 opcode byte + 1-byte immediate.
+  EXPECT_SIZE(2, WASM_ZERO);
+
+  EXPECT_SIZE(2, WASM_I8(122));
+  EXPECT_SIZE(2, WASM_I8(254));
+
+  EXPECT_SIZE(5, WASM_I32(1));  // 1 opcode byte + 4-byte immediate.
+  EXPECT_SIZE(5, WASM_I32(10000));
+  EXPECT_SIZE(5, WASM_I32(-9828934));
+
+  EXPECT_SIZE(9, WASM_I64(1));  // 1 opcode byte + 8-byte immediate.
+  EXPECT_SIZE(9, WASM_I64(10000));
+  EXPECT_SIZE(9, WASM_I64(-9828934));
+  EXPECT_SIZE(9, WASM_I64(0x123456789abcdef0ULL));
+
+  EXPECT_SIZE(5, WASM_F32(1.0f));  // 1 opcode byte + 4-byte float.
+  EXPECT_SIZE(5, WASM_F32(10000.0f));
+  EXPECT_SIZE(5, WASM_F32(-9828934.0f));
+
+  EXPECT_SIZE(9, WASM_F64(1.5));  // 1 opcode byte + 8-byte double.
+  EXPECT_SIZE(9, WASM_F64(10200.0));
+  EXPECT_SIZE(9, WASM_F64(-9818934.0));
+}
+
+
+TEST_F(WasmMacroGenTest, Statements) {
+  EXPECT_SIZE(1, WASM_NOP);  // a bare opcode, no operands.
+
+  EXPECT_SIZE(4, WASM_SET_LOCAL(0, WASM_ZERO));
+
+  EXPECT_SIZE(4, WASM_STORE_GLOBAL(0, WASM_ZERO));
+
+  EXPECT_SIZE(6, WASM_STORE_MEM(MachineType::Int32(), WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(4, WASM_IF(WASM_ZERO, WASM_NOP));
+
+  EXPECT_SIZE(5, WASM_IF_ELSE(WASM_ZERO, WASM_NOP, WASM_NOP));
+
+  EXPECT_SIZE(5, WASM_SELECT(WASM_ZERO, WASM_NOP, WASM_NOP));
+
+  EXPECT_SIZE(3, WASM_BR(0));
+  EXPECT_SIZE(5, WASM_BR_IF(0, WASM_ZERO));
+
+  EXPECT_SIZE(3, WASM_BLOCK(1, WASM_NOP));  // grows by 1 byte per extra WASM_NOP.
+  EXPECT_SIZE(4, WASM_BLOCK(2, WASM_NOP, WASM_NOP));
+  EXPECT_SIZE(5, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_NOP));
+
+  EXPECT_SIZE(5, WASM_INFINITE_LOOP);
+
+  EXPECT_SIZE(3, WASM_LOOP(1, WASM_NOP));  // same growth pattern as WASM_BLOCK.
+  EXPECT_SIZE(4, WASM_LOOP(2, WASM_NOP, WASM_NOP));
+  EXPECT_SIZE(5, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_NOP));
+  EXPECT_SIZE(5, WASM_LOOP(1, WASM_BR(0)));
+  EXPECT_SIZE(7, WASM_LOOP(1, WASM_BR_IF(0, WASM_ZERO)));
+
+  EXPECT_SIZE(1, WASM_RETURN0);
+  EXPECT_SIZE(3, WASM_RETURN(WASM_ZERO));  // +2 bytes per returned WASM_ZERO.
+  EXPECT_SIZE(5, WASM_RETURN(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(1, WASM_UNREACHABLE);
+}
+
+
+TEST_F(WasmMacroGenTest, MacroStatements) {  // composite convenience macros.
+  EXPECT_SIZE(8, WASM_WHILE(WASM_I8(0), WASM_NOP));
+  EXPECT_SIZE(7, WASM_INC_LOCAL(0));
+  EXPECT_SIZE(7, WASM_INC_LOCAL_BY(0, 3));
+
+  EXPECT_SIZE(3, WASM_BREAK(0));  // same size as a plain WASM_BR.
+  EXPECT_SIZE(3, WASM_CONTINUE(0));
+}
+
+
+TEST_F(WasmMacroGenTest, TableSwitch) {
+  EXPECT_SIZE(2, WASM_CASE(9));  // each case entry encodes to 2 bytes.
+  EXPECT_SIZE(2, WASM_CASE_BR(11));
+
+  EXPECT_SIZE(7, WASM_TABLESWITCH_OP(0, 1, WASM_CASE(7)));  // +2 bytes per case.
+  EXPECT_SIZE(9, WASM_TABLESWITCH_OP(0, 2, WASM_CASE(7), WASM_CASE(8)));
+
+  EXPECT_SIZE(4, WASM_TABLESWITCH_BODY(WASM_I8(88), WASM_I8(77)));
+  EXPECT_SIZE(
+      6, WASM_TABLESWITCH_BODY(WASM_I8(33), WASM_I8(44), WASM_GET_LOCAL(0)));
+}
+
+
+TEST_F(WasmMacroGenTest, Expressions) {
+  EXPECT_SIZE(2, WASM_GET_LOCAL(0));  // size is independent of the index value.
+  EXPECT_SIZE(2, WASM_GET_LOCAL(1));
+  EXPECT_SIZE(2, WASM_GET_LOCAL(12));
+  EXPECT_SIZE(2, WASM_LOAD_GLOBAL(0));
+  EXPECT_SIZE(2, WASM_LOAD_GLOBAL(1));
+  EXPECT_SIZE(2, WASM_LOAD_GLOBAL(12));
+  EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Int32(), WASM_ZERO));  // same size for every type.
+  EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Float64(), WASM_ZERO));
+  EXPECT_SIZE(4, WASM_LOAD_MEM(MachineType::Float32(), WASM_ZERO));
+
+  EXPECT_SIZE(3, WASM_NOT(WASM_ZERO));
+
+  EXPECT_SIZE(4, WASM_BRV(1, WASM_ZERO));
+  EXPECT_SIZE(6, WASM_BRV_IF(1, WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(4, WASM_BLOCK(1, WASM_ZERO));  // block yielding a value: 1 byte more than the WASM_NOP cases.
+  EXPECT_SIZE(5, WASM_BLOCK(2, WASM_NOP, WASM_ZERO));
+  EXPECT_SIZE(6, WASM_BLOCK(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+
+  EXPECT_SIZE(4, WASM_LOOP(1, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_LOOP(2, WASM_NOP, WASM_ZERO));
+  EXPECT_SIZE(6, WASM_LOOP(3, WASM_NOP, WASM_NOP, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, FunctionCalls) {
+  EXPECT_SIZE(2, WASM_CALL_FUNCTION0(0));  // size is independent of the function index.
+  EXPECT_SIZE(2, WASM_CALL_FUNCTION0(1));
+  EXPECT_SIZE(2, WASM_CALL_FUNCTION0(11));
+
+  EXPECT_SIZE(4, WASM_CALL_FUNCTION(0, WASM_ZERO));  // +2 bytes per WASM_ZERO argument.
+  EXPECT_SIZE(6, WASM_CALL_FUNCTION(1, WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(4, WASM_CALL_INDIRECT0(0, WASM_ZERO));
+  EXPECT_SIZE(4, WASM_CALL_INDIRECT0(1, WASM_ZERO));
+  EXPECT_SIZE(4, WASM_CALL_INDIRECT0(11, WASM_ZERO));
+
+  EXPECT_SIZE(6, WASM_CALL_INDIRECT(0, WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(8, WASM_CALL_INDIRECT(1, WASM_ZERO, WASM_ZERO, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Int32Ops) {  // binops: 1 opcode byte + two 2-byte WASM_ZERO operands.
+  EXPECT_SIZE(5, WASM_I32_ADD(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_SUB(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_MUL(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_DIVS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_DIVU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_REMS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_REMU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_AND(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_IOR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_XOR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_SHL(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_SHR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_SAR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_EQ(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(5, WASM_I32_LTS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_LES(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_LTU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_LEU(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(5, WASM_I32_GTS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_GES(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_GTU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I32_GEU(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(3, WASM_I32_CLZ(WASM_ZERO));  // unary ops: 1 opcode byte + one operand.
+  EXPECT_SIZE(3, WASM_I32_CTZ(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I32_POPCNT(WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Int64Ops) {  // mirrors Int32Ops: operand width does not change encoding size.
+  EXPECT_SIZE(5, WASM_I64_ADD(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_SUB(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_MUL(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_DIVS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_DIVU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_REMS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_REMU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_AND(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_IOR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_XOR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_SHL(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_SHR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_SAR(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_EQ(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(5, WASM_I64_LTS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_LES(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_LTU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_LEU(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(5, WASM_I64_GTS(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_GES(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_GTU(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_I64_GEU(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(3, WASM_I64_CLZ(WASM_ZERO));  // unary ops: opcode + one operand.
+  EXPECT_SIZE(3, WASM_I64_CTZ(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I64_POPCNT(WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Float32Ops) {  // binops 5 bytes, unary ops 3 bytes, as for the integer ops.
+  EXPECT_SIZE(5, WASM_F32_ADD(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_SUB(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_MUL(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_DIV(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_MIN(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_MAX(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_COPYSIGN(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(3, WASM_F32_ABS(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_NEG(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_CEIL(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_FLOOR(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_TRUNC(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_NEARESTINT(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_SQRT(WASM_ZERO));
+
+  EXPECT_SIZE(5, WASM_F32_EQ(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_LT(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_LE(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_GT(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F32_GE(WASM_ZERO, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Float64Ops) {  // same size scheme as Float32Ops.
+  EXPECT_SIZE(5, WASM_F64_ADD(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_SUB(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_MUL(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_DIV(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_MIN(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_MAX(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_COPYSIGN(WASM_ZERO, WASM_ZERO));
+
+  EXPECT_SIZE(3, WASM_F64_ABS(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_NEG(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_CEIL(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_FLOOR(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_TRUNC(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_NEARESTINT(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_SQRT(WASM_ZERO));
+
+  EXPECT_SIZE(5, WASM_F64_EQ(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_LT(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_LE(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_GT(WASM_ZERO, WASM_ZERO));
+  EXPECT_SIZE(5, WASM_F64_GE(WASM_ZERO, WASM_ZERO));
+}
+
+
+TEST_F(WasmMacroGenTest, Conversions) {  // all conversions are unary: 1 opcode byte + one 2-byte operand.
+  EXPECT_SIZE(3, WASM_I32_SCONVERT_F32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I32_SCONVERT_F64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I32_UCONVERT_F32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I32_UCONVERT_F64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I32_CONVERT_I64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I64_SCONVERT_F32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I64_SCONVERT_F64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I64_UCONVERT_F32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I64_UCONVERT_F64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I64_SCONVERT_I32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_I64_UCONVERT_I32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_SCONVERT_I32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_UCONVERT_I32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_SCONVERT_I64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_UCONVERT_I64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_CONVERT_F64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F32_REINTERPRET_I32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_SCONVERT_I32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_UCONVERT_I32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_SCONVERT_I64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_UCONVERT_I64(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_CONVERT_F32(WASM_ZERO));
+  EXPECT_SIZE(3, WASM_F64_REINTERPRET_I64(WASM_ZERO));
+}
+
+static const MachineType kMemTypes[] = {  // machine types exercised by the load/store tests below.
+    MachineType::Int8(), MachineType::Uint8(), MachineType::Int16(),
+    MachineType::Uint16(), MachineType::Int32(), MachineType::Uint32(),
+    MachineType::Int64(), MachineType::Uint64(), MachineType::Float32(),
+    MachineType::Float64()};
+
+TEST_F(WasmMacroGenTest, LoadsAndStores) {
+  for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+    EXPECT_SIZE(4, WASM_LOAD_MEM(kMemTypes[i], WASM_ZERO));  // same size regardless of memory type.
+  }
+  for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+    EXPECT_SIZE(6, WASM_STORE_MEM(kMemTypes[i], WASM_ZERO, WASM_GET_LOCAL(0)));
+  }
+}
+
+
+TEST_F(WasmMacroGenTest, LoadsAndStoresWithOffset) {
+  for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+    EXPECT_SIZE(5, WASM_LOAD_MEM_OFFSET(kMemTypes[i], 11, WASM_ZERO));  // offset immediate adds 1 byte vs. WASM_LOAD_MEM.
+  }
+  for (size_t i = 0; i < arraysize(kMemTypes); i++) {
+    EXPECT_SIZE(7, WASM_STORE_MEM_OFFSET(kMemTypes[i], 13, WASM_ZERO,
+                                         WASM_GET_LOCAL(0)));
+  }
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8