Merge V8 5.2.361.47 DO NOT MERGE
https://chromium.googlesource.com/v8/v8/+/5.2.361.47
FPIIM-449
Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/test/cctest/compiler/call-tester.h b/test/cctest/compiler/call-tester.h
index c75bde1..77d2ce1 100644
--- a/test/cctest/compiler/call-tester.h
+++ b/test/cctest/compiler/call-tester.h
@@ -106,12 +106,14 @@
static int64_t Cast(int32_t r) { return static_cast<int64_t>(r); }
};
+#if !V8_TARGET_ARCH_PPC64
template <>
struct ParameterTraits<uint32_t> {
static int64_t Cast(uint32_t r) {
return static_cast<int64_t>(static_cast<int32_t>(r));
}
};
+#endif
#endif // !V8_TARGET_ARCH_64_BIT
diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h
index 5d670bf..dbb9a72 100644
--- a/test/cctest/compiler/codegen-tester.h
+++ b/test/cctest/compiler/codegen-tester.h
@@ -65,7 +65,7 @@
Schedule* schedule = this->Export();
CallDescriptor* call_descriptor = this->call_descriptor();
Graph* graph = this->graph();
- CompilationInfo info("testing", main_isolate(), main_zone());
+ CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone());
code_ = Pipeline::GenerateCodeForTesting(&info, call_descriptor, graph,
schedule);
}
diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h
index 631bdde..555e049 100644
--- a/test/cctest/compiler/function-tester.h
+++ b/test/cctest/compiler/function-tester.h
@@ -31,7 +31,7 @@
Compile(function);
const uint32_t supported_flags =
CompilationInfo::kFunctionContextSpecializing |
- CompilationInfo::kInliningEnabled | CompilationInfo::kTypingEnabled;
+ CompilationInfo::kInliningEnabled;
CHECK_EQ(0u, flags_ & ~supported_flags);
}
@@ -177,7 +177,7 @@
Handle<JSFunction> Compile(Handle<JSFunction> function) {
Zone zone(function->GetIsolate()->allocator());
ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
info.MarkAsDeoptimizationEnabled();
CHECK(Parser::ParseStatic(info.parse_info()));
@@ -188,14 +188,14 @@
if (flags_ & CompilationInfo::kInliningEnabled) {
info.MarkAsInliningEnabled();
}
- if (flags_ & CompilationInfo::kTypingEnabled) {
- info.MarkAsTypingEnabled();
+ if (FLAG_turbo_from_bytecode && function->shared()->HasBytecodeArray()) {
+ info.MarkAsOptimizeFromBytecode();
+ } else {
+ CHECK(Compiler::Analyze(info.parse_info()));
+ CHECK(Compiler::EnsureDeoptimizationSupport(&info));
}
- CHECK(Compiler::Analyze(info.parse_info()));
- CHECK(Compiler::EnsureDeoptimizationSupport(&info));
- Pipeline pipeline(&info);
- Handle<Code> code = pipeline.GenerateCode();
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&info);
CHECK(!code.is_null());
info.dependencies()->Commit(code);
info.context()->native_context()->AddOptimizedCode(*code);
@@ -226,12 +226,10 @@
Handle<JSFunction> CompileGraph(Graph* graph) {
Zone zone(function->GetIsolate()->allocator());
ParseInfo parse_info(&zone, function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CHECK(Parser::ParseStatic(info.parse_info()));
info.SetOptimizing();
- CHECK(Compiler::Analyze(info.parse_info()));
- CHECK(Compiler::EnsureDeoptimizationSupport(&info));
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
CHECK(!code.is_null());
diff --git a/test/cctest/compiler/graph-builder-tester.h b/test/cctest/compiler/graph-builder-tester.h
index de2713a..e4cccda 100644
--- a/test/cctest/compiler/graph-builder-tester.h
+++ b/test/cctest/compiler/graph-builder-tester.h
@@ -168,11 +168,11 @@
Node* ChangeFloat64ToTagged(Node* a) {
return NewNode(simplified()->ChangeFloat64ToTagged(), a);
}
- Node* ChangeBoolToBit(Node* a) {
- return NewNode(simplified()->ChangeBoolToBit(), a);
+ Node* ChangeTaggedToBit(Node* a) {
+ return NewNode(simplified()->ChangeTaggedToBit(), a);
}
- Node* ChangeBitToBool(Node* a) {
- return NewNode(simplified()->ChangeBitToBool(), a);
+ Node* ChangeBitToTagged(Node* a) {
+ return NewNode(simplified()->ChangeBitToTagged(), a);
}
Node* LoadField(const FieldAccess& access, Node* object) {
@@ -277,7 +277,7 @@
Zone* zone = graph()->zone();
CallDescriptor* desc =
Linkage::GetSimplifiedCDescriptor(zone, this->csig_);
- CompilationInfo info("testing", main_isolate(), main_zone());
+ CompilationInfo info(ArrayVector("testing"), main_isolate(), main_zone());
code_ = Pipeline::GenerateCodeForTesting(&info, desc, graph());
#ifdef ENABLE_DISASSEMBLER
if (!code_.is_null() && FLAG_print_opt_code) {
diff --git a/test/cctest/compiler/test-branch-combine.cc b/test/cctest/compiler/test-branch-combine.cc
index c3b4308..c5c4166 100644
--- a/test/cctest/compiler/test-branch-combine.cc
+++ b/test/cctest/compiler/test-branch-combine.cc
@@ -457,6 +457,27 @@
}
}
+TEST(BranchCombineEffectLevel) {
+ // Test that the load doesn't get folded into the branch, as there's a store
+ // between them. See http://crbug.com/611976.
+ int32_t input = 0;
+
+ RawMachineAssemblerTester<int32_t> m;
+ Node* a = m.LoadFromPointer(&input, MachineType::Int32());
+ Node* compare = m.Word32And(a, m.Int32Constant(1));
+ Node* equal = m.Word32Equal(compare, m.Int32Constant(0));
+ m.StoreToPointer(&input, MachineRepresentation::kWord32, m.Int32Constant(1));
+
+ RawMachineLabel blocka, blockb;
+ m.Branch(equal, &blocka, &blockb);
+ m.Bind(&blocka);
+ m.Return(m.Int32Constant(42));
+ m.Bind(&blockb);
+ m.Return(m.Int32Constant(0));
+
+ CHECK_EQ(42, m.Call());
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/cctest/compiler/test-changes-lowering.cc b/test/cctest/compiler/test-changes-lowering.cc
deleted file mode 100644
index ddeabe4..0000000
--- a/test/cctest/compiler/test-changes-lowering.cc
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits>
-
-#include "src/ast/scopes.h"
-#include "src/compiler/change-lowering.h"
-#include "src/compiler/control-builders.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/pipeline.h"
-#include "src/compiler/select-lowering.h"
-#include "src/compiler/simplified-lowering.h"
-#include "src/compiler/typer.h"
-#include "src/compiler/verifier.h"
-#include "src/execution.h"
-#include "src/globals.h"
-#include "src/parsing/parser.h"
-#include "src/parsing/rewriter.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/codegen-tester.h"
-#include "test/cctest/compiler/function-tester.h"
-#include "test/cctest/compiler/graph-builder-tester.h"
-#include "test/cctest/compiler/value-helper.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <typename ReturnType>
-class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
- public:
- explicit ChangesLoweringTester(MachineType p0 = MachineType::None())
- : GraphBuilderTester<ReturnType>(p0),
- javascript(this->zone()),
- jsgraph(this->isolate(), this->graph(), this->common(), &javascript,
- nullptr, this->machine()),
- function(Handle<JSFunction>::null()) {}
-
- JSOperatorBuilder javascript;
- JSGraph jsgraph;
- Handle<JSFunction> function;
-
- Node* start() { return this->graph()->start(); }
-
- template <typename T>
- T* CallWithPotentialGC() {
- // TODO(titzer): we wrap the code in a JSFunction here to reuse the
- // JSEntryStub; that could be done with a special prologue or other stub.
- if (function.is_null()) {
- function = FunctionTester::ForMachineGraph(this->graph());
- }
- Handle<Object>* args = NULL;
- MaybeHandle<Object> result =
- Execution::Call(this->isolate(), function, factory()->undefined_value(),
- 0, args, false);
- return T::cast(*result.ToHandleChecked());
- }
-
- void StoreFloat64(Node* node, double* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- this->Store(MachineType::Float64(), ptr_node, node);
- }
-
- Node* LoadInt32(int32_t* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(MachineType::Int32(), ptr_node);
- }
-
- Node* LoadUint32(uint32_t* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(MachineType::Uint32(), ptr_node);
- }
-
- Node* LoadFloat64(double* ptr) {
- Node* ptr_node = this->PointerConstant(ptr);
- return this->Load(MachineType::Float64(), ptr_node);
- }
-
- void CheckNumber(double expected, Object* number) {
- CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
- }
-
- void BuildAndLower(const Operator* op) {
- // We build a graph by hand here, because the raw machine assembler
- // does not add the correct control and effect nodes.
- Node* p0 = this->Parameter(0);
- Node* change = this->graph()->NewNode(op, p0);
- Node* ret = this->graph()->NewNode(this->common()->Return(), change,
- this->start(), this->start());
- Node* end = this->graph()->NewNode(this->common()->End(1), ret);
- this->graph()->SetEnd(end);
- LowerChange(change);
- }
-
- void BuildStoreAndLower(const Operator* op, const Operator* store_op,
- void* location) {
- // We build a graph by hand here, because the raw machine assembler
- // does not add the correct control and effect nodes.
- Node* p0 = this->Parameter(0);
- Node* change = this->graph()->NewNode(op, p0);
- Node* store = this->graph()->NewNode(
- store_op, this->PointerConstant(location), this->Int32Constant(0),
- change, this->start(), this->start());
- Node* ret = this->graph()->NewNode(
- this->common()->Return(), this->Int32Constant(0), store, this->start());
- Node* end = this->graph()->NewNode(this->common()->End(1), ret);
- this->graph()->SetEnd(end);
- LowerChange(change);
- }
-
- void BuildLoadAndLower(const Operator* op, const Operator* load_op,
- void* location) {
- // We build a graph by hand here, because the raw machine assembler
- // does not add the correct control and effect nodes.
- Node* load = this->graph()->NewNode(
- load_op, this->PointerConstant(location), this->Int32Constant(0),
- this->start(), this->start());
- Node* change = this->graph()->NewNode(op, load);
- Node* ret = this->graph()->NewNode(this->common()->Return(), change,
- this->start(), this->start());
- Node* end = this->graph()->NewNode(this->common()->End(1), ret);
- this->graph()->SetEnd(end);
- LowerChange(change);
- }
-
- void LowerChange(Node* change) {
- // Run the graph reducer with changes lowering on a single node.
- Typer typer(this->isolate(), this->graph());
- typer.Run();
- ChangeLowering change_lowering(&jsgraph);
- SelectLowering select_lowering(this->graph(), this->common());
- GraphReducer reducer(this->zone(), this->graph());
- reducer.AddReducer(&change_lowering);
- reducer.AddReducer(&select_lowering);
- reducer.ReduceNode(change);
- Verifier::Run(this->graph(), Verifier::UNTYPED);
- }
-
- Factory* factory() { return this->isolate()->factory(); }
- Heap* heap() { return this->isolate()->heap(); }
-};
-
-
-TEST(RunChangeTaggedToInt32) {
- // Build and lower a graph by hand.
- ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
- t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
-
- FOR_INT32_INPUTS(i) {
- int32_t input = *i;
-
- if (Smi::IsValid(input)) {
- int32_t result = t.Call(Smi::FromInt(input));
- CHECK_EQ(input, result);
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(input, result);
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- int32_t result = t.Call(*number);
- CHECK_EQ(input, result);
- }
- }
-}
-
-
-TEST(RunChangeTaggedToUint32) {
- // Build and lower a graph by hand.
- ChangesLoweringTester<uint32_t> t(MachineType::AnyTagged());
- t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
-
- FOR_UINT32_INPUTS(i) {
- uint32_t input = *i;
-
- if (Smi::IsValid(input)) {
- uint32_t result = t.Call(Smi::FromInt(input));
- CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- uint32_t result = t.Call(*number);
- CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- uint32_t result = t.Call(*number);
- CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
- }
- }
-}
-
-
-TEST(RunChangeTaggedToFloat64) {
- ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
- double result;
-
- t.BuildStoreAndLower(t.simplified()->ChangeTaggedToFloat64(),
- t.machine()->Store(StoreRepresentation(
- MachineRepresentation::kFloat64, kNoWriteBarrier)),
- &result);
-
- {
- FOR_INT32_INPUTS(i) {
- int32_t input = *i;
-
- if (Smi::IsValid(input)) {
- t.Call(Smi::FromInt(input));
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
-
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- t.Call(*number);
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- t.Call(*number);
- CHECK_EQ(input, static_cast<int32_t>(result));
- }
- }
- }
-
- {
- FOR_FLOAT64_INPUTS(i) {
- double input = *i;
- {
- Handle<Object> number = t.factory()->NewNumber(input);
- t.Call(*number);
- CHECK_DOUBLE_EQ(input, result);
- }
-
- {
- Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
- t.Call(*number);
- CHECK_DOUBLE_EQ(input, result);
- }
- }
- }
-}
-
-
-TEST(RunChangeBoolToBit) {
- ChangesLoweringTester<int32_t> t(MachineType::AnyTagged());
- t.BuildAndLower(t.simplified()->ChangeBoolToBit());
-
- {
- Object* true_obj = t.heap()->true_value();
- int32_t result = t.Call(true_obj);
- CHECK_EQ(1, result);
- }
-
- {
- Object* false_obj = t.heap()->false_value();
- int32_t result = t.Call(false_obj);
- CHECK_EQ(0, result);
- }
-}
-
-
-TEST(RunChangeBitToBool) {
- ChangesLoweringTester<Object*> t(MachineType::Int32());
- t.BuildAndLower(t.simplified()->ChangeBitToBool());
-
- {
- Object* result = t.Call(1);
- Object* true_obj = t.heap()->true_value();
- CHECK_EQ(true_obj, result);
- }
-
- {
- Object* result = t.Call(0);
- Object* false_obj = t.heap()->false_value();
- CHECK_EQ(false_obj, result);
- }
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/test/cctest/compiler/test-code-stub-assembler.cc b/test/cctest/compiler/test-code-stub-assembler.cc
index ff02cc9..37ba9e9 100644
--- a/test/cctest/compiler/test-code-stub-assembler.cc
+++ b/test/cctest/compiler/test-code-stub-assembler.cc
@@ -246,8 +246,9 @@
CodeStubAssemblerTester m(isolate, descriptor);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(5);
array->set(4, Smi::FromInt(733));
- m.Return(m.LoadFixedArrayElementSmiIndex(m.HeapConstant(array),
- m.SmiTag(m.Int32Constant(4))));
+ m.Return(m.LoadFixedArrayElement(m.HeapConstant(array),
+ m.SmiTag(m.Int32Constant(4)), 0,
+ CodeStubAssembler::SMI_PARAMETERS));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
@@ -361,6 +362,33 @@
USE(m.GenerateCode());
}
+TEST(TestToConstant) {
+ Isolate* isolate(CcTest::InitIsolateOnce());
+ VoidDescriptor descriptor(isolate);
+ CodeStubAssemblerTester m(isolate, descriptor);
+ int32_t value32;
+ int64_t value64;
+ Node* a = m.Int32Constant(5);
+ CHECK(m.ToInt32Constant(a, value32));
+ CHECK(m.ToInt64Constant(a, value64));
+
+ a = m.Int64Constant(static_cast<int64_t>(1) << 32);
+ CHECK(!m.ToInt32Constant(a, value32));
+ CHECK(m.ToInt64Constant(a, value64));
+
+ a = m.Int64Constant(13);
+ CHECK(m.ToInt32Constant(a, value32));
+ CHECK(m.ToInt64Constant(a, value64));
+
+ a = m.UndefinedConstant();
+ CHECK(!m.ToInt32Constant(a, value32));
+ CHECK(!m.ToInt64Constant(a, value64));
+
+ a = m.UndefinedConstant();
+ CHECK(!m.ToInt32Constant(a, value32));
+ CHECK(!m.ToInt64Constant(a, value64));
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/test/cctest/compiler/test-gap-resolver.cc b/test/cctest/compiler/test-gap-resolver.cc
index 7f85088..9781aeb 100644
--- a/test/cctest/compiler/test-gap-resolver.cc
+++ b/test/cctest/compiler/test-gap-resolver.cc
@@ -81,7 +81,7 @@
if (!is_constant) {
if (op.IsRegister()) {
index = LocationOperand::cast(op).GetRegister().code();
- } else if (op.IsDoubleRegister()) {
+ } else if (op.IsFPRegister()) {
index = LocationOperand::cast(op).GetDoubleRegister().code();
} else {
index = LocationOperand::cast(op).index();
diff --git a/test/cctest/compiler/test-linkage.cc b/test/cctest/compiler/test-linkage.cc
index 0cbdb4c..dc83f4d 100644
--- a/test/cctest/compiler/test-linkage.cc
+++ b/test/cctest/compiler/test-linkage.cc
@@ -43,7 +43,7 @@
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + b");
ParseInfo parse_info(handles.main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
}
@@ -59,7 +59,7 @@
Handle<JSFunction>::cast(v8::Utils::OpenHandle(
*v8::Local<v8::Function>::Cast(CompileRun(sources[i]))));
ParseInfo parse_info(handles.main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CallDescriptor* descriptor = Linkage::ComputeIncoming(info.zone(), &info);
CHECK(descriptor);
@@ -75,7 +75,7 @@
HandleAndZoneScope handles;
Handle<JSFunction> function = Compile("a + c");
ParseInfo parse_info(handles.main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
for (int i = 0; i < 32; i++) {
CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(
@@ -98,7 +98,8 @@
Isolate* isolate = CcTest::InitIsolateOnce();
Zone zone(isolate->allocator());
ToNumberStub stub(isolate);
- CompilationInfo info("test", isolate, &zone, Code::ComputeFlags(Code::STUB));
+ CompilationInfo info(ArrayVector("test"), isolate, &zone,
+ Code::ComputeFlags(Code::STUB));
CallInterfaceDescriptor interface_descriptor =
stub.GetCallInterfaceDescriptor();
CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
diff --git a/test/cctest/compiler/test-loop-assignment-analysis.cc b/test/cctest/compiler/test-loop-assignment-analysis.cc
index 69f5e15..eafd0ec 100644
--- a/test/cctest/compiler/test-loop-assignment-analysis.cc
+++ b/test/cctest/compiler/test-loop-assignment-analysis.cc
@@ -31,7 +31,7 @@
void CheckLoopAssignedCount(int expected, const char* var_name) {
// TODO(titzer): don't scope analyze every single time.
ParseInfo parse_info(main_zone(), function);
- CompilationInfo info(&parse_info);
+ CompilationInfo info(&parse_info, function);
CHECK(Parser::ParseStatic(&parse_info));
CHECK(Rewriter::Rewrite(&parse_info));
diff --git a/test/cctest/compiler/test-multiple-return.cc b/test/cctest/compiler/test-multiple-return.cc
index 2108ab1..53bae5e 100644
--- a/test/cctest/compiler/test-multiple-return.cc
+++ b/test/cctest/compiler/test-multiple-return.cc
@@ -85,7 +85,8 @@
Node* mul = m.Int32Mul(p0, p1);
m.Return(add, sub, mul);
- CompilationInfo info("testing", handles.main_isolate(), handles.main_zone());
+ CompilationInfo info(ArrayVector("testing"), handles.main_isolate(),
+ handles.main_zone());
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, m.graph(), m.Export());
#ifdef ENABLE_DISASSEMBLER
diff --git a/test/cctest/compiler/test-pipeline.cc b/test/cctest/compiler/test-pipeline.cc
deleted file mode 100644
index 35e3427..0000000
--- a/test/cctest/compiler/test-pipeline.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler.h"
-#include "src/compiler/pipeline.h"
-#include "src/handles.h"
-#include "src/parsing/parser.h"
-#include "test/cctest/cctest.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-static void RunPipeline(Zone* zone, const char* source) {
- Handle<JSFunction> function = Handle<JSFunction>::cast(v8::Utils::OpenHandle(
- *v8::Local<v8::Function>::Cast(CompileRun(source))));
- ParseInfo parse_info(zone, function);
- CHECK(Compiler::ParseAndAnalyze(&parse_info));
- CompilationInfo info(&parse_info);
- info.SetOptimizing();
-
- Pipeline pipeline(&info);
- Handle<Code> code = pipeline.GenerateCode();
- CHECK(!code.is_null());
-}
-
-
-TEST(PipelineTyped) {
- HandleAndZoneScope handles;
- FLAG_turbo_types = true;
- RunPipeline(handles.main_zone(), "(function(a,b) { return a + b; })");
-}
-
-
-TEST(PipelineGeneric) {
- HandleAndZoneScope handles;
- FLAG_turbo_types = false;
- RunPipeline(handles.main_zone(), "(function(a,b) { return a + b; })");
-}
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/test/cctest/compiler/test-representation-change.cc b/test/cctest/compiler/test-representation-change.cc
index 7e75bf8..76aa390 100644
--- a/test/cctest/compiler/test-representation-change.cc
+++ b/test/cctest/compiler/test-representation-change.cc
@@ -439,24 +439,45 @@
TEST(SingleChanges) {
- CheckChange(IrOpcode::kChangeBoolToBit, MachineRepresentation::kTagged,
+ CheckChange(IrOpcode::kChangeTaggedToBit, MachineRepresentation::kTagged,
Type::None(), MachineRepresentation::kBit);
- CheckChange(IrOpcode::kChangeBitToBool, MachineRepresentation::kBit,
+ CheckChange(IrOpcode::kChangeBitToTagged, MachineRepresentation::kBit,
Type::None(), MachineRepresentation::kTagged);
+ CheckChange(IrOpcode::kChangeInt31ToTaggedSigned,
+ MachineRepresentation::kWord32, Type::Signed31(),
+ MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeInt32ToTagged, MachineRepresentation::kWord32,
Type::Signed32(), MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeUint32ToTagged, MachineRepresentation::kWord32,
Type::Unsigned32(), MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeFloat64ToTagged, MachineRepresentation::kFloat64,
- Type::None(), MachineRepresentation::kTagged);
+ Type::Number(), MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kChangeFloat64ToInt32,
+ IrOpcode::kChangeInt31ToTaggedSigned,
+ MachineRepresentation::kFloat64, Type::Signed31(),
+ MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kChangeFloat64ToInt32,
+ IrOpcode::kChangeInt32ToTagged,
+ MachineRepresentation::kFloat64, Type::Signed32(),
+ MachineRepresentation::kTagged);
+ CheckTwoChanges(IrOpcode::kChangeFloat64ToUint32,
+ IrOpcode::kChangeUint32ToTagged,
+ MachineRepresentation::kFloat64, Type::Unsigned32(),
+ MachineRepresentation::kTagged);
CheckChange(IrOpcode::kChangeTaggedToInt32, MachineRepresentation::kTagged,
Type::Signed32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kChangeTaggedToUint32, MachineRepresentation::kTagged,
Type::Unsigned32(), MachineRepresentation::kWord32);
CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
- Type::None(), MachineRepresentation::kFloat64);
+ Type::Number(), MachineRepresentation::kFloat64);
+ CheckChange(IrOpcode::kChangeTaggedToFloat64, MachineRepresentation::kTagged,
+ Type::NumberOrUndefined(), MachineRepresentation::kFloat64);
+ CheckTwoChanges(IrOpcode::kChangeTaggedSignedToInt32,
+ IrOpcode::kChangeInt32ToFloat64,
+ MachineRepresentation::kTagged, Type::TaggedSigned(),
+ MachineRepresentation::kFloat64);
// Int32,Uint32 <-> Float64 are actually machine conversions.
CheckChange(IrOpcode::kChangeInt32ToFloat64, MachineRepresentation::kWord32,
@@ -513,7 +534,7 @@
Type::None(), MachineRepresentation::kFloat64);
CheckChange(IrOpcode::kChangeFloat64ToInt32, MachineRepresentation::kFloat64,
Type::Signed32(), MachineRepresentation::kWord32);
- CheckChange(IrOpcode::kTruncateFloat64ToInt32,
+ CheckChange(IrOpcode::kTruncateFloat64ToWord32,
MachineRepresentation::kFloat64, Type::Number(),
MachineRepresentation::kWord32);
@@ -522,7 +543,7 @@
MachineRepresentation::kWord32, Type::None(),
MachineRepresentation::kFloat32);
CheckTwoChanges(IrOpcode::kChangeFloat32ToFloat64,
- IrOpcode::kTruncateFloat64ToInt32,
+ IrOpcode::kTruncateFloat64ToWord32,
MachineRepresentation::kFloat32, Type::Number(),
MachineRepresentation::kWord32);
}
diff --git a/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index c32f923..024747f 100644
--- a/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -125,11 +125,11 @@
// having to instantiate a ParseInfo first. Fix this!
ParseInfo parse_info(zone_, function);
- CompilationInfo compilation_info(&parse_info);
+ CompilationInfo compilation_info(&parse_info, function);
compilation_info.SetOptimizing();
compilation_info.MarkAsDeoptimizationEnabled();
- compiler::Pipeline pipeline(&compilation_info);
- Handle<Code> code = pipeline.GenerateCode();
+ compilation_info.MarkAsOptimizeFromBytecode();
+ Handle<Code> code = Pipeline::GenerateCodeForTesting(&compilation_info);
function->ReplaceCode(*code);
return function;
diff --git a/test/cctest/compiler/test-run-calls-to-external-references.cc b/test/cctest/compiler/test-run-calls-to-external-references.cc
index 3b79cd8..430e238 100644
--- a/test/cctest/compiler/test-run-calls-to-external-references.cc
+++ b/test/cctest/compiler/test-run-calls-to-external-references.cc
@@ -2,6 +2,7 @@
// source code is governed by a BSD-style license that can be found in the
// LICENSE file.
+#include "src/wasm/wasm-external-refs.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/codegen-tester.h"
#include "test/cctest/compiler/value-helper.h"
@@ -10,521 +11,234 @@
namespace internal {
namespace compiler {
-template <typename T>
-void TestExternalReferenceRoundingFunction(
- BufferedRawMachineAssemblerTester<int32_t>* m, ExternalReference ref,
- T (*comparison)(T)) {
- T parameter;
+template <typename P>
+void TestExternalReference(BufferedRawMachineAssemblerTester<int32_t>* m,
+ ExternalReference ref, void (*comparison)(P*),
+ P param) {
+ P comparison_param = param;
Node* function = m->ExternalConstant(ref);
m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(), function,
- m->PointerConstant(&parameter));
+ m->PointerConstant(&param));
m->Return(m->Int32Constant(4356));
- FOR_FLOAT64_INPUTS(i) {
- parameter = *i;
- m->Call();
- CHECK_DOUBLE_EQ(comparison(*i), parameter);
- }
+
+ m->Call();
+ comparison(&comparison_param);
+
+ CHECK_EQ(comparison_param, param);
+}
+
+template <typename P1, typename P2>
+void TestExternalReference(BufferedRawMachineAssemblerTester<int32_t>* m,
+ ExternalReference ref, void (*comparison)(P1*, P2*),
+ P1 param1, P2 param2) {
+ P1 comparison_param1 = param1;
+ P2 comparison_param2 = param2;
+
+ Node* function = m->ExternalConstant(ref);
+ m->CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
+ MachineType::Pointer(), function,
+ m->PointerConstant(&param1), m->PointerConstant(&param2));
+ m->Return(m->Int32Constant(4356));
+
+ m->Call();
+ comparison(&comparison_param1, &comparison_param2);
+
+ CHECK_EQ(comparison_param1, param1);
+ CHECK_EQ(comparison_param2, param2);
+}
+
+template <typename R, typename P>
+void TestExternalReference(BufferedRawMachineAssemblerTester<R>* m,
+ ExternalReference ref, R (*comparison)(P*),
+ P param) {
+ P comparison_param = param;
+
+ Node* function = m->ExternalConstant(ref);
+ m->Return(m->CallCFunction1(MachineType::Pointer(), MachineType::Pointer(),
+ function, m->PointerConstant(&param)));
+
+ CHECK_EQ(comparison(&comparison_param), m->Call());
+
+ CHECK_EQ(comparison_param, param);
+}
+
+template <typename R, typename P1, typename P2>
+void TestExternalReference(BufferedRawMachineAssemblerTester<R>* m,
+ ExternalReference ref, R (*comparison)(P1*, P2*),
+ P1 param1, P2 param2) {
+ P1 comparison_param1 = param1;
+ P2 comparison_param2 = param2;
+
+ Node* function = m->ExternalConstant(ref);
+ m->Return(m->CallCFunction2(
+ MachineType::Pointer(), MachineType::Pointer(), MachineType::Pointer(),
+ function, m->PointerConstant(&param1), m->PointerConstant(&param2)));
+
+ CHECK_EQ(comparison(&comparison_param1, &comparison_param2), m->Call());
+
+ CHECK_EQ(comparison_param1, param1);
+ CHECK_EQ(comparison_param2, param2);
}
TEST(RunCallF32Trunc) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_trunc(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, truncf);
+ TestExternalReference(&m, ref, wasm::f32_trunc_wrapper, 1.25f);
}
TEST(RunCallF32Floor) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_floor(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, floorf);
+ TestExternalReference(&m, ref, wasm::f32_floor_wrapper, 1.25f);
}
TEST(RunCallF32Ceil) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_ceil(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, ceilf);
+ TestExternalReference(&m, ref, wasm::f32_ceil_wrapper, 1.25f);
}
TEST(RunCallF32RoundTiesEven) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f32_nearest_int(m.isolate());
- TestExternalReferenceRoundingFunction<float>(&m, ref, nearbyintf);
+ TestExternalReference(&m, ref, wasm::f32_nearest_int_wrapper, 1.25f);
}
TEST(RunCallF64Trunc) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_trunc(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, trunc);
+ TestExternalReference(&m, ref, wasm::f64_trunc_wrapper, 1.25);
}
TEST(RunCallF64Floor) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_floor(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, floor);
+ TestExternalReference(&m, ref, wasm::f64_floor_wrapper, 1.25);
}
TEST(RunCallF64Ceil) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_ceil(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, ceil);
+ TestExternalReference(&m, ref, wasm::f64_ceil_wrapper, 1.25);
}
TEST(RunCallF64RoundTiesEven) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_f64_nearest_int(m.isolate());
- TestExternalReferenceRoundingFunction<double>(&m, ref, nearbyint);
+ TestExternalReference(&m, ref, wasm::f64_nearest_int_wrapper, 1.25);
}
TEST(RunCallInt64ToFloat32) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_to_float32(m.isolate());
-
- int64_t input;
- float output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
- FOR_INT64_INPUTS(i) {
- input = *i;
- m.Call();
- CHECK_FLOAT_EQ(static_cast<float>(*i), output);
- }
+ TestExternalReference(&m, ref, wasm::int64_to_float32_wrapper, int64_t(-2124),
+ 1.25f);
}
TEST(RunCallUint64ToFloat32) {
- struct {
- uint64_t input;
- uint32_t expected;
- } values[] = {{0x0, 0x0},
- {0x1, 0x3f800000},
- {0xffffffff, 0x4f800000},
- {0x1b09788b, 0x4dd84bc4},
- {0x4c5fce8, 0x4c98bf9d},
- {0xcc0de5bf, 0x4f4c0de6},
- {0x2, 0x40000000},
- {0x3, 0x40400000},
- {0x4, 0x40800000},
- {0x5, 0x40a00000},
- {0x8, 0x41000000},
- {0x9, 0x41100000},
- {0xffffffffffffffff, 0x5f800000},
- {0xfffffffffffffffe, 0x5f800000},
- {0xfffffffffffffffd, 0x5f800000},
- {0x0, 0x0},
- {0x100000000, 0x4f800000},
- {0xffffffff00000000, 0x5f800000},
- {0x1b09788b00000000, 0x5dd84bc4},
- {0x4c5fce800000000, 0x5c98bf9d},
- {0xcc0de5bf00000000, 0x5f4c0de6},
- {0x200000000, 0x50000000},
- {0x300000000, 0x50400000},
- {0x400000000, 0x50800000},
- {0x500000000, 0x50a00000},
- {0x800000000, 0x51000000},
- {0x900000000, 0x51100000},
- {0x273a798e187937a3, 0x5e1ce9e6},
- {0xece3af835495a16b, 0x5f6ce3b0},
- {0xb668ecc11223344, 0x5d3668ed},
- {0x9e, 0x431e0000},
- {0x43, 0x42860000},
- {0xaf73, 0x472f7300},
- {0x116b, 0x458b5800},
- {0x658ecc, 0x4acb1d98},
- {0x2b3b4c, 0x4a2ced30},
- {0x88776655, 0x4f087766},
- {0x70000000, 0x4ee00000},
- {0x7200000, 0x4ce40000},
- {0x7fffffff, 0x4f000000},
- {0x56123761, 0x4eac246f},
- {0x7fffff00, 0x4efffffe},
- {0x761c4761eeeeeeee, 0x5eec388f},
- {0x80000000eeeeeeee, 0x5f000000},
- {0x88888888dddddddd, 0x5f088889},
- {0xa0000000dddddddd, 0x5f200000},
- {0xddddddddaaaaaaaa, 0x5f5dddde},
- {0xe0000000aaaaaaaa, 0x5f600000},
- {0xeeeeeeeeeeeeeeee, 0x5f6eeeef},
- {0xfffffffdeeeeeeee, 0x5f800000},
- {0xf0000000dddddddd, 0x5f700000},
- {0x7fffffdddddddd, 0x5b000000},
- {0x3fffffaaaaaaaa, 0x5a7fffff},
- {0x1fffffaaaaaaaa, 0x59fffffd},
- {0xfffff, 0x497ffff0},
- {0x7ffff, 0x48ffffe0},
- {0x3ffff, 0x487fffc0},
- {0x1ffff, 0x47ffff80},
- {0xffff, 0x477fff00},
- {0x7fff, 0x46fffe00},
- {0x3fff, 0x467ffc00},
- {0x1fff, 0x45fff800},
- {0xfff, 0x457ff000},
- {0x7ff, 0x44ffe000},
- {0x3ff, 0x447fc000},
- {0x1ff, 0x43ff8000},
- {0x3fffffffffff, 0x56800000},
- {0x1fffffffffff, 0x56000000},
- {0xfffffffffff, 0x55800000},
- {0x7ffffffffff, 0x55000000},
- {0x3ffffffffff, 0x54800000},
- {0x1ffffffffff, 0x54000000},
- {0x8000008000000000, 0x5f000000},
- {0x8000008000000001, 0x5f000001},
- {0x8000008000000002, 0x5f000001},
- {0x8000008000000004, 0x5f000001},
- {0x8000008000000008, 0x5f000001},
- {0x8000008000000010, 0x5f000001},
- {0x8000008000000020, 0x5f000001},
- {0x8000009000000000, 0x5f000001},
- {0x800000a000000000, 0x5f000001},
- {0x8000008000100000, 0x5f000001},
- {0x8000000000000400, 0x5f000000},
- {0x8000000000000401, 0x5f000000}};
-
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_uint64_to_float32(m.isolate());
-
- uint64_t input;
- float output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
-
- for (size_t i = 0; i < arraysize(values); i++) {
- input = values[i].input;
- m.Call();
- CHECK_EQ(values[i].expected, bit_cast<uint32_t>(output));
- }
+ TestExternalReference(&m, ref, wasm::uint64_to_float32_wrapper,
+ uint64_t(2124), 1.25f);
}
TEST(RunCallInt64ToFloat64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_to_float64(m.isolate());
-
- int64_t input;
- double output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
- FOR_INT64_INPUTS(i) {
- input = *i;
- m.Call();
- CHECK_DOUBLE_EQ(static_cast<double>(*i), output);
- }
+ TestExternalReference(&m, ref, wasm::int64_to_float64_wrapper, int64_t(2124),
+ 1.25);
}
TEST(RunCallUint64ToFloat64) {
- struct {
- uint64_t input;
- uint64_t expected;
- } values[] = {{0x0, 0x0},
- {0x1, 0x3ff0000000000000},
- {0xffffffff, 0x41efffffffe00000},
- {0x1b09788b, 0x41bb09788b000000},
- {0x4c5fce8, 0x419317f3a0000000},
- {0xcc0de5bf, 0x41e981bcb7e00000},
- {0x2, 0x4000000000000000},
- {0x3, 0x4008000000000000},
- {0x4, 0x4010000000000000},
- {0x5, 0x4014000000000000},
- {0x8, 0x4020000000000000},
- {0x9, 0x4022000000000000},
- {0xffffffffffffffff, 0x43f0000000000000},
- {0xfffffffffffffffe, 0x43f0000000000000},
- {0xfffffffffffffffd, 0x43f0000000000000},
- {0x100000000, 0x41f0000000000000},
- {0xffffffff00000000, 0x43efffffffe00000},
- {0x1b09788b00000000, 0x43bb09788b000000},
- {0x4c5fce800000000, 0x439317f3a0000000},
- {0xcc0de5bf00000000, 0x43e981bcb7e00000},
- {0x200000000, 0x4200000000000000},
- {0x300000000, 0x4208000000000000},
- {0x400000000, 0x4210000000000000},
- {0x500000000, 0x4214000000000000},
- {0x800000000, 0x4220000000000000},
- {0x900000000, 0x4222000000000000},
- {0x273a798e187937a3, 0x43c39d3cc70c3c9c},
- {0xece3af835495a16b, 0x43ed9c75f06a92b4},
- {0xb668ecc11223344, 0x43a6cd1d98224467},
- {0x9e, 0x4063c00000000000},
- {0x43, 0x4050c00000000000},
- {0xaf73, 0x40e5ee6000000000},
- {0x116b, 0x40b16b0000000000},
- {0x658ecc, 0x415963b300000000},
- {0x2b3b4c, 0x41459da600000000},
- {0x88776655, 0x41e10eeccaa00000},
- {0x70000000, 0x41dc000000000000},
- {0x7200000, 0x419c800000000000},
- {0x7fffffff, 0x41dfffffffc00000},
- {0x56123761, 0x41d5848dd8400000},
- {0x7fffff00, 0x41dfffffc0000000},
- {0x761c4761eeeeeeee, 0x43dd8711d87bbbbc},
- {0x80000000eeeeeeee, 0x43e00000001dddde},
- {0x88888888dddddddd, 0x43e11111111bbbbc},
- {0xa0000000dddddddd, 0x43e40000001bbbbc},
- {0xddddddddaaaaaaaa, 0x43ebbbbbbbb55555},
- {0xe0000000aaaaaaaa, 0x43ec000000155555},
- {0xeeeeeeeeeeeeeeee, 0x43edddddddddddde},
- {0xfffffffdeeeeeeee, 0x43efffffffbdddde},
- {0xf0000000dddddddd, 0x43ee0000001bbbbc},
- {0x7fffffdddddddd, 0x435ffffff7777777},
- {0x3fffffaaaaaaaa, 0x434fffffd5555555},
- {0x1fffffaaaaaaaa, 0x433fffffaaaaaaaa},
- {0xfffff, 0x412ffffe00000000},
- {0x7ffff, 0x411ffffc00000000},
- {0x3ffff, 0x410ffff800000000},
- {0x1ffff, 0x40fffff000000000},
- {0xffff, 0x40efffe000000000},
- {0x7fff, 0x40dfffc000000000},
- {0x3fff, 0x40cfff8000000000},
- {0x1fff, 0x40bfff0000000000},
- {0xfff, 0x40affe0000000000},
- {0x7ff, 0x409ffc0000000000},
- {0x3ff, 0x408ff80000000000},
- {0x1ff, 0x407ff00000000000},
- {0x3fffffffffff, 0x42cfffffffffff80},
- {0x1fffffffffff, 0x42bfffffffffff00},
- {0xfffffffffff, 0x42affffffffffe00},
- {0x7ffffffffff, 0x429ffffffffffc00},
- {0x3ffffffffff, 0x428ffffffffff800},
- {0x1ffffffffff, 0x427ffffffffff000},
- {0x8000008000000000, 0x43e0000010000000},
- {0x8000008000000001, 0x43e0000010000000},
- {0x8000000000000400, 0x43e0000000000000},
- {0x8000000000000401, 0x43e0000000000001},
- {0x8000000000000402, 0x43e0000000000001},
- {0x8000000000000404, 0x43e0000000000001},
- {0x8000000000000408, 0x43e0000000000001},
- {0x8000000000000410, 0x43e0000000000001},
- {0x8000000000000420, 0x43e0000000000001},
- {0x8000000000000440, 0x43e0000000000001},
- {0x8000000000000480, 0x43e0000000000001},
- {0x8000000000000500, 0x43e0000000000001},
- {0x8000000000000600, 0x43e0000000000001}};
-
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_uint64_to_float64(m.isolate());
-
- uint64_t input;
- double output;
-
- Node* function = m.ExternalConstant(ref);
- m.CallCFunction2(MachineType::Pointer(), MachineType::Pointer(),
- MachineType::Pointer(), function, m.PointerConstant(&input),
- m.PointerConstant(&output));
- m.Return(m.Int32Constant(4356));
-
- for (size_t i = 0; i < arraysize(values); i++) {
- input = values[i].input;
- m.Call();
- CHECK_EQ(values[i].expected, bit_cast<uint64_t>(output));
- }
+ TestExternalReference(&m, ref, wasm::uint64_to_float64_wrapper,
+ uint64_t(2124), 1.25);
}
TEST(RunCallFloat32ToInt64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_float32_to_int64(m.isolate());
-
- float input;
- int64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT32_INPUTS(i) {
- input = *i;
- if (*i >= static_cast<float>(std::numeric_limits<int64_t>::min()) &&
- *i < static_cast<float>(std::numeric_limits<int64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<int64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float32_to_int64_wrapper, 1.25f,
+ int64_t(2124));
}
TEST(RunCallFloat32ToUint64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_float32_to_uint64(m.isolate());
-
- float input;
- uint64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT32_INPUTS(i) {
- input = *i;
- if (*i > -1.0 &&
- *i < static_cast<float>(std::numeric_limits<uint64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<uint64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float32_to_uint64_wrapper, 1.25f,
+ uint64_t(2124));
}
TEST(RunCallFloat64ToInt64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_float64_to_int64(m.isolate());
-
- double input;
- int64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- if (*i >= static_cast<double>(std::numeric_limits<int64_t>::min()) &&
- *i < static_cast<double>(std::numeric_limits<int64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<int64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float64_to_int64_wrapper, 1.25,
+ int64_t(2124));
}
TEST(RunCallFloat64ToUint64) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref =
ExternalReference::wasm_float64_to_uint64(m.isolate());
-
- double input;
- uint64_t output;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(
- MachineType::Int32(), MachineType::Pointer(), MachineType::Pointer(),
- function, m.PointerConstant(&input), m.PointerConstant(&output)));
- FOR_FLOAT64_INPUTS(i) {
- input = *i;
- if (*i > -1.0 &&
- *i < static_cast<double>(std::numeric_limits<uint64_t>::max())) {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(static_cast<uint64_t>(*i), output);
- } else {
- CHECK_EQ(0, m.Call());
- }
- }
+ TestExternalReference(&m, ref, wasm::float64_to_uint64_wrapper, 1.25,
+ uint64_t(2124));
}
TEST(RunCallInt64Div) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_div(m.isolate());
-
- int64_t dst;
- int64_t src;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else if (src == -1 && dst == std::numeric_limits<int64_t>::min()) {
- CHECK_EQ(-1, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i / *j, dst);
- }
- }
- }
+ TestExternalReference(&m, ref, wasm::int64_div_wrapper, int64_t(1774),
+ int64_t(21));
}
TEST(RunCallInt64Mod) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_int64_mod(m.isolate());
-
- int64_t dst;
- int64_t src;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_INT64_INPUTS(i) {
- FOR_INT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i % *j, dst);
- }
- }
- }
+ TestExternalReference(&m, ref, wasm::int64_mod_wrapper, int64_t(1774),
+ int64_t(21));
}
TEST(RunCallUint64Div) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_uint64_div(m.isolate());
-
- uint64_t dst;
- uint64_t src;
-
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i / *j, dst);
- }
- }
- }
+ TestExternalReference(&m, ref, wasm::uint64_div_wrapper, uint64_t(1774),
+ uint64_t(21));
}
TEST(RunCallUint64Mod) {
BufferedRawMachineAssemblerTester<int32_t> m;
ExternalReference ref = ExternalReference::wasm_uint64_mod(m.isolate());
+ TestExternalReference(&m, ref, wasm::uint64_mod_wrapper, uint64_t(1774),
+ uint64_t(21));
+}
- uint64_t dst;
- uint64_t src;
+TEST(RunCallWord32Ctz) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word32_ctz(m.isolate());
+ TestExternalReference(&m, ref, wasm::word32_ctz_wrapper, uint32_t(1774));
+}
- Node* function = m.ExternalConstant(ref);
- m.Return(m.CallCFunction2(MachineType::Int32(), MachineType::Pointer(),
- MachineType::Pointer(), function,
- m.PointerConstant(&dst), m.PointerConstant(&src)));
- FOR_UINT64_INPUTS(i) {
- FOR_UINT64_INPUTS(j) {
- dst = *i;
- src = *j;
- if (src == 0) {
- CHECK_EQ(0, m.Call());
- } else {
- CHECK_EQ(1, m.Call());
- CHECK_EQ(*i % *j, dst);
- }
- }
- }
+TEST(RunCallWord64Ctz) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word64_ctz(m.isolate());
+ TestExternalReference(&m, ref, wasm::word64_ctz_wrapper, uint64_t(1774));
+}
+
+TEST(RunCallWord32Popcnt) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word32_popcnt(m.isolate());
+ TestExternalReference(&m, ref, wasm::word32_popcnt_wrapper, uint32_t(1774));
+}
+
+TEST(RunCallWord64Popcnt) {
+ BufferedRawMachineAssemblerTester<uint32_t> m;
+ ExternalReference ref = ExternalReference::wasm_word64_popcnt(m.isolate());
+ TestExternalReference(&m, ref, wasm::word64_popcnt_wrapper, uint64_t(1774));
}
} // namespace compiler
} // namespace internal
diff --git a/test/cctest/compiler/test-run-inlining.cc b/test/cctest/compiler/test-run-inlining.cc
index 234060c..e689bf7 100644
--- a/test/cctest/compiler/test-run-inlining.cc
+++ b/test/cctest/compiler/test-run-inlining.cc
@@ -16,9 +16,9 @@
void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
StackTraceFrameIterator it(CcTest::i_isolate());
int frames_seen = 0;
- JavaScriptFrame* topmost = it.frame();
+ JavaScriptFrame* topmost = it.javascript_frame();
while (!it.done()) {
- JavaScriptFrame* frame = it.frame();
+ JavaScriptFrame* frame = it.javascript_frame();
List<JSFunction*> functions(2);
frame->GetFunctions(&functions);
PrintF("%d %s, inline count: %d\n", frames_seen,
@@ -47,14 +47,11 @@
.FromJust());
}
-
const uint32_t kRestrictedInliningFlags =
- CompilationInfo::kFunctionContextSpecializing |
- CompilationInfo::kTypingEnabled;
+ CompilationInfo::kFunctionContextSpecializing;
const uint32_t kInlineFlags = CompilationInfo::kInliningEnabled |
- CompilationInfo::kFunctionContextSpecializing |
- CompilationInfo::kTypingEnabled;
+ CompilationInfo::kFunctionContextSpecializing;
} // namespace
diff --git a/test/cctest/compiler/test-run-jsops.cc b/test/cctest/compiler/test-run-jsops.cc
index b68fc1c..78e1257 100644
--- a/test/cctest/compiler/test-run-jsops.cc
+++ b/test/cctest/compiler/test-run-jsops.cc
@@ -512,7 +512,6 @@
TEST(ClassLiteral) {
- FLAG_harmony_sloppy = true;
const char* src =
"(function(a,b) {"
" class C {"
diff --git a/test/cctest/compiler/test-run-load-store.cc b/test/cctest/compiler/test-run-load-store.cc
new file mode 100644
index 0000000..6484d30
--- /dev/null
+++ b/test/cctest/compiler/test-run-load-store.cc
@@ -0,0 +1,919 @@
+// Copyright 2016 the V8 project authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::base;
+
+namespace {
+template <typename Type>
+void CheckOobValue(Type val) {
+ UNREACHABLE();
+}
+
+template <>
+void CheckOobValue(int32_t val) {
+ CHECK_EQ(0, val);
+}
+
+template <>
+void CheckOobValue(int64_t val) {
+ CHECK_EQ(0, val);
+}
+
+template <>
+void CheckOobValue(float val) {
+ CHECK(std::isnan(val));
+}
+
+template <>
+void CheckOobValue(double val) {
+ CHECK(std::isnan(val));
+}
+} // namespace
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// This is a America!
+#define A_BILLION 1000000000ULL
+#define A_GIG (1024ULL * 1024ULL * 1024ULL)
+
+TEST(RunLoadInt32) {
+ RawMachineAssemblerTester<int32_t> m;
+
+ int32_t p1 = 0; // loads directly from this location.
+ m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
+
+ FOR_INT32_INPUTS(i) {
+ p1 = *i;
+ CHECK_EQ(p1, m.Call());
+ }
+}
+
+TEST(RunLoadInt32Offset) {
+ int32_t p1 = 0; // loads directly from this location.
+
+ int32_t offsets[] = {-2000000, -100, -101, 1, 3,
+ 7, 120, 2000, 2000000000, 0xff};
+
+ for (size_t i = 0; i < arraysize(offsets); i++) {
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t offset = offsets[i];
+ byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
+ // generate load [#base + #index]
+ m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
+
+ FOR_INT32_INPUTS(j) {
+ p1 = *j;
+ CHECK_EQ(p1, m.Call());
+ }
+ }
+}
+
+TEST(RunLoadStoreFloat32Offset) {
+ float p1 = 0.0f; // loads directly from this location.
+ float p2 = 0.0f; // and stores directly into this location.
+
+ FOR_INT32_INPUTS(i) {
+ int32_t magic = 0x2342aabb + *i * 3;
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t offset = *i;
+ byte* from = reinterpret_cast<byte*>(&p1) - offset;
+ byte* to = reinterpret_cast<byte*>(&p2) - offset;
+ // generate load [#base + #index]
+ Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load, kNoWriteBarrier);
+ m.Return(m.Int32Constant(magic));
+
+ FOR_FLOAT32_INPUTS(j) {
+ p1 = *j;
+ p2 = *j - 5;
+ CHECK_EQ(magic, m.Call());
+ CheckDoubleEq(p1, p2);
+ }
+ }
+}
+
+TEST(RunLoadStoreFloat64Offset) {
+ double p1 = 0; // loads directly from this location.
+ double p2 = 0; // and stores directly into this location.
+
+ FOR_INT32_INPUTS(i) {
+ int32_t magic = 0x2342aabb + *i * 3;
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t offset = *i;
+ byte* from = reinterpret_cast<byte*>(&p1) - offset;
+ byte* to = reinterpret_cast<byte*>(&p2) - offset;
+ // generate load [#base + #index]
+ Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
+ m.IntPtrConstant(offset));
+ m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
+ m.IntPtrConstant(offset), load, kNoWriteBarrier);
+ m.Return(m.Int32Constant(magic));
+
+ FOR_FLOAT64_INPUTS(j) {
+ p1 = *j;
+ p2 = *j - 5;
+ CHECK_EQ(magic, m.Call());
+ CheckDoubleEq(p1, p2);
+ }
+ }
+}
+
+namespace {
+template <typename Type>
+void RunLoadImmIndex(MachineType rep) {
+ const int kNumElems = 3;
+ Type buffer[kNumElems];
+
+ // initialize the buffer with some raw data.
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ // Test with various large and small offsets.
+ for (int offset = -1; offset <= 200000; offset *= -5) {
+ for (int i = 0; i < kNumElems; i++) {
+ BufferedRawMachineAssemblerTester<Type> m;
+ Node* base = m.PointerConstant(buffer - offset);
+ Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
+ m.Return(m.Load(rep, base, index));
+
+ volatile Type expected = buffer[i];
+ volatile Type actual = m.Call();
+ CHECK_EQ(expected, actual);
+ }
+ }
+}
+
+template <typename CType>
+void RunLoadStore(MachineType rep) {
+ const int kNumElems = 4;
+ CType buffer[kNumElems];
+
+ for (int32_t x = 0; x < kNumElems; x++) {
+ int32_t y = kNumElems - x - 1;
+ // initialize the buffer with raw data.
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t OK = 0x29000 + x;
+ Node* base = m.PointerConstant(buffer);
+ Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
+ Node* load = m.Load(rep, base, index0);
+ Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
+ m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
+ m.Return(m.Int32Constant(OK));
+
+ CHECK(buffer[x] != buffer[y]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(buffer[x] == buffer[y]);
+ }
+}
+} // namespace
+
+TEST(RunLoadImmIndex) {
+ RunLoadImmIndex<int8_t>(MachineType::Int8());
+ RunLoadImmIndex<uint8_t>(MachineType::Uint8());
+ RunLoadImmIndex<int16_t>(MachineType::Int16());
+ RunLoadImmIndex<uint16_t>(MachineType::Uint16());
+ RunLoadImmIndex<int32_t>(MachineType::Int32());
+ RunLoadImmIndex<uint32_t>(MachineType::Uint32());
+ RunLoadImmIndex<int32_t*>(MachineType::AnyTagged());
+ RunLoadImmIndex<float>(MachineType::Float32());
+ RunLoadImmIndex<double>(MachineType::Float64());
+#if V8_TARGET_ARCH_64_BIT
+ RunLoadImmIndex<int64_t>(MachineType::Int64());
+#endif
+ // TODO(titzer): test various indexing modes.
+}
+
+TEST(RunLoadStore) {
+ RunLoadStore<int8_t>(MachineType::Int8());
+ RunLoadStore<uint8_t>(MachineType::Uint8());
+ RunLoadStore<int16_t>(MachineType::Int16());
+ RunLoadStore<uint16_t>(MachineType::Uint16());
+ RunLoadStore<int32_t>(MachineType::Int32());
+ RunLoadStore<uint32_t>(MachineType::Uint32());
+ RunLoadStore<void*>(MachineType::AnyTagged());
+ RunLoadStore<float>(MachineType::Float32());
+ RunLoadStore<double>(MachineType::Float64());
+#if V8_TARGET_ARCH_64_BIT
+ RunLoadStore<int64_t>(MachineType::Int64());
+#endif
+}
+
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - bytes
+#else
+#error "Unknown Architecture"
+#endif
+
+TEST(RunLoadStoreSignExtend32) {
+ int32_t buffer[4];
+ RawMachineAssemblerTester<int32_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
+ Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Int32());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
+ m.Return(load8);
+
+ FOR_INT32_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
+ CHECK_EQ(*i, buffer[3]);
+ }
+}
+
+TEST(RunLoadStoreZeroExtend32) {
+ uint32_t buffer[4];
+ RawMachineAssemblerTester<uint32_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
+ Node* load32 = m.LoadFromPointer(&buffer[0], MachineType::Uint32());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord32, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord32, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord32, load32);
+ m.Return(load8);
+
+ FOR_UINT32_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ((*i & 0xff), m.Call());
+ CHECK_EQ((*i & 0xff), buffer[1]);
+ CHECK_EQ((*i & 0xffff), buffer[2]);
+ CHECK_EQ(*i, buffer[3]);
+ }
+}
+
+#if V8_TARGET_ARCH_64_BIT
+TEST(RunCheckedLoadInt64) {
+ int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
+ RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
+ Node* base = m.PointerConstant(buffer);
+ Node* index = m.Parameter(0);
+ Node* length = m.Int32Constant(16);
+ Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
+ index, length);
+ m.Return(load);
+
+ CHECK_EQ(buffer[0], m.Call(0));
+ CHECK_EQ(buffer[1], m.Call(8));
+ CheckOobValue(m.Call(16));
+}
+
+TEST(RunLoadStoreSignExtend64) {
+ if (true) return; // TODO(titzer): sign extension of loads to 64-bit.
+ int64_t buffer[5];
+ RawMachineAssemblerTester<int64_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Int8());
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Int16());
+ Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Int32());
+ Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Int64());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
+ m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
+ m.Return(load8);
+
+ FOR_INT64_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), m.Call());
+ CHECK_EQ(static_cast<int8_t>(*i & 0xff), buffer[1]);
+ CHECK_EQ(static_cast<int16_t>(*i & 0xffff), buffer[2]);
+ CHECK_EQ(static_cast<int32_t>(*i & 0xffffffff), buffer[3]);
+ CHECK_EQ(*i, buffer[4]);
+ }
+}
+
+TEST(RunLoadStoreZeroExtend64) {
+ if (kPointerSize < 8) return;
+ uint64_t buffer[5];
+ RawMachineAssemblerTester<int64_t> m;
+ Node* load8 = m.LoadFromPointer(LSB(&buffer[0], 1), MachineType::Uint8());
+ Node* load16 = m.LoadFromPointer(LSB(&buffer[0], 2), MachineType::Uint16());
+ Node* load32 = m.LoadFromPointer(LSB(&buffer[0], 4), MachineType::Uint32());
+ Node* load64 = m.LoadFromPointer(&buffer[0], MachineType::Uint64());
+ m.StoreToPointer(&buffer[1], MachineRepresentation::kWord64, load8);
+ m.StoreToPointer(&buffer[2], MachineRepresentation::kWord64, load16);
+ m.StoreToPointer(&buffer[3], MachineRepresentation::kWord64, load32);
+ m.StoreToPointer(&buffer[4], MachineRepresentation::kWord64, load64);
+ m.Return(load8);
+
+ FOR_UINT64_INPUTS(i) {
+ buffer[0] = *i;
+
+ CHECK_EQ((*i & 0xff), m.Call());
+ CHECK_EQ((*i & 0xff), buffer[1]);
+ CHECK_EQ((*i & 0xffff), buffer[2]);
+ CHECK_EQ((*i & 0xffffffff), buffer[3]);
+ CHECK_EQ(*i, buffer[4]);
+ }
+}
+
+TEST(RunCheckedStoreInt64) {
+ const int64_t write = 0x5566778899aabbLL;
+ const int64_t before = 0x33bbccddeeff0011LL;
+ int64_t buffer[] = {before, before};
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
+ Node* base = m.PointerConstant(buffer);
+ Node* index = m.Parameter(0);
+ Node* length = m.Int32Constant(16);
+ Node* value = m.Int64Constant(write);
+ Node* store =
+ m.AddNode(m.machine()->CheckedStore(MachineRepresentation::kWord64), base,
+ index, length, value);
+ USE(store);
+ m.Return(m.Int32Constant(11));
+
+ CHECK_EQ(11, m.Call(16));
+ CHECK_EQ(before, buffer[0]);
+ CHECK_EQ(before, buffer[1]);
+
+ CHECK_EQ(11, m.Call(0));
+ CHECK_EQ(write, buffer[0]);
+ CHECK_EQ(before, buffer[1]);
+
+ CHECK_EQ(11, m.Call(8));
+ CHECK_EQ(write, buffer[0]);
+ CHECK_EQ(write, buffer[1]);
+}
+#endif
+
+namespace {
+template <typename IntType>
+void LoadStoreTruncation(MachineType kRepresentation) {
+ IntType input;
+
+ RawMachineAssemblerTester<int32_t> m;
+ Node* a = m.LoadFromPointer(&input, kRepresentation);
+ Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
+ m.StoreToPointer(&input, kRepresentation.representation(), ap1);
+ m.Return(ap1);
+
+ const IntType max = std::numeric_limits<IntType>::max();
+ const IntType min = std::numeric_limits<IntType>::min();
+
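+  // The store truncates ap1 to the narrow representation, while the call
+  // returns the untruncated 32-bit sum, so wrap-around at the type bounds is
+  // observed through |input| (e.g. max becomes min) rather than through the
+  // return value.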
+ // Test upper bound.
+ input = max;
+ CHECK_EQ(max + 1, m.Call());
+ CHECK_EQ(min, input);
+
+ // Test lower bound.
+ input = min;
+ CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
+ CHECK_EQ(min + 1, input);
+
+ // Test all one byte values that are not one byte bounds.
+ for (int i = -127; i < 127; i++) {
+ input = i;
+ int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
+ CHECK_EQ(static_cast<IntType>(expected), m.Call());
+ CHECK_EQ(static_cast<IntType>(i + 1), input);
+ }
+}
+} // namespace
+
+TEST(RunLoadStoreTruncation) {
+ LoadStoreTruncation<int8_t>(MachineType::Int8());
+ LoadStoreTruncation<int16_t>(MachineType::Int16());
+}
+
+void TestRunOobCheckedLoad(bool length_is_immediate) {
+ USE(CheckOobValue<int32_t>);
+ USE(CheckOobValue<int64_t>);
+ USE(CheckOobValue<float>);
+ USE(CheckOobValue<double>);
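+  // The USE()s above keep each CheckOobValue specialization referenced,
+  // presumably to avoid unused-function warnings on targets whose tests do
+  // not otherwise call them.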
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ MachineOperatorBuilder machine(m.zone());
+ const int32_t kNumElems = 27;
+ const int32_t kLength = kNumElems * 4;
+
+ int32_t buffer[kNumElems];
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* node =
+ m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
+ m.Return(node);
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(100);
+ rng.NextBytes(&buffer[0], sizeof(buffer));
+ }
+
+ // in-bounds accesses.
+ for (int32_t i = 0; i < kNumElems; i++) {
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ int32_t expected = buffer[i];
+ CHECK_EQ(expected, m.Call(offset, kLength));
+ }
+
+ // slightly out-of-bounds accesses.
+  for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ CheckOobValue(m.Call(offset, kLength));
+ }
+
+ // way out-of-bounds accesses.
+ for (int32_t offset = -2000000000; offset <= 2000000000;
+ offset += 100000000) {
+ if (offset == 0) continue;
+ CheckOobValue(m.Call(offset, kLength));
+ }
+}
+
+TEST(RunOobCheckedLoad) { TestRunOobCheckedLoad(false); }
+
+TEST(RunOobCheckedLoadImm) { TestRunOobCheckedLoad(true); }
+
+void TestRunOobCheckedStore(bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Int32(),
+ MachineType::Int32());
+ MachineOperatorBuilder machine(m.zone());
+ const int32_t kNumElems = 29;
+ const int32_t kValue = -78227234;
+ const int32_t kLength = kNumElems * 4;
+
+ int32_t buffer[kNumElems + kNumElems];
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* val = m.Int32Constant(kValue);
+ m.AddNode(machine.CheckedStore(MachineRepresentation::kWord32), base, offset,
+ len, val);
+ m.Return(val);
+
+ // in-bounds accesses.
+ for (int32_t i = 0; i < kNumElems; i++) {
+ memset(buffer, 0, sizeof(buffer));
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ CHECK_EQ(kValue, m.Call(offset, kLength));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ if (i == j) {
+ CHECK_EQ(kValue, buffer[j]);
+ } else {
+ CHECK_EQ(0, buffer[j]);
+ }
+ }
+ }
+
+ memset(buffer, 0, sizeof(buffer));
+
+ // slightly out-of-bounds accesses.
+  for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ int32_t offset = static_cast<int32_t>(i * sizeof(int32_t));
+ CHECK_EQ(kValue, m.Call(offset, kLength));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, buffer[j]);
+ }
+ }
+
+ // way out-of-bounds accesses.
+ for (int32_t offset = -2000000000; offset <= 2000000000;
+ offset += 100000000) {
+ if (offset == 0) continue;
+ CHECK_EQ(kValue, m.Call(offset, kLength));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, buffer[j]);
+ }
+ }
+}
+
+TEST(RunOobCheckedStore) { TestRunOobCheckedStore(false); }
+
+TEST(RunOobCheckedStoreImm) { TestRunOobCheckedStore(true); }
+
+// TODO(titzer): CheckedLoad/CheckedStore don't support 64-bit offsets.
+#define ALLOW_64_BIT_OFFSETS 0
+
+#if V8_TARGET_ARCH_64_BIT && ALLOW_64_BIT_OFFSETS
+
+void TestRunOobCheckedLoad64(uint32_t pseudo_base, bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint64(),
+ MachineType::Uint64());
+ MachineOperatorBuilder machine(m.zone());
+ const uint32_t kNumElems = 25;
+ const uint32_t kLength = kNumElems * 4;
+ int32_t real_buffer[kNumElems];
+
+ // Simulate the end of a large buffer.
+ int32_t* buffer = real_buffer - (pseudo_base / 4);
+ uint64_t length = kLength + pseudo_base;
+
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int64Constant(length) : m.Parameter(1);
+ Node* node =
+ m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
+ m.Return(node);
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(100);
+ rng.NextBytes(&real_buffer[0], sizeof(real_buffer));
+ }
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ uint64_t offset = pseudo_base + i * 4;
+ int32_t expected = real_buffer[i];
+ CHECK_EQ(expected, m.Call(offset, length));
+ }
+
+ // in-bounds accesses w.r.t lower 32-bits, but upper bits set.
+ for (uint64_t i = 0x100000000ULL; i != 0; i <<= 1) {
+ uint64_t offset = pseudo_base + i;
+ CheckOobValue(m.Call(offset, length));
+ }
+
+ // slightly out-of-bounds accesses.
+  for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint64_t offset = pseudo_base + i * 4;
+    CheckOobValue(m.Call(offset, length));
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t offset = length; offset < 100 * A_BILLION; offset += A_GIG) {
+ if (offset < length) continue;
+    CheckOobValue(m.Call(offset, length));
+ }
+}
+
+TEST(RunOobCheckedLoad64_0) {
+ TestRunOobCheckedLoad64(0, false);
+ TestRunOobCheckedLoad64(0, true);
+}
+
+TEST(RunOobCheckedLoad64_1) {
+ TestRunOobCheckedLoad64(1 * A_BILLION, false);
+ TestRunOobCheckedLoad64(1 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad64_2) {
+ TestRunOobCheckedLoad64(2 * A_BILLION, false);
+ TestRunOobCheckedLoad64(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad64_3) {
+ TestRunOobCheckedLoad64(3 * A_BILLION, false);
+ TestRunOobCheckedLoad64(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad64_4) {
+ TestRunOobCheckedLoad64(4 * A_BILLION, false);
+ TestRunOobCheckedLoad64(4 * A_BILLION, true);
+}
+
+void TestRunOobCheckedStore64(uint32_t pseudo_base, bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint64(),
+ MachineType::Uint64());
+ MachineOperatorBuilder machine(m.zone());
+ const uint32_t kNumElems = 21;
+ const uint32_t kLength = kNumElems * 4;
+ const uint32_t kValue = 897234987;
+ int32_t real_buffer[kNumElems + kNumElems];
+
+ // Simulate the end of a large buffer.
+ int32_t* buffer = real_buffer - (pseudo_base / 4);
+ uint64_t length = kLength + pseudo_base;
+
+ Node* base = m.PointerConstant(buffer);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int64Constant(length) : m.Parameter(1);
+ Node* val = m.Int32Constant(kValue);
+ m.AddNode(machine.CheckedStore(MachineRepresentation::kWord32), base, offset,
+ len, val);
+ m.Return(val);
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ memset(real_buffer, 0, sizeof(real_buffer));
+ uint64_t offset = pseudo_base + i * 4;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (uint32_t j = 0; j < kNumElems + kNumElems; j++) {
+ if (i == j) {
+ CHECK_EQ(kValue, real_buffer[j]);
+ } else {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+ }
+
+ memset(real_buffer, 0, sizeof(real_buffer));
+
+ // in-bounds accesses w.r.t lower 32-bits, but upper bits set.
+ for (uint64_t i = 0x100000000ULL; i != 0; i <<= 1) {
+ uint64_t offset = pseudo_base + i;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+
+ // slightly out-of-bounds accesses.
+  for (uint32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint64_t offset = pseudo_base + i * 4;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t offset = length; offset < 100 * A_BILLION; offset += A_GIG) {
+ if (offset < length) continue;
+ CHECK_EQ(kValue, m.Call(offset, length));
+ for (int32_t j = 0; j < kNumElems + kNumElems; j++) {
+ CHECK_EQ(0, real_buffer[j]);
+ }
+ }
+}
+
+TEST(RunOobCheckedStore64_0) {
+ TestRunOobCheckedStore64(0, false);
+ TestRunOobCheckedStore64(0, true);
+}
+
+TEST(RunOobCheckedStore64_1) {
+ TestRunOobCheckedStore64(1 * A_BILLION, false);
+ TestRunOobCheckedStore64(1 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedStore64_2) {
+ TestRunOobCheckedStore64(2 * A_BILLION, false);
+ TestRunOobCheckedStore64(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedStore64_3) {
+ TestRunOobCheckedStore64(3 * A_BILLION, false);
+ TestRunOobCheckedStore64(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedStore64_4) {
+ TestRunOobCheckedStore64(4 * A_BILLION, false);
+ TestRunOobCheckedStore64(4 * A_BILLION, true);
+}
+
+#endif
+
+void TestRunOobCheckedLoad_pseudo(uint64_t x, bool length_is_immediate) {
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+
+ uint32_t pseudo_base = static_cast<uint32_t>(x);
+ MachineOperatorBuilder machine(m.zone());
+ const uint32_t kNumElems = 29;
+ const uint32_t kLength = pseudo_base + kNumElems * 4;
+
+ int32_t buffer[kNumElems];
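+  // Bias the base pointer below the real array: the checked region
+  // [base, base + kLength) ends exactly at the end of |buffer|, so only
+  // offsets in [pseudo_base, kLength) are backed by real memory, simulating
+  // the tail of a much larger buffer.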
+ Node* base = m.PointerConstant(reinterpret_cast<byte*>(buffer) - pseudo_base);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* node =
+ m.AddNode(machine.CheckedLoad(MachineType::Int32()), base, offset, len);
+ m.Return(node);
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(100);
+ rng.NextBytes(&buffer[0], sizeof(buffer));
+ }
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(int32_t));
+ uint32_t expected = buffer[i];
+ CHECK_EQ(expected, m.Call(offset + pseudo_base, kLength));
+ }
+
+ // slightly out-of-bounds accesses.
+ for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(int32_t));
+ CheckOobValue(m.Call(offset + pseudo_base, kLength));
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t i = pseudo_base + sizeof(buffer); i < 0xFFFFFFFF;
+ i += A_BILLION) {
+ uint32_t offset = static_cast<uint32_t>(i);
+ CheckOobValue(m.Call(offset, kLength));
+ }
+}
+
+TEST(RunOobCheckedLoad_pseudo0) {
+ TestRunOobCheckedLoad_pseudo(0, false);
+ TestRunOobCheckedLoad_pseudo(0, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo1) {
+ TestRunOobCheckedLoad_pseudo(100000, false);
+ TestRunOobCheckedLoad_pseudo(100000, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo2) {
+ TestRunOobCheckedLoad_pseudo(A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo3) {
+ TestRunOobCheckedLoad_pseudo(A_GIG, false);
+ TestRunOobCheckedLoad_pseudo(A_GIG, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo4) {
+ TestRunOobCheckedLoad_pseudo(2 * A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo5) {
+ TestRunOobCheckedLoad_pseudo(2 * A_GIG, false);
+ TestRunOobCheckedLoad_pseudo(2 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo6) {
+ TestRunOobCheckedLoad_pseudo(3 * A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo7) {
+ TestRunOobCheckedLoad_pseudo(3 * A_GIG, false);
+ TestRunOobCheckedLoad_pseudo(3 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoad_pseudo8) {
+ TestRunOobCheckedLoad_pseudo(4 * A_BILLION, false);
+ TestRunOobCheckedLoad_pseudo(4 * A_BILLION, true);
+}
+
+template <typename MemType>
+void TestRunOobCheckedLoadT_pseudo(uint64_t x, bool length_is_immediate) {
+ const int32_t kReturn = 11999;
+ const uint32_t kNumElems = 29;
+ MemType buffer[kNumElems];
+ uint32_t pseudo_base = static_cast<uint32_t>(x);
+ const uint32_t kLength = static_cast<uint32_t>(pseudo_base + sizeof(buffer));
+
+ MemType result;
+
+ RawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ MachineOperatorBuilder machine(m.zone());
+ Node* base = m.PointerConstant(reinterpret_cast<byte*>(buffer) - pseudo_base);
+ Node* offset = m.Parameter(0);
+ Node* len = length_is_immediate ? m.Int32Constant(kLength) : m.Parameter(1);
+ Node* node = m.AddNode(machine.CheckedLoad(MachineTypeForC<MemType>()), base,
+ offset, len);
+ Node* store = m.StoreToPointer(
+ &result, MachineTypeForC<MemType>().representation(), node);
+ USE(store);
+ m.Return(m.Int32Constant(kReturn));
+
+ {
+ // randomize memory.
+ v8::base::RandomNumberGenerator rng;
+ rng.SetSeed(103);
+ rng.NextBytes(&buffer[0], sizeof(buffer));
+ }
+
+ // in-bounds accesses.
+ for (uint32_t i = 0; i < kNumElems; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(MemType));
+ MemType expected = buffer[i];
+ CHECK_EQ(kReturn, m.Call(offset + pseudo_base, kLength));
+ CHECK_EQ(expected, result);
+ }
+
+ // slightly out-of-bounds accesses.
+ for (int32_t i = kNumElems; i < kNumElems + 30; i++) {
+ uint32_t offset = static_cast<uint32_t>(i * sizeof(MemType));
+ CHECK_EQ(kReturn, m.Call(offset + pseudo_base, kLength));
+ CheckOobValue(result);
+ }
+
+ // way out-of-bounds accesses.
+ for (uint64_t i = pseudo_base + sizeof(buffer); i < 0xFFFFFFFF;
+ i += A_BILLION) {
+ uint32_t offset = static_cast<uint32_t>(i);
+ CHECK_EQ(kReturn, m.Call(offset, kLength));
+ CheckOobValue(result);
+ }
+}
+
+TEST(RunOobCheckedLoadT_pseudo0) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(0, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(0, true);
+ TestRunOobCheckedLoadT_pseudo<float>(0, false);
+ TestRunOobCheckedLoadT_pseudo<float>(0, true);
+ TestRunOobCheckedLoadT_pseudo<double>(0, false);
+ TestRunOobCheckedLoadT_pseudo<double>(0, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo1) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(100000, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(100000, true);
+ TestRunOobCheckedLoadT_pseudo<float>(100000, false);
+ TestRunOobCheckedLoadT_pseudo<float>(100000, true);
+ TestRunOobCheckedLoadT_pseudo<double>(100000, false);
+ TestRunOobCheckedLoadT_pseudo<double>(100000, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo2) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo3) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<float>(A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<float>(A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<double>(A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<double>(A_GIG, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo4) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo5) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(2 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<float>(2 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<double>(2 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo6) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_BILLION, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo7) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(3 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<float>(3 * A_GIG, true);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_GIG, false);
+ TestRunOobCheckedLoadT_pseudo<double>(3 * A_GIG, true);
+}
+
+TEST(RunOobCheckedLoadT_pseudo8) {
+ TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<int32_t>(4 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<float>(4 * A_BILLION, true);
+ TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, false);
+ TestRunOobCheckedLoadT_pseudo<double>(4 * A_BILLION, true);
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc
index 2bfe124..6d681bc 100644
--- a/test/cctest/compiler/test-run-machops.cc
+++ b/test/cctest/compiler/test-run-machops.cc
@@ -28,6 +28,39 @@
CHECK_EQ(1, m.Call());
}
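+// Builds (shift_left << shift_right) and (add_left + add_right) and combines
+// them with Int32Add in either operand order; the |lsa| name suggests this
+// exercises fused add-shift selection (e.g. MIPS32R6 LSA), though the test
+// only checks the plain 32-bit arithmetic result.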
+static int RunInt32AddShift(bool is_left, int32_t add_left, int32_t add_right,
+                            int32_t shift_left, int32_t shift_right) {
+  RawMachineAssemblerTester<int32_t> m;
+  Node* shift =
+      m.Word32Shl(m.Int32Constant(shift_left), m.Int32Constant(shift_right));
+ Node* add = m.Int32Add(m.Int32Constant(add_left), m.Int32Constant(add_right));
+ Node* lsa = is_left ? m.Int32Add(shift, add) : m.Int32Add(add, shift);
+ m.Return(lsa);
+ return m.Call();
+}
+
+TEST(RunInt32AddShift) {
+ struct Test_case {
+    int32_t add_left, add_right, shift_left, shift_right, expected;
+ };
+
+ Test_case tc[] = {
+ {20, 22, 4, 2, 58},
+ {20, 22, 4, 1, 50},
+ {20, 22, 1, 6, 106},
+ {INT_MAX - 2, 1, 1, 1, INT_MIN}, // INT_MAX - 2 + 1 + (1 << 1), overflow.
+ };
+ const size_t tc_size = sizeof(tc) / sizeof(Test_case);
+
+  for (size_t i = 0; i < tc_size; ++i) {
+    CHECK_EQ(tc[i].expected,
+             RunInt32AddShift(false, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+    CHECK_EQ(tc[i].expected,
+             RunInt32AddShift(true, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+  }
+}
TEST(RunWord32ReverseBits) {
BufferedRawMachineAssemblerTester<uint32_t> m(MachineType::Uint32());
@@ -636,6 +669,38 @@
}
}
+static int64_t RunInt64AddShift(bool is_left, int64_t add_left,
+                                int64_t add_right, int64_t shift_left,
+                                int64_t shift_right) {
+  RawMachineAssemblerTester<int64_t> m;
+  Node* shift =
+      m.Word64Shl(m.Int64Constant(shift_left), m.Int64Constant(shift_right));
+  Node* add =
+      m.Int64Add(m.Int64Constant(add_left), m.Int64Constant(add_right));
+  Node* dlsa = is_left ? m.Int64Add(shift, add) : m.Int64Add(add, shift);
+  m.Return(dlsa);
+  return m.Call();
+}
+
+TEST(RunInt64AddShift) {
+ struct Test_case {
+    int64_t add_left, add_right, shift_left, shift_right, expected;
+ };
+
+ Test_case tc[] = {
+ {20, 22, 4, 2, 58},
+ {20, 22, 4, 1, 50},
+ {20, 22, 1, 6, 106},
+ {INT64_MAX - 2, 1, 1, 1,
+ INT64_MIN}, // INT64_MAX - 2 + 1 + (1 << 1), overflow.
+ };
+ const size_t tc_size = sizeof(tc) / sizeof(Test_case);
+
+  for (size_t i = 0; i < tc_size; ++i) {
+    CHECK_EQ(tc[i].expected,
+             RunInt64AddShift(false, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+    CHECK_EQ(tc[i].expected,
+             RunInt64AddShift(true, tc[i].add_left, tc[i].add_right,
+                              tc[i].shift_left, tc[i].shift_right));
+  }
+}
// TODO(titzer): add tests that run 64-bit integer operations.
#endif // V8_TARGET_ARCH_64_BIT
@@ -1142,94 +1207,6 @@
}
-TEST(RunLoadInt32) {
- RawMachineAssemblerTester<int32_t> m;
-
- int32_t p1 = 0; // loads directly from this location.
- m.Return(m.LoadFromPointer(&p1, MachineType::Int32()));
-
- FOR_INT32_INPUTS(i) {
- p1 = *i;
- CHECK_EQ(p1, m.Call());
- }
-}
-
-
-TEST(RunLoadInt32Offset) {
- int32_t p1 = 0; // loads directly from this location.
-
- int32_t offsets[] = {-2000000, -100, -101, 1, 3,
- 7, 120, 2000, 2000000000, 0xff};
-
- for (size_t i = 0; i < arraysize(offsets); i++) {
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = offsets[i];
- byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
- // generate load [#base + #index]
- m.Return(m.LoadFromPointer(pointer, MachineType::Int32(), offset));
-
- FOR_INT32_INPUTS(j) {
- p1 = *j;
- CHECK_EQ(p1, m.Call());
- }
- }
-}
-
-
-TEST(RunLoadStoreFloat32Offset) {
- float p1 = 0.0f; // loads directly from this location.
- float p2 = 0.0f; // and stores directly into this location.
-
- FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
- // generate load [#base + #index]
- Node* load = m.Load(MachineType::Float32(), m.PointerConstant(from),
- m.IntPtrConstant(offset));
- m.Store(MachineRepresentation::kFloat32, m.PointerConstant(to),
- m.IntPtrConstant(offset), load, kNoWriteBarrier);
- m.Return(m.Int32Constant(magic));
-
- FOR_FLOAT32_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
- CHECK_EQ(magic, m.Call());
- CHECK_DOUBLE_EQ(p1, p2);
- }
- }
-}
-
-
-TEST(RunLoadStoreFloat64Offset) {
- double p1 = 0; // loads directly from this location.
- double p2 = 0; // and stores directly into this location.
-
- FOR_INT32_INPUTS(i) {
- int32_t magic = 0x2342aabb + *i * 3;
- RawMachineAssemblerTester<int32_t> m;
- int32_t offset = *i;
- byte* from = reinterpret_cast<byte*>(&p1) - offset;
- byte* to = reinterpret_cast<byte*>(&p2) - offset;
- // generate load [#base + #index]
- Node* load = m.Load(MachineType::Float64(), m.PointerConstant(from),
- m.IntPtrConstant(offset));
- m.Store(MachineRepresentation::kFloat64, m.PointerConstant(to),
- m.IntPtrConstant(offset), load, kNoWriteBarrier);
- m.Return(m.Int32Constant(magic));
-
- FOR_FLOAT64_INPUTS(j) {
- p1 = *j;
- p2 = *j - 5;
- CHECK_EQ(magic, m.Call());
- CHECK_DOUBLE_EQ(p1, p2);
- }
- }
-}
-
-
TEST(RunInt32AddP) {
RawMachineAssemblerTester<int32_t> m;
Int32BinopTester bt(&m);
@@ -1709,7 +1686,6 @@
}
}
-
TEST(RunInt32SubImm) {
{
FOR_UINT32_INPUTS(i) {
@@ -1733,6 +1709,11 @@
}
}
+TEST(RunInt32SubImm2) {
+ BufferedRawMachineAssemblerTester<int32_t> r;
+ r.Return(r.Int32Sub(r.Int32Constant(-1), r.Int32Constant(0)));
+ CHECK_EQ(-1, r.Call());
+}
TEST(RunInt32SubAndWord32SarP) {
{
@@ -3566,92 +3547,6 @@
}
-template <typename Type>
-static void RunLoadImmIndex(MachineType rep) {
- const int kNumElems = 3;
- Type buffer[kNumElems];
-
- // initialize the buffer with some raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
- }
-
- // Test with various large and small offsets.
- for (int offset = -1; offset <= 200000; offset *= -5) {
- for (int i = 0; i < kNumElems; i++) {
- BufferedRawMachineAssemblerTester<Type> m;
- Node* base = m.PointerConstant(buffer - offset);
- Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
- m.Return(m.Load(rep, base, index));
-
- volatile Type expected = buffer[i];
- volatile Type actual = m.Call();
- CHECK_EQ(expected, actual);
- }
- }
-}
-
-
-TEST(RunLoadImmIndex) {
- RunLoadImmIndex<int8_t>(MachineType::Int8());
- RunLoadImmIndex<uint8_t>(MachineType::Uint8());
- RunLoadImmIndex<int16_t>(MachineType::Int16());
- RunLoadImmIndex<uint16_t>(MachineType::Uint16());
- RunLoadImmIndex<int32_t>(MachineType::Int32());
- RunLoadImmIndex<uint32_t>(MachineType::Uint32());
- RunLoadImmIndex<int32_t*>(MachineType::AnyTagged());
- RunLoadImmIndex<float>(MachineType::Float32());
- RunLoadImmIndex<double>(MachineType::Float64());
- if (kPointerSize == 8) {
- RunLoadImmIndex<int64_t>(MachineType::Int64());
- }
- // TODO(titzer): test various indexing modes.
-}
-
-
-template <typename CType>
-static void RunLoadStore(MachineType rep) {
- const int kNumElems = 4;
- CType buffer[kNumElems];
-
- for (int32_t x = 0; x < kNumElems; x++) {
- int32_t y = kNumElems - x - 1;
- // initialize the buffer with raw data.
- byte* raw = reinterpret_cast<byte*>(buffer);
- for (size_t i = 0; i < sizeof(buffer); i++) {
- raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
- }
-
- RawMachineAssemblerTester<int32_t> m;
- int32_t OK = 0x29000 + x;
- Node* base = m.PointerConstant(buffer);
- Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
- Node* load = m.Load(rep, base, index0);
- Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
- m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
- m.Return(m.Int32Constant(OK));
-
- CHECK(buffer[x] != buffer[y]);
- CHECK_EQ(OK, m.Call());
- CHECK(buffer[x] == buffer[y]);
- }
-}
-
-
-TEST(RunLoadStore) {
- RunLoadStore<int8_t>(MachineType::Int8());
- RunLoadStore<uint8_t>(MachineType::Uint8());
- RunLoadStore<int16_t>(MachineType::Int16());
- RunLoadStore<uint16_t>(MachineType::Uint16());
- RunLoadStore<int32_t>(MachineType::Int32());
- RunLoadStore<uint32_t>(MachineType::Uint32());
- RunLoadStore<void*>(MachineType::AnyTagged());
- RunLoadStore<float>(MachineType::Float32());
- RunLoadStore<double>(MachineType::Float64());
-}
-
-
TEST(RunFloat32Add) {
BufferedRawMachineAssemblerTester<float> m(MachineType::Float32(),
MachineType::Float32());
@@ -4124,7 +4019,7 @@
m.Return(m.TruncateFloat32ToUint32(m.Parameter(0)));
{
FOR_UINT32_INPUTS(i) {
- float input = static_cast<float>(*i);
+ volatile float input = static_cast<float>(*i);
// This condition on 'input' is required because
// static_cast<float>(std::numeric_limits<uint32_t>::max()) results in a
// value outside uint32 range.
@@ -4201,7 +4096,7 @@
return (static_cast<uint64_t>(high) << 32) | static_cast<uint64_t>(low);
}
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X87
TEST(RunInt32PairAdd) {
BufferedRawMachineAssemblerTester<int32_t> m(
MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
@@ -4449,6 +4344,56 @@
TestWord32PairShlWithSharedInput(1, 1);
}
+TEST(RunWord32PairShr) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
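+  // The 64-bit input is split into 32-bit halves: Parameter(0) carries the
+  // low word, Parameter(1) the high word, and Parameter(2) the shift count.
+  // Projections 0 and 1 of the pair operation are the low and high words of
+  // the shifted result, reassembled by ToInt64 in the checks below.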
+  Node* pair_shr =
+      m.Word32PairShr(m.Parameter(0), m.Parameter(1), m.Parameter(2));
+
+  m.StoreToPointer(&low, MachineRepresentation::kWord32,
+                   m.Projection(0, pair_shr));
+  m.StoreToPointer(&high, MachineRepresentation::kWord32,
+                   m.Projection(1, pair_shr));
+ m.Return(m.Int32Constant(74));
+
+ FOR_UINT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j);
+ CHECK_EQ(*i >> j, ToInt64(low, high));
+ }
+ }
+}
+
+TEST(RunWord32PairSar) {
+ BufferedRawMachineAssemblerTester<int32_t> m(
+ MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32());
+
+ uint32_t high;
+ uint32_t low;
+
+  Node* pair_sar =
+      m.Word32PairSar(m.Parameter(0), m.Parameter(1), m.Parameter(2));
+
+  m.StoreToPointer(&low, MachineRepresentation::kWord32,
+                   m.Projection(0, pair_sar));
+  m.StoreToPointer(&high, MachineRepresentation::kWord32,
+                   m.Projection(1, pair_sar));
+ m.Return(m.Int32Constant(74));
+
+ FOR_INT64_INPUTS(i) {
+ for (uint32_t j = 0; j < 64; j++) {
+ m.Call(static_cast<uint32_t>(*i & 0xffffffff),
+ static_cast<uint32_t>(*i >> 32), j);
+ CHECK_EQ(*i >> j, ToInt64(low, high));
+ }
+ }
+}
+
#endif
TEST(RunDeadChangeFloat64ToInt32) {
@@ -4968,45 +4913,6 @@
}
-template <typename IntType>
-static void LoadStoreTruncation(MachineType kRepresentation) {
- IntType input;
-
- RawMachineAssemblerTester<int32_t> m;
- Node* a = m.LoadFromPointer(&input, kRepresentation);
- Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
- m.StoreToPointer(&input, kRepresentation.representation(), ap1);
- m.Return(ap1);
-
- const IntType max = std::numeric_limits<IntType>::max();
- const IntType min = std::numeric_limits<IntType>::min();
-
- // Test upper bound.
- input = max;
- CHECK_EQ(max + 1, m.Call());
- CHECK_EQ(min, input);
-
- // Test lower bound.
- input = min;
- CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
- CHECK_EQ(min + 1, input);
-
- // Test all one byte values that are not one byte bounds.
- for (int i = -127; i < 127; i++) {
- input = i;
- int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
- CHECK_EQ(static_cast<IntType>(expected), m.Call());
- CHECK_EQ(static_cast<IntType>(i + 1), input);
- }
-}
-
-
-TEST(RunLoadStoreTruncation) {
- LoadStoreTruncation<int8_t>(MachineType::Int8());
- LoadStoreTruncation<int16_t>(MachineType::Int16());
-}
-
-
static void IntPtrCompare(intptr_t left, intptr_t right) {
for (int test = 0; test < 7; test++) {
RawMachineAssemblerTester<bool> m(MachineType::Pointer(),
@@ -5417,8 +5323,7 @@
}
}
-
-TEST(RunTruncateFloat64ToInt32P) {
+TEST(RunTruncateFloat64ToWord32P) {
struct {
double from;
double raw;
@@ -5479,8 +5384,7 @@
{-1.7976931348623157e+308, 0}};
double input = -1.0;
RawMachineAssemblerTester<int32_t> m;
- m.Return(m.TruncateFloat64ToInt32(
- TruncationMode::kJavaScript,
+ m.Return(m.TruncateFloat64ToWord32(
m.LoadFromPointer(&input, MachineType::Float64())));
for (size_t i = 0; i < arraysize(kValues); ++i) {
input = kValues[i].from;
@@ -5489,6 +5393,12 @@
}
}
+TEST(RunTruncateFloat64ToWord32SignExtension) {
+ BufferedRawMachineAssemblerTester<int32_t> r;
+ r.Return(r.Int32Sub(r.TruncateFloat64ToWord32(r.Float64Constant(-1.0)),
+ r.Int32Constant(0)));
+ CHECK_EQ(-1, r.Call());
+}
TEST(RunChangeFloat32ToFloat64) {
BufferedRawMachineAssemblerTester<double> m(MachineType::Float32());
@@ -5854,50 +5764,6 @@
#if V8_TARGET_ARCH_64_BIT
// TODO(titzer): run int64 tests on all platforms when supported.
-TEST(RunCheckedLoadInt64) {
- int64_t buffer[] = {0x66bbccddeeff0011LL, 0x1122334455667788LL};
- RawMachineAssemblerTester<int64_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* load = m.AddNode(m.machine()->CheckedLoad(MachineType::Int64()), base,
- index, length);
- m.Return(load);
-
- CHECK_EQ(buffer[0], m.Call(0));
- CHECK_EQ(buffer[1], m.Call(8));
- CHECK_EQ(0, m.Call(16));
-}
-
-
-TEST(RunCheckedStoreInt64) {
- const int64_t write = 0x5566778899aabbLL;
- const int64_t before = 0x33bbccddeeff0011LL;
- int64_t buffer[] = {before, before};
- RawMachineAssemblerTester<int32_t> m(MachineType::Int32());
- Node* base = m.PointerConstant(buffer);
- Node* index = m.Parameter(0);
- Node* length = m.Int32Constant(16);
- Node* value = m.Int64Constant(write);
- Node* store =
- m.AddNode(m.machine()->CheckedStore(MachineRepresentation::kWord64), base,
- index, length, value);
- USE(store);
- m.Return(m.Int32Constant(11));
-
- CHECK_EQ(11, m.Call(16));
- CHECK_EQ(before, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(0));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(before, buffer[1]);
-
- CHECK_EQ(11, m.Call(8));
- CHECK_EQ(write, buffer[0]);
- CHECK_EQ(write, buffer[1]);
-}
-
TEST(RunBitcastInt64ToFloat64) {
int64_t input = 1;
diff --git a/test/cctest/compiler/test-run-native-calls.cc b/test/cctest/compiler/test-run-native-calls.cc
index bfdcc0e..a63cc8a 100644
--- a/test/cctest/compiler/test-run-native-calls.cc
+++ b/test/cctest/compiler/test-run-native-calls.cc
@@ -255,7 +255,7 @@
Handle<Code> CompileGraph(const char* name, CallDescriptor* desc, Graph* graph,
Schedule* schedule = nullptr) {
Isolate* isolate = CcTest::InitIsolateOnce();
- CompilationInfo info("testing", isolate, graph->zone());
+ CompilationInfo info(ArrayVector("testing"), isolate, graph->zone());
Handle<Code> code =
Pipeline::GenerateCodeForTesting(&info, desc, graph, schedule);
CHECK(!code.is_null());
diff --git a/test/cctest/compiler/test-run-stubs.cc b/test/cctest/compiler/test-run-stubs.cc
index c745219..feb25c9 100644
--- a/test/cctest/compiler/test-run-stubs.cc
+++ b/test/cctest/compiler/test-run-stubs.cc
@@ -27,7 +27,7 @@
// Create code and an accompanying descriptor.
StringLengthStub stub(isolate);
Handle<Code> code = stub.GenerateCode();
- CompilationInfo info("test", isolate, zone,
+ CompilationInfo info(ArrayVector("test"), isolate, zone,
Code::ComputeFlags(Code::HANDLER));
CallInterfaceDescriptor interface_descriptor =
stub.GetCallInterfaceDescriptor();
diff --git a/test/cctest/compiler/test-run-wasm-machops.cc b/test/cctest/compiler/test-run-wasm-machops.cc
new file mode 100644
index 0000000..0b23669
--- /dev/null
+++ b/test/cctest/compiler/test-run-wasm-machops.cc
@@ -0,0 +1,170 @@
+// Copyright 2016 the V8 project authors. All rights reserved. Use of this
+// source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static void UpdateMemoryReferences(Handle<Code> code, Address old_base,
+ Address new_base, uint32_t old_size,
+ uint32_t new_size) {
+ Isolate* isolate = CcTest::i_isolate();
+ bool modified = false;
+ int mode_mask = RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
+ RelocInfo::Mode mode = it.rinfo()->rmode();
+ if (RelocInfo::IsWasmMemoryReference(mode) ||
+ RelocInfo::IsWasmMemorySizeReference(mode)) {
+      // Patch the embedded memory address/size references for the new base.
+ it.rinfo()->update_wasm_memory_reference(old_base, new_base, old_size,
+ new_size);
+ modified = true;
+ }
+ }
+ if (modified) {
+ Assembler::FlushICache(isolate, code->instruction_start(),
+ code->instruction_size());
+ }
+}
+
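+// The relocation tests below generate code with buffer addresses and sizes
+// embedded as WASM_MEMORY_REFERENCE / WASM_MEMORY_SIZE_REFERENCE constants,
+// run it once, then patch those constants via UpdateMemoryReferences and run
+// the same code again against the updated memory.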
+template <typename CType>
+static void RunLoadStoreRelocation(MachineType rep) {
+ const int kNumElems = 2;
+ CType buffer[kNumElems];
+ CType new_buffer[kNumElems];
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ byte* new_raw = reinterpret_cast<byte*>(new_buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
+ new_raw[i] = static_cast<byte>((i + sizeof(CType)) ^ 0xAA);
+ }
+ int32_t OK = 0x29000;
+ RawMachineAssemblerTester<uint32_t> m;
+ Node* base = m.RelocatableIntPtrConstant(reinterpret_cast<intptr_t>(raw),
+ RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* base1 = m.RelocatableIntPtrConstant(
+ reinterpret_cast<intptr_t>(raw + sizeof(CType)),
+ RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* index = m.Int32Constant(0);
+ Node* load = m.Load(rep, base, index);
+ m.Store(rep.representation(), base1, index, load, kNoWriteBarrier);
+ m.Return(m.Int32Constant(OK));
+ CHECK(buffer[0] != buffer[1]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(buffer[0] == buffer[1]);
+ m.GenerateCode();
+ Handle<Code> code = m.GetCode();
+ UpdateMemoryReferences(code, raw, new_raw, sizeof(buffer),
+ sizeof(new_buffer));
+ CHECK(new_buffer[0] != new_buffer[1]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(new_buffer[0] == new_buffer[1]);
+}
+
+TEST(RunLoadStoreRelocation) {
+ RunLoadStoreRelocation<int8_t>(MachineType::Int8());
+ RunLoadStoreRelocation<uint8_t>(MachineType::Uint8());
+ RunLoadStoreRelocation<int16_t>(MachineType::Int16());
+ RunLoadStoreRelocation<uint16_t>(MachineType::Uint16());
+ RunLoadStoreRelocation<int32_t>(MachineType::Int32());
+ RunLoadStoreRelocation<uint32_t>(MachineType::Uint32());
+ RunLoadStoreRelocation<void*>(MachineType::AnyTagged());
+ RunLoadStoreRelocation<float>(MachineType::Float32());
+ RunLoadStoreRelocation<double>(MachineType::Float64());
+}
+
+template <typename CType>
+static void RunLoadStoreRelocationOffset(MachineType rep) {
+ RawMachineAssemblerTester<int32_t> r(MachineType::Int32());
+ const int kNumElems = 4;
+ CType buffer[kNumElems];
+ CType new_buffer[kNumElems + 1];
+
+ for (int32_t x = 0; x < kNumElems; x++) {
+ int32_t y = kNumElems - x - 1;
+ // initialize the buffer with raw data.
+ byte* raw = reinterpret_cast<byte*>(buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t OK = 0x29000 + x;
+ Node* base = m.RelocatableIntPtrConstant(reinterpret_cast<intptr_t>(buffer),
+ RelocInfo::WASM_MEMORY_REFERENCE);
+ Node* index0 = m.IntPtrConstant(x * sizeof(buffer[0]));
+ Node* load = m.Load(rep, base, index0);
+ Node* index1 = m.IntPtrConstant(y * sizeof(buffer[0]));
+ m.Store(rep.representation(), base, index1, load, kNoWriteBarrier);
+ m.Return(m.Int32Constant(OK));
+
+ CHECK(buffer[x] != buffer[y]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(buffer[x] == buffer[y]);
+ m.GenerateCode();
+
+    // Initialize the new buffer and zero out the old one.
+ byte* new_raw = reinterpret_cast<byte*>(new_buffer);
+ for (size_t i = 0; i < sizeof(buffer); i++) {
+ raw[i] = 0;
+ new_raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+ }
+
+ // Perform relocation on generated code
+ Handle<Code> code = m.GetCode();
+ UpdateMemoryReferences(code, raw, new_raw, sizeof(buffer),
+ sizeof(new_buffer));
+
+ CHECK(new_buffer[x] != new_buffer[y]);
+ CHECK_EQ(OK, m.Call());
+ CHECK(new_buffer[x] == new_buffer[y]);
+ }
+}
+
+TEST(RunLoadStoreRelocationOffset) {
+ RunLoadStoreRelocationOffset<int8_t>(MachineType::Int8());
+ RunLoadStoreRelocationOffset<uint8_t>(MachineType::Uint8());
+ RunLoadStoreRelocationOffset<int16_t>(MachineType::Int16());
+ RunLoadStoreRelocationOffset<uint16_t>(MachineType::Uint16());
+ RunLoadStoreRelocationOffset<int32_t>(MachineType::Int32());
+ RunLoadStoreRelocationOffset<uint32_t>(MachineType::Uint32());
+ RunLoadStoreRelocationOffset<void*>(MachineType::AnyTagged());
+ RunLoadStoreRelocationOffset<float>(MachineType::Float32());
+ RunLoadStoreRelocationOffset<double>(MachineType::Float64());
+}
+
+TEST(Uint32LessThanRelocation) {
+ RawMachineAssemblerTester<uint32_t> m;
+ RawMachineLabel within_bounds, out_of_bounds;
+ Node* index = m.Int32Constant(0x200);
+ Node* limit =
+ m.RelocatableInt32Constant(0x200, RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+ Node* cond = m.AddNode(m.machine()->Uint32LessThan(), index, limit);
+ m.Branch(cond, &within_bounds, &out_of_bounds);
+ m.Bind(&within_bounds);
+ m.Return(m.Int32Constant(0xaced));
+ m.Bind(&out_of_bounds);
+ m.Return(m.Int32Constant(0xdeadbeef));
+ // Check that index is out of bounds with current size
+ CHECK_EQ(0xdeadbeef, m.Call());
+ m.GenerateCode();
+
+ Handle<Code> code = m.GetCode();
+ UpdateMemoryReferences(code, reinterpret_cast<Address>(1234),
+ reinterpret_cast<Address>(1234), 0x200, 0x400);
+ // Check that after limit is increased, index is within bounds.
+ CHECK_EQ(0xaced, m.Call());
+}
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
index b5e9929..4efb149 100644
--- a/test/cctest/compiler/test-simplified-lowering.cc
+++ b/test/cctest/compiler/test-simplified-lowering.cc
@@ -6,13 +6,14 @@
#include "src/ast/scopes.h"
#include "src/compiler/access-builder.h"
-#include "src/compiler/change-lowering.h"
#include "src/compiler/control-builders.h"
-#include "src/compiler/graph-reducer.h"
+#include "src/compiler/effect-control-linearizer.h"
#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/memory-optimizer.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/representation-change.h"
+#include "src/compiler/scheduler.h"
#include "src/compiler/simplified-lowering.h"
#include "src/compiler/source-position.h"
#include "src/compiler/typer.h"
@@ -60,11 +61,13 @@
typer.Run();
lowering.LowerAllNodes();
- ChangeLowering lowering(&jsgraph);
- GraphReducer reducer(this->zone(), this->graph());
- reducer.AddReducer(&lowering);
- reducer.ReduceGraph();
- Verifier::Run(this->graph());
+ Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
+ Scheduler::kNoFlags);
+ EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
+ linearizer.Run();
+
+ MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
+ memory_optimizer.Optimize();
}
void CheckNumberCall(double expected, double input) {
@@ -99,13 +102,15 @@
double input;
int32_t result;
SimplifiedLoweringTester<Object*> t;
- FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
- MachineType::Float64()};
+ FieldAccess load = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToInt32(loaded);
- FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
- MachineType::Int32()};
+ FieldAccess store = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Signed32(),
+ MachineType::Int32(), kNoWriteBarrier};
t.StoreField(store, t.PointerConstant(&result), convert);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodesAndLowerChanges();
@@ -126,13 +131,15 @@
double input;
uint32_t result;
SimplifiedLoweringTester<Object*> t;
- FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
- MachineType::Float64()};
+ FieldAccess load = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
Node* loaded = t.LoadField(load, t.PointerConstant(&input));
NodeProperties::SetType(loaded, Type::Number());
Node* convert = t.NumberToUint32(loaded);
- FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
- MachineType::Uint32()};
+ FieldAccess store = {kUntaggedBase, 0,
+ Handle<Name>(), Type::Unsigned32(),
+ MachineType::Uint32(), kNoWriteBarrier};
t.StoreField(store, t.PointerConstant(&result), convert);
t.Return(t.jsgraph.TrueConstant());
t.LowerAllNodesAndLowerChanges();
@@ -291,8 +298,12 @@
for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
- FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), MachineType::AnyTagged()};
+ FieldAccess access = {kUntaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::Integral32(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadField(access, t.PointerConstant(smis));
@@ -313,8 +324,12 @@
for (size_t i = 0; i < arraysize(smis); i++) {
int offset = static_cast<int>(i * sizeof(Smi*));
- FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
- Type::Integral32(), MachineType::AnyTagged()};
+ FieldAccess access = {kUntaggedBase,
+ offset,
+ Handle<Name>(),
+ Type::Integral32(),
+ MachineType::AnyTagged(),
+ kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* p0 = t.Parameter(0);
@@ -340,7 +355,7 @@
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t;
Node* load = t.LoadElement(access, t.PointerConstant(smis),
@@ -366,7 +381,7 @@
for (size_t j = 0; (i + j) < arraysize(smis); j++) { // for element index
int offset = static_cast<int>(i * sizeof(Smi*));
ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kNoWriteBarrier};
SimplifiedLoweringTester<Object*> t(MachineType::AnyTagged());
Node* p0 = t.Parameter(0);
@@ -518,7 +533,7 @@
ElementAccess GetElementAccess() {
ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
tagged ? FixedArrayBase::kHeaderSize : 0,
- Type::Any(), rep};
+ Type::Any(), rep, kFullWriteBarrier};
return access;
}
@@ -526,7 +541,10 @@
int offset = field * sizeof(E);
FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
- Handle<Name>(), Type::Any(), rep};
+ Handle<Name>(),
+ Type::Any(),
+ rep,
+ kFullWriteBarrier};
return access;
}
@@ -726,11 +744,13 @@
SourcePositionTable table(jsgraph.graph());
SimplifiedLowering(&jsgraph, jsgraph.zone(), &table).LowerAllNodes();
- ChangeLowering lowering(&jsgraph);
- GraphReducer reducer(this->zone(), this->graph());
- reducer.AddReducer(&lowering);
- reducer.ReduceGraph();
- Verifier::Run(this->graph());
+ Schedule* schedule = Scheduler::ComputeSchedule(this->zone(), this->graph(),
+ Scheduler::kNoFlags);
+ EffectControlLinearizer linearizer(&jsgraph, schedule, this->zone());
+ linearizer.Run();
+
+ MemoryOptimizer memory_optimizer(&jsgraph, this->zone());
+ memory_optimizer.Optimize();
}
// Inserts the node as the return value of the graph.
@@ -831,7 +851,7 @@
Node* use = t.Use(inv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
Node* cmp = use->InputAt(0)->InputAt(0);
CHECK_EQ(t.machine()->Word32Equal()->opcode(), cmp->opcode());
CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
@@ -863,7 +883,7 @@
Node* use = t.Use(inv, MachineType::AnyTagged());
t.Return(use);
t.Lower();
- CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeBitToTagged, use->InputAt(0)->opcode());
Node* cmp = use->InputAt(0)->InputAt(0);
CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
@@ -908,7 +928,7 @@
t.Return(use);
t.Lower();
CHECK_EQ(b, use->InputAt(0)->InputAt(0));
- CHECK_EQ(IrOpcode::kChangeUint32ToTagged, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeInt31ToTaggedSigned, use->InputAt(0)->opcode());
}
@@ -921,7 +941,7 @@
t.Return(use);
t.Lower();
CHECK_EQ(cnv, use->InputAt(0)->InputAt(0));
- CHECK_EQ(IrOpcode::kChangeUint32ToTagged, use->InputAt(0)->opcode());
+ CHECK_EQ(IrOpcode::kChangeInt31ToTaggedSigned, use->InputAt(0)->opcode());
CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
Node* c = t.jsgraph.TrueConstant();
@@ -1046,8 +1066,7 @@
CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
}
-
-TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
+TEST(LowerNumberToInt32_to_TruncateFloat64ToWord32) {
// NumberToInt32(x: kRepFloat64) used as MachineType::Int32()
TestingGraph t(Type::Number());
Node* p0 = t.ExampleWithOutput(MachineType::Float64());
@@ -1055,22 +1074,17 @@
Node* use = t.Use(trunc, MachineType::Int32());
t.Return(use);
t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
}
-
-TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
+TEST(LowerNumberToInt32_to_TruncateTaggedToWord32) {
// NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Int32()
TestingGraph t(Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
Node* use = t.Use(trunc, MachineType::Int32());
t.Return(use);
t.Lower();
- Node* node = use->InputAt(0);
- CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
- Node* of = node->InputAt(0);
- CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
- CHECK_EQ(t.p0, of->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
}
@@ -1084,8 +1098,7 @@
CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
+TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32) {
// NumberToUint32(x: kRepFloat64) used as MachineType::Uint32()
TestingGraph t(Type::Number());
Node* p0 = t.ExampleWithOutput(MachineType::Float64());
@@ -1095,26 +1108,20 @@
Node* use = t.Use(trunc, MachineType::Uint32());
t.Return(use);
t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, p0, use->InputAt(0));
}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
+TEST(LowerNumberToUint32_to_TruncateTaggedToWord32) {
// NumberToInt32(x: kTypeNumber | kRepTagged) used as MachineType::Uint32()
TestingGraph t(Type::Number());
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
Node* use = t.Use(trunc, MachineType::Uint32());
t.Return(use);
t.Lower();
- Node* node = use->InputAt(0);
- CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
- Node* of = node->InputAt(0);
- CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
- CHECK_EQ(t.p0, of->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateTaggedToWord32, t.p0, use->InputAt(0));
}
-
-TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
+TEST(LowerNumberToUint32_to_TruncateFloat64ToWord32_uint32) {
// NumberToUint32(x: kRepFloat64) used as kRepWord32
TestingGraph t(Type::Unsigned32());
Node* input = t.ExampleWithOutput(MachineType::Float64());
@@ -1122,7 +1129,7 @@
Node* use = t.Use(trunc, MachineType::RepWord32());
t.Return(use);
t.Lower();
- CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, input, use->InputAt(0));
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToWord32, input, use->InputAt(0));
}
@@ -1150,7 +1157,7 @@
MachineType::Int32(), Type::Signed32());
CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, MachineType::Float64(),
MachineType::Uint32(), Type::Unsigned32());
- CheckChangeInsertion(IrOpcode::kTruncateFloat64ToInt32,
+ CheckChangeInsertion(IrOpcode::kTruncateFloat64ToWord32,
MachineType::Float64(), MachineType::Uint32(),
Type::Integral32());
CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, MachineType::AnyTagged(),
@@ -1222,7 +1229,7 @@
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
- IrOpcode::kChangeBitToBool);
+ IrOpcode::kChangeBitToTagged);
}
}
@@ -1235,7 +1242,7 @@
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
- IrOpcode::kChangeBitToBool);
+ IrOpcode::kChangeBitToTagged);
}
}
@@ -1265,7 +1272,7 @@
for (size_t i = 0; i < arraysize(ops); i++) {
CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
- IrOpcode::kChangeBitToBool);
+ IrOpcode::kChangeBitToTagged);
}
}
@@ -1311,11 +1318,11 @@
TEST(LowerLoadField_to_load) {
- TestingGraph t(Type::Any(), Type::Signed32());
-
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachineReps[i]};
+ TestingGraph t(Type::Any(), Type::Signed32());
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ kMachineReps[i], kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
t.start, t.start);
@@ -1337,9 +1344,9 @@
TestingGraph t(Type::Any(), Type::Signed32());
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(), kMachineReps[i]};
-
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), Type::Any(),
+ kMachineReps[i], kNoWriteBarrier};
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
@@ -1352,7 +1359,7 @@
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+ CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
@@ -1362,9 +1369,9 @@
Zone* z = scope.main_zone();
TestingGraph t(Type::Any(), Type::Intersect(Type::SignedSmall(),
Type::TaggedSigned(), z));
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::AnyTagged()};
+ FieldAccess access = {
+ kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
+ Type::Any(), MachineType::AnyTagged(), kNoWriteBarrier};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
t.Effect(store);
@@ -1378,11 +1385,10 @@
TEST(LowerLoadElement_to_load) {
- TestingGraph t(Type::Any(), Type::Signed32());
-
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
+ TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachineReps[i]};
+ Type::Any(), kMachineReps[i], kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
@@ -1401,11 +1407,11 @@
TEST(LowerStoreElement_to_store) {
{
- TestingGraph t(Type::Any(), Type::Signed32());
-
for (size_t i = 0; i < arraysize(kMachineReps); i++) {
+ TestingGraph t(Type::Any(), Type::Signed32());
+
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), kMachineReps[i]};
+ Type::Any(), kMachineReps[i], kNoWriteBarrier};
Node* val = t.ExampleWithOutput(kMachineReps[i]);
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access),
@@ -1418,7 +1424,7 @@
StoreRepresentation rep = StoreRepresentationOf(store->op());
if (kMachineReps[i].representation() == MachineRepresentation::kTagged) {
- CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+ CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
}
CHECK_EQ(kMachineReps[i].representation(), rep.representation());
}
@@ -1430,7 +1436,8 @@
Type::Any(), Type::Signed32(),
Type::Intersect(Type::SignedSmall(), Type::TaggedSigned(), z));
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Type::Any(), MachineType::AnyTagged()};
+ Type::Any(), MachineType::AnyTagged(),
+ kNoWriteBarrier};
Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
t.p1, t.p2, t.start, t.start);
t.Effect(store);
@@ -1448,7 +1455,7 @@
// Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
@@ -1465,7 +1472,7 @@
// Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::AnyTagged()};
+ MachineType::AnyTagged(), kFullWriteBarrier};
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
@@ -1482,7 +1489,7 @@
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::Float64()};
+ MachineType::Float64(), kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
t.p1, t.start, t.start);
@@ -1497,9 +1504,9 @@
TEST(InsertChangeForLoadField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::Float64()};
+ FieldAccess access = {
+ kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
+ Type::Any(), MachineType::Float64(), kNoWriteBarrier};
Node* load = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
t.start, t.start);
@@ -1515,7 +1522,7 @@
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
- MachineType::Float64()};
+ MachineType::Float64(), kFullWriteBarrier};
Node* store =
t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
@@ -1532,9 +1539,9 @@
TEST(InsertChangeForStoreField) {
// TODO(titzer): test all load/store representation change insertions.
TestingGraph t(Type::Any(), Type::Signed32());
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), Type::Any(),
- MachineType::Float64()};
+ FieldAccess access = {
+ kTaggedBase, FixedArrayBase::kHeaderSize, Handle<Name>::null(),
+ Type::Any(), MachineType::Float64(), kNoWriteBarrier};
Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
t.p1, t.start, t.start);
@@ -1554,8 +1561,9 @@
Type* kTypes[] = {Type::Signed32(), Type::Unsigned32(), Type::Number()};
for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
- FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
- Handle<Name>::null(), kTypes[i], kMachineTypes[i]};
+ FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+ Handle<Name>::null(), kTypes[i],
+ kMachineTypes[i], kFullWriteBarrier};
Node* load0 = t.graph()->NewNode(t.simplified()->LoadField(access), t.p0,
t.start, t.start);