Push version 2.1.10 to trunk.
Fixed scons build issues (added flow-graph.cc to the SConscript sources).
Fixed a couple of minor bugs: corrected the CustomArguments stack layout
(three slots instead of four) and the corresponding API stack space; on ARM,
used the map's bit field for the named-interceptor check, added number
dictionary support to the generic keyed load stub, and factored shortcut
logical AND/OR code generation into GenerateLogicalBooleanOperation.
Also replaced direct OProfileAgent calls with the OPROFILE macro, renamed
Compiler::LogCodeCreateEvent to RecordFunctionCompilation, and rewrote
string-to-double conversion to use iterator-based parsing with bounded
significant digits.
git-svn-id: http://v8.googlecode.com/svn/trunk@4294 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/SConscript b/src/SConscript
index e7f6efd..a1d4796 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -61,6 +61,7 @@
execution.cc
factory.cc
flags.cc
+ flow-graph.cc
frame-element.cc
frames.cc
full-codegen.cc
diff --git a/src/arguments.h b/src/arguments.h
index 3fed223..c17f4cf 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -72,7 +72,7 @@
};
-// Cursom arguments replicate a small segment of stack that can be
+// Custom arguments replicate a small segment of stack that can be
// accessed through an Arguments object the same way the actual stack
// can.
class CustomArguments : public Relocatable {
@@ -80,15 +80,14 @@
inline CustomArguments(Object* data,
JSObject* self,
JSObject* holder) {
- values_[3] = self;
- values_[2] = holder;
- values_[1] = Smi::FromInt(0);
+ values_[2] = self;
+ values_[1] = holder;
values_[0] = data;
}
void IterateInstance(ObjectVisitor* v);
- Object** end() { return values_ + 3; }
+ Object** end() { return values_ + ARRAY_SIZE(values_) - 1; }
private:
- Object* values_[4];
+ Object* values_[3];
};
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 5e00677..0ca4d35 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -3996,14 +3996,7 @@
}
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = node->op();
-
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// According to ECMA-262 section 11.11, page 58, the binary logical
// operators must yield the result of one of the two expressions
// before any ToBoolean() conversions. This means that the value
@@ -4015,8 +4008,7 @@
// after evaluating the left hand side (due to the shortcut
// semantics), but the compiler must (statically) know if the result
// of compiling the binary operation is materialized or not.
-
- if (op == Token::AND) {
+ if (node->op() == Token::AND) {
JumpTarget is_true;
LoadConditionAndSpill(node->left(),
&is_true,
@@ -4062,7 +4054,8 @@
ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
}
- } else if (op == Token::OR) {
+ } else {
+ ASSERT(node->op() == Token::OR);
JumpTarget is_false;
LoadConditionAndSpill(node->left(),
true_target(),
@@ -4107,7 +4100,19 @@
// Nothing to do.
ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
}
+ }
+}
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+#ifdef DEBUG
+ int original_height = frame_->height();
+#endif
+ VirtualFrame::SpilledScope spilled_scope;
+ Comment cmnt(masm_, "[ BinaryOperation");
+
+ if (node->op() == Token::AND || node->op() == Token::OR) {
+ GenerateLogicalBooleanOperation(node);
} else {
// Optimize for the case where (at least) one of the expressions
// is a literal small integer.
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 4bea341..0d1a385 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -306,6 +306,9 @@
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
+ // Generate code that computes a shortcutting logical operation.
+ void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
int known_rhs = kUnknownIntValue);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2259aea..cc7cab7 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -65,11 +65,11 @@
// Check for the absence of an interceptor.
// Load the map into t0.
__ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
- // Test the has_named_interceptor bit in the map.
- __ ldr(r3, FieldMemOperand(t0, Map::kInstanceAttributesOffset));
- __ tst(r3, Operand(1 << (Map::kHasNamedInterceptor + (3 * 8))));
- // Jump to miss if the interceptor bit is set.
- __ b(ne, miss);
+
+ // Bail out if the receiver has a named interceptor.
+ __ ldrb(r3, FieldMemOperand(t0, Map::kBitFieldOffset));
+ __ tst(r3, Operand(1 << Map::kHasNamedInterceptor));
+ __ b(nz, miss);
// Bail out if we have a JS global proxy object.
__ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
@@ -144,6 +144,95 @@
}
+static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
+ Label* miss,
+ Register elements,
+ Register key,
+ Register t0,
+ Register t1,
+ Register t2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver and is unchanged.
+ //
+ // key - holds the smi key on entry and is unchanged if a branch is
+ // performed to the miss label.
+ //
+ // Scratch registers:
+ //
+ // t0 - holds the untagged key on entry and holds the hash once computed.
+ // Holds the result on exit if the load succeeded.
+ //
+ // t1 - used to hold the capacity mask of the dictionary
+ //
+ // t2 - used for the index into the dictionary.
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ __ mvn(t1, Operand(t0));
+ __ add(t0, t1, Operand(t0, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ __ eor(t0, t0, Operand(t0, LSR, 12));
+ // hash = hash + (hash << 2);
+ __ add(t0, t0, Operand(t0, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ __ eor(t0, t0, Operand(t0, LSR, 4));
+ // hash = hash * 2057;
+ __ mov(t1, Operand(2057));
+ __ mul(t0, t0, t1);
+ // hash = hash ^ (hash >> 16);
+ __ eor(t0, t0, Operand(t0, LSR, 16));
+
+ // Compute the capacity mask.
+ __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ __ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(t1, t1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use t2 for index calculations and keep the hash intact in t0.
+ __ mov(t2, t0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ __ and_(t2, t2, Operand(t1));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ __ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
+
+ // Check if the key is identical to the name.
+ __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
+ __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
+ __ cmp(key, Operand(ip));
+ if (i != kProbes - 1) {
+ __ b(eq, &done);
+ } else {
+ __ b(ne, miss);
+ }
+ }
+
+ __ bind(&done);
+ // Check that the value is a normal property.
+ // t2: elements + (index * kPointerSize)
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+ __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ __ b(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ __ ldr(t0, FieldMemOperand(t2, kValueOffset));
+}
+
+
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -530,7 +619,7 @@
// -- sp[0] : key
// -- sp[4] : receiver
// -----------------------------------
- Label slow, fast, check_pixel_array;
+ Label slow, fast, check_pixel_array, check_number_dictionary;
// Get the key and receiver object from the stack.
__ ldm(ia, sp, r0.bit() | r1.bit());
@@ -554,6 +643,8 @@
// Check that the key is a smi.
__ BranchOnNotSmi(r0, &slow);
+ // Save key in r2 in case we want it for the number dictionary case.
+ __ mov(r2, r0);
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
// Get the elements array of the object.
@@ -562,17 +653,26 @@
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r3, ip);
- __ b(ne, &slow);
+ __ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
__ cmp(r0, Operand(r3));
- __ b(lo, &fast);
+ __ b(ge, &slow);
+ // Fast case: Do the load.
+ __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r0, ip);
+ // In case the loaded value is the_hole we have to consult GetProperty
+ // to ensure the prototype chain is searched.
+ __ b(eq, &slow);
+ __ Ret();
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
__ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
__ cmp(r3, ip);
- __ b(ne, &slow);
+ __ b(ne, &check_number_dictionary);
__ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset));
__ cmp(r0, ip);
__ b(hs, &slow);
@@ -581,22 +681,21 @@
__ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi.
__ Ret();
+ __ bind(&check_number_dictionary);
+ // Check whether the elements is a number dictionary.
+ // r0: untagged index
+ // r1: elements
+ // r2: key
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(ne, &slow);
+ GenerateNumberDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
+ __ Ret();
+
// Slow case: Push extra copies of the arguments (2).
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1);
GenerateRuntimeGetProperty(masm);
-
- // Fast case: Do the load.
- __ bind(&fast);
- __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r0, ip);
- // In case the loaded value is the_hole we have to consult GetProperty
- // to ensure the prototype chain is searched.
- __ b(eq, &slow);
-
- __ Ret();
}
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index abf2f64..bbffef2 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -396,15 +396,14 @@
Register holder,
Register name,
JSObject* holder_obj) {
- __ push(receiver);
- __ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
ASSERT(!Heap::InNewSpace(interceptor));
-
- Register scratch = receiver;
+ Register scratch = name;
__ mov(scratch, Operand(Handle<Object>(interceptor)));
__ push(scratch);
+ __ push(receiver);
+ __ push(holder);
__ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
__ push(scratch);
}
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index e42f758..ea74898 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -61,13 +61,9 @@
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
-#ifdef ENABLE_OPROFILE_AGENT
- // Register the generated stub with the OPROFILE agent.
- OProfileAgent::CreateNativeCodeRegion(GetName(),
- code->instruction_start(),
- code->instruction_size());
-#endif
-
+ OPROFILE(CreateNativeCodeRegion(GetName(),
+ code->instruction_start(),
+ code->instruction_size()));
LOG(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
Counters::total_stubs_code_size.Increment(code->instruction_size());
diff --git a/src/codegen.h b/src/codegen.h
index 4634f4c..0dfea8d 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -450,7 +450,7 @@
virtual bool GetCustomCache(Code** code_out);
virtual void SetCustomCache(Code* value);
- static const int kStackSpace = 6;
+ static const int kStackSpace = 5;
static const int kArgc = 4;
private:
Handle<AccessorInfo> info() { return info_; }
diff --git a/src/compiler.cc b/src/compiler.cc
index e2021fa..c9dd107 100755
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -34,6 +34,7 @@
#include "data-flow.h"
#include "debug.h"
#include "fast-codegen.h"
+#include "flow-graph.h"
#include "full-codegen.h"
#include "liveedit.h"
#include "oprofile-agent.h"
@@ -235,27 +236,19 @@
return Handle<SharedFunctionInfo>::null();
}
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
- // Log the code generation for the script. Check explicit whether logging is
- // to avoid allocating when not required.
- if (Logger::is_logging() || OProfileAgent::is_enabled()) {
- if (script->name()->IsString()) {
- SmartPointer<char> data =
- String::cast(script->name())->ToCString(DISALLOW_NULLS);
- LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
- *code, *data));
- OProfileAgent::CreateNativeCodeRegion(*data,
- code->instruction_start(),
- code->instruction_size());
- } else {
- LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
- *code, ""));
- OProfileAgent::CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
- code->instruction_start(),
- code->instruction_size());
- }
+ if (script->name()->IsString()) {
+ LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+ *code, String::cast(script->name())));
+ OPROFILE(CreateNativeCodeRegion(String::cast(script->name()),
+ code->instruction_start(),
+ code->instruction_size()));
+ } else {
+ LOG(CodeCreateEvent(is_eval ? Logger::EVAL_TAG : Logger::SCRIPT_TAG,
+ *code, ""));
+ OPROFILE(CreateNativeCodeRegion(is_eval ? "Eval" : "Script",
+ code->instruction_start(),
+ code->instruction_size()));
}
-#endif
// Allocate function.
Handle<SharedFunctionInfo> result =
@@ -443,14 +436,12 @@
return false;
}
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
- LogCodeCreateEvent(Logger::LAZY_COMPILE_TAG,
- name,
- Handle<String>(shared->inferred_name()),
- start_position,
- info->script(),
- code);
-#endif
+ RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
+ name,
+ Handle<String>(shared->inferred_name()),
+ start_position,
+ info->script(),
+ code);
// Update the shared function info with the compiled code.
shared->set_code(*code);
@@ -578,15 +569,12 @@
}
// Function compilation complete.
-
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
- LogCodeCreateEvent(Logger::FUNCTION_TAG,
- literal->name(),
- literal->inferred_name(),
- literal->start_position(),
- script,
- code);
-#endif
+ RecordFunctionCompilation(Logger::FUNCTION_TAG,
+ literal->name(),
+ literal->inferred_name(),
+ literal->start_position(),
+ script,
+ code);
}
// Create a boilerplate function.
@@ -628,13 +616,12 @@
}
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
-void Compiler::LogCodeCreateEvent(Logger::LogEventsAndTags tag,
- Handle<String> name,
- Handle<String> inferred_name,
- int start_position,
- Handle<Script> script,
- Handle<Code> code) {
+void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+ Handle<String> name,
+ Handle<String> inferred_name,
+ int start_position,
+ Handle<Script> script,
+ Handle<Code> code) {
// Log the code generation. If source information is available
// include script name and line number. Check explicitly whether
// logging is enabled as finding the line number is not free.
@@ -642,21 +629,21 @@
Handle<String> func_name(name->length() > 0 ? *name : *inferred_name);
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, start_position) + 1;
+ USE(line_num);
LOG(CodeCreateEvent(tag, *code, *func_name,
String::cast(script->name()), line_num));
- OProfileAgent::CreateNativeCodeRegion(*func_name,
- String::cast(script->name()),
- line_num,
- code->instruction_start(),
- code->instruction_size());
+ OPROFILE(CreateNativeCodeRegion(*func_name,
+ String::cast(script->name()),
+ line_num,
+ code->instruction_start(),
+ code->instruction_size()));
} else {
LOG(CodeCreateEvent(tag, *code, *func_name));
- OProfileAgent::CreateNativeCodeRegion(*func_name,
- code->instruction_start(),
- code->instruction_size());
+ OPROFILE(CreateNativeCodeRegion(*func_name,
+ code->instruction_start(),
+ code->instruction_size()));
}
}
}
-#endif
} } // namespace v8::internal
diff --git a/src/compiler.h b/src/compiler.h
index e08e26e..ecc7b1c 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -266,15 +266,12 @@
Handle<Script> script);
private:
-
-#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
- static void LogCodeCreateEvent(Logger::LogEventsAndTags tag,
- Handle<String> name,
- Handle<String> inferred_name,
- int start_position,
- Handle<Script> script,
- Handle<Code> code);
-#endif
+ static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+ Handle<String> name,
+ Handle<String> inferred_name,
+ int start_position,
+ Handle<Script> script,
+ Handle<Code> code);
};
diff --git a/src/conversions.cc b/src/conversions.cc
index d0abc2b..5c46752 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
+#include <limits.h>
#include "v8.h"
@@ -92,6 +93,47 @@
}
+namespace {
+
+// C++-style iterator adaptor for StringInputBuffer
+// (unlike C++ iterators the end-marker has different type).
+class StringInputBufferIterator {
+ public:
+ class EndMarker {};
+
+ explicit StringInputBufferIterator(StringInputBuffer* buffer);
+
+ int operator*() const;
+ void operator++();
+ bool operator==(EndMarker const&) const { return end_; }
+ bool operator!=(EndMarker const& m) const { return !end_; }
+
+ private:
+ StringInputBuffer* const buffer_;
+ int current_;
+ bool end_;
+};
+
+
+StringInputBufferIterator::StringInputBufferIterator(
+ StringInputBuffer* buffer) : buffer_(buffer) {
+ ++(*this);
+}
+
+int StringInputBufferIterator::operator*() const {
+ return current_;
+}
+
+
+void StringInputBufferIterator::operator++() {
+ end_ = !buffer_->has_more();
+ if (!end_) {
+ current_ = buffer_->GetNext();
+ }
+}
+}
+
+
static inline void ReleaseCString(const char* original, const char* str) {
}
@@ -101,71 +143,30 @@
}
-static inline bool IsSpace(const char* str, int index) {
- ASSERT(index >= 0 && index < StrLength(str));
- return Scanner::kIsWhiteSpace.get(str[index]);
-}
-
-
-static inline bool IsSpace(String* str, int index) {
- return Scanner::kIsWhiteSpace.get(str->Get(index));
-}
-
-
-static inline bool SubStringEquals(const char* str,
- int index,
- const char* other) {
- return strncmp(str + index, other, strlen(other)) != 0;
-}
-
-
-static inline bool SubStringEquals(String* str, int index, const char* other) {
- HandleScope scope;
- int str_length = str->length();
- int other_length = StrLength(other);
- int end = index + other_length < str_length ?
- index + other_length :
- str_length;
- Handle<String> substring =
- Factory::NewSubString(Handle<String>(str), index, end);
- return substring->IsEqualTo(Vector<const char>(other, other_length));
-}
-
-
-// Check if a string should be parsed as an octal number. The string
-// can be either a char* or a String*.
-template<class S>
-static bool ShouldParseOctal(S* s, int i) {
- int index = i;
- int len = GetLength(s);
- if (index < len && GetChar(s, index) != '0') return false;
-
- // If the first real character (following '0') is not an octal
- // digit, bail out early. This also takes care of numbers of the
- // forms 0.xxx and 0exxx by not allowing the first 0 to be
- // interpreted as an octal.
- index++;
- if (index < len) {
- int d = GetChar(s, index) - '0';
- if (d < 0 || d > 7) return false;
- } else {
- return false;
+template <class Iterator, class EndMark>
+static bool SubStringEquals(Iterator* current,
+ EndMark end,
+ const char* substring) {
+ ASSERT(**current == *substring);
+ for (substring++; *substring != '\0'; substring++) {
+ ++*current;
+ if (*current == end || **current != *substring) return false;
}
-
- // Traverse all digits (including the first). If there is an octal
- // prefix which is not a part of a longer decimal prefix, we return
- // true. Otherwise, false is returned.
- while (index < len) {
- int d = GetChar(s, index++) - '0';
- if (d == 8 || d == 9) return false;
- if (d < 0 || d > 7) return true;
- }
+ ++*current;
return true;
}
extern "C" double gay_strtod(const char* s00, const char** se);
+// Maximum number of significant digits in decimal representation.
+// The longest possible double in decimal representation is
+// (2^53 - 1) * 2 ^ -1074 that is (2 ^ 53 - 1) * 5 ^ 1074 / 10 ^ 1074
+// (768 digits). If we parse a number whose first digits are equal to a
+// mean of 2 adjacent doubles (that could have up to 769 digits) the result
+// must be rounded to the bigger one unless the tail consists of zeros, so
+// we don't need to preserve all the digits.
+const int kMaxSignificantDigits = 772;
// Parse an int from a string starting a given index and in a given
// radix. The string can be either a char* or a String*.
@@ -262,95 +263,372 @@
static const double JUNK_STRING_VALUE = OS::nan_value();
-// Convert a string to a double value. The string can be either a
-// char* or a String*.
-template<class S>
-static double InternalStringToDouble(S* str,
- int flags,
- double empty_string_val) {
- double result = 0.0;
- int index = 0;
-
- int len = GetLength(str);
-
- // Skip leading spaces.
- while ((index < len) && IsSpace(str, index)) index++;
-
- // Is the string empty?
- if (index >= len) return empty_string_val;
-
- // Get the first character.
- uint16_t first = GetChar(str, index);
-
- // Numbers can only start with '-', '+', '.', 'I' (Infinity), or a digit.
- if (first != '-' && first != '+' && first != '.' && first != 'I' &&
- (first > '9' || first < '0')) {
- return JUNK_STRING_VALUE;
+// Returns true if a nonspace found and false if the end has reached.
+template <class Iterator, class EndMark>
+static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
+ while (*current != end) {
+ if (!Scanner::kIsWhiteSpace.get(**current)) return true;
+ ++*current;
}
-
- // Compute sign of result based on first character.
- int sign = 1;
- if (first == '-') {
- sign = -1;
- index++;
- // String only containing a '-' are junk chars.
- if (index == len) return JUNK_STRING_VALUE;
- }
-
- // do we have a hex number?
- // (since the string is 0-terminated, it's ok to look one char beyond the end)
- if ((flags & ALLOW_HEX) != 0 &&
- (index + 1) < len &&
- GetChar(str, index) == '0' &&
- (GetChar(str, index + 1) == 'x' || GetChar(str, index + 1) == 'X')) {
- index += 2;
- index = StringToInt(str, index, 16, &result);
- } else if ((flags & ALLOW_OCTALS) != 0 && ShouldParseOctal(str, index)) {
- // NOTE: We optimistically try to parse the number as an octal (if
- // we're allowed to), even though this is not as dictated by
- // ECMA-262. The reason for doing this is compatibility with IE and
- // Firefox.
- index = StringToInt(str, index, 8, &result);
- } else {
- const char* cstr = GetCString(str, index);
- const char* end;
- // Optimistically parse the number and then, if that fails,
- // check if it might have been {+,-,}Infinity.
- result = gay_strtod(cstr, &end);
- ReleaseCString(str, cstr);
- if (result != 0.0 || end != cstr) {
- // It appears that strtod worked
- index += static_cast<int>(end - cstr);
- } else {
- // Check for {+,-,}Infinity
- bool is_negative = (GetChar(str, index) == '-');
- if (GetChar(str, index) == '+' || GetChar(str, index) == '-')
- index++;
- if (!SubStringEquals(str, index, "Infinity"))
- return JUNK_STRING_VALUE;
- result = is_negative ? -V8_INFINITY : V8_INFINITY;
- index += 8;
- }
- }
-
- if ((flags & ALLOW_TRAILING_JUNK) == 0) {
- // skip trailing spaces
- while ((index < len) && IsSpace(str, index)) index++;
- // string ending with junk?
- if (index < len) return JUNK_STRING_VALUE;
- }
-
- return sign * result;
+ return false;
}
+template <class Iterator, class EndMark>
+static double InternalHexadecimalStringToDouble(Iterator current,
+ EndMark end,
+ char* buffer,
+ bool allow_trailing_junk) {
+ ASSERT(current != end);
+
+ const int max_hex_significant_digits = 52 / 4 + 2;
+ // We reuse the buffer of InternalStringToDouble. Since hexadecimal
+ // numbers may have much less digits than decimal the buffer won't overflow.
+ ASSERT(max_hex_significant_digits < kMaxSignificantDigits);
+
+ int significant_digits = 0;
+ int insignificant_digits = 0;
+ bool leading_zero = false;
+ // A double has a 53bit significand (once the hidden bit has been added).
+ // Halfway cases thus have at most 54bits. Therefore 54/4 + 1 digits are
+ // sufficient to represent halfway cases. By adding another digit we can keep
+ // track of dropped digits.
+ int buffer_pos = 0;
+ bool nonzero_digit_dropped = false;
+
+ // Skip leading 0s.
+ while (*current == '0') {
+ leading_zero = true;
+ ++current;
+ if (current == end) return 0;
+ }
+
+ int begin_pos = buffer_pos;
+ while ((*current >= '0' && *current <= '9')
+ || (*current >= 'a' && *current <= 'f')
+ || (*current >= 'A' && *current <= 'F')) {
+ if (significant_digits <= max_hex_significant_digits) {
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ } else {
+ insignificant_digits++;
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ ++current;
+ if (current == end) break;
+ }
+
+ if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) {
+ return JUNK_STRING_VALUE;
+ }
+
+ if (significant_digits == 0) {
+ return leading_zero ? 0 : JUNK_STRING_VALUE;
+ }
+
+ if (nonzero_digit_dropped) {
+ ASSERT(insignificant_digits > 0);
+ insignificant_digits--;
+ buffer[buffer_pos++] = '1';
+ }
+
+ buffer[buffer_pos] = '\0';
+
+ double result;
+ StringToInt(buffer, begin_pos, 16, &result);
+ if (insignificant_digits > 0) {
+ // Multiplying by a power of 2 doesn't cause a loss of precision.
+ result *= pow(16.0, insignificant_digits);
+ }
+ return result;
+}
+
+
+// Converts a string to a double value. Assumes the Iterator supports
+// the following operations:
+// 1. current == end (other ops are not allowed), current != end.
+// 2. *current - gets the current character in the sequence.
+// 3. ++current (advances the position).
+template <class Iterator, class EndMark>
+static double InternalStringToDouble(Iterator current,
+ EndMark end,
+ int flags,
+ double empty_string_val) {
+ // To make sure that iterator dereferencing is valid the following
+ // convention is used:
+ // 1. Each '++current' statement is followed by check for equality to 'end'.
+ // 2. If AdvanceToNonspace returned false then current == end.
+ // 3. If 'current' becomes be equal to 'end' the function returns or goes to
+ // 'parsing_done'.
+ // 4. 'current' is not dereferenced after the 'parsing_done' label.
+ // 5. Code before 'parsing_done' may rely on 'current != end'.
+ if (!AdvanceToNonspace(¤t, end)) return empty_string_val;
+
+ const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
+
+ // The longest form of simplified number is: "-<significant digits>'.1eXXX\0".
+ const int kBufferSize = kMaxSignificantDigits + 10;
+ char buffer[kBufferSize]; // NOLINT: size is known at compile time.
+ int buffer_pos = 0;
+
+ // Exponent will be adjusted if insignificant digits of the integer part
+ // or insignificant leading zeros of the fractional part are dropped.
+ int exponent = 0;
+ int significant_digits = 0;
+ int insignificant_digits = 0;
+ bool nonzero_digit_dropped = false;
+
+ double signed_zero = 0.0;
+
+ if (*current == '+') {
+ // Ignore leading sign; skip following spaces.
+ ++current;
+ if (!AdvanceToNonspace(¤t, end)) return JUNK_STRING_VALUE;
+ } else if (*current == '-') {
+ buffer[buffer_pos++] = '-';
+ ++current;
+ if (!AdvanceToNonspace(¤t, end)) return JUNK_STRING_VALUE;
+ signed_zero = -0.0;
+ }
+
+ static const char kInfinitySymbol[] = "Infinity";
+ if (*current == kInfinitySymbol[0]) {
+ if (!SubStringEquals(¤t, end, kInfinitySymbol)) {
+ return JUNK_STRING_VALUE;
+ }
+
+ if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) {
+ return JUNK_STRING_VALUE;
+ }
+
+ ASSERT(buffer_pos == 0 || buffer[0] == '-');
+ return buffer_pos > 0 ? -V8_INFINITY : V8_INFINITY;
+ }
+
+ bool leading_zero = false;
+ if (*current == '0') {
+ ++current;
+ if (current == end) return signed_zero;
+
+ leading_zero = true;
+
+ // It could be hexadecimal value.
+ if ((flags & ALLOW_HEX) && (*current == 'x' || *current == 'X')) {
+ ++current;
+ if (current == end) return JUNK_STRING_VALUE; // "0x".
+
+ double result = InternalHexadecimalStringToDouble(current,
+ end,
+ buffer + buffer_pos,
+ allow_trailing_junk);
+ return (buffer_pos > 0 && buffer[0] == '-') ? -result : result;
+ }
+
+ // Ignore leading zeros in the integer part.
+ while (*current == '0') {
+ ++current;
+ if (current == end) return signed_zero;
+ }
+ }
+
+ bool octal = leading_zero && (flags & ALLOW_OCTALS) != 0;
+
+ // Copy significant digits of the integer part (if any) to the buffer.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ // Will later check if it's an octal in the buffer.
+ } else {
+ insignificant_digits++; // Move the digit into the exponential part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ octal = octal && *current < '8';
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+
+ if (significant_digits == 0) {
+ octal = false;
+ }
+
+ if (*current == '.') {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = '.';
+ ++current;
+ if (current == end) {
+ if (significant_digits == 0 && !leading_zero) {
+ return JUNK_STRING_VALUE;
+ } else {
+ goto parsing_done;
+ }
+ }
+
+ if (significant_digits == 0) {
+ // octal = false;
+ // Integer part consists of 0 or is absent. Significant digits start after
+ // leading zeros (if any).
+ while (*current == '0') {
+ ++current;
+ if (current == end) return signed_zero;
+ exponent--; // Move this 0 into the exponent.
+ }
+ }
+
+ // There is the fractional part.
+ while (*current >= '0' && *current <= '9') {
+ if (significant_digits < kMaxSignificantDigits) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = static_cast<char>(*current);
+ significant_digits++;
+ } else {
+ // Ignore insignificant digits in the fractional part.
+ nonzero_digit_dropped = nonzero_digit_dropped || *current != '0';
+ }
+ ++current;
+ if (current == end) goto parsing_done;
+ }
+ }
+
+ if (!leading_zero && exponent == 0 && significant_digits == 0) {
+ // If leading_zeros is true then the string contains zeros.
+ // If exponent < 0 then string was [+-]\.0*...
+ // If significant_digits != 0 the string is not equal to 0.
+ // Otherwise there are no digits in the string.
+ return JUNK_STRING_VALUE;
+ }
+
+ // Parse exponential part.
+ if (*current == 'e' || *current == 'E') {
+ if (octal) return JUNK_STRING_VALUE;
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JUNK_STRING_VALUE;
+ }
+ }
+ char sign = '+';
+ if (*current == '+' || *current == '-') {
+ sign = static_cast<char>(*current);
+ ++current;
+ if (current == end) {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JUNK_STRING_VALUE;
+ }
+ }
+ }
+
+ if (current == end || *current < '0' || *current > '9') {
+ if (allow_trailing_junk) {
+ goto parsing_done;
+ } else {
+ return JUNK_STRING_VALUE;
+ }
+ }
+
+ const int max_exponent = INT_MAX / 2;
+ ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+ int num = 0;
+ do {
+ // Check overflow.
+ int digit = *current - '0';
+ if (num >= max_exponent / 10
+ && !(num == max_exponent / 10 && digit <= max_exponent % 10)) {
+ num = max_exponent;
+ } else {
+ num = num * 10 + digit;
+ }
+ ++current;
+ } while (current != end && *current >= '0' && *current <= '9');
+
+ exponent += (sign == '-' ? -num : num);
+ }
+
+ if (!allow_trailing_junk && AdvanceToNonspace(¤t, end)) {
+ return JUNK_STRING_VALUE;
+ }
+
+ parsing_done:
+ exponent += insignificant_digits;
+
+ if (octal) {
+ buffer[buffer_pos] = '\0';
+ // ALLOW_OCTALS is set and there is no '8' or '9' in insignificant
+ // digits. Check significant digits now.
+ char sign = '+';
+ const char* s = buffer;
+ if (*s == '-' || *s == '+') sign = *s++;
+
+ double result;
+ s += StringToInt(s, 0, 8, &result);
+ if (!allow_trailing_junk && *s != '\0') return JUNK_STRING_VALUE;
+
+ if (sign == '-') result = -result;
+ if (insignificant_digits > 0) {
+ result *= pow(8.0, insignificant_digits);
+ }
+ return result;
+ }
+
+ if (nonzero_digit_dropped) {
+ if (insignificant_digits) buffer[buffer_pos++] = '.';
+ buffer[buffer_pos++] = '1';
+ }
+
+ if (exponent != 0) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = 'e';
+ if (exponent < 0) {
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos++] = '-';
+ exponent = -exponent;
+ }
+ if (exponent > 999) exponent = 999; // Result will be Infinity or 0 or -0.
+
+ const int exp_digits = 3;
+ for (int i = 0; i < exp_digits; i++) {
+ buffer[buffer_pos + exp_digits - 1 - i] = '0' + exponent % 10;
+ exponent /= 10;
+ }
+ ASSERT(exponent == 0);
+ buffer_pos += exp_digits;
+ }
+
+ ASSERT(buffer_pos < kBufferSize);
+ buffer[buffer_pos] = '\0';
+
+ return gay_strtod(buffer, NULL);
+}
+
double StringToDouble(String* str, int flags, double empty_string_val) {
- return InternalStringToDouble(str, flags, empty_string_val);
+ StringShape shape(str);
+ if (shape.IsSequentialAscii()) {
+ const char* begin = SeqAsciiString::cast(str)->GetChars();
+ const char* end = begin + str->length();
+ return InternalStringToDouble(begin, end, flags, empty_string_val);
+ } else if (shape.IsSequentialTwoByte()) {
+ const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
+ const uc16* end = begin + str->length();
+ return InternalStringToDouble(begin, end, flags, empty_string_val);
+ } else {
+ StringInputBuffer buffer(str);
+ return InternalStringToDouble(StringInputBufferIterator(&buffer),
+ StringInputBufferIterator::EndMarker(),
+ flags,
+ empty_string_val);
+ }
}
double StringToDouble(const char* str, int flags, double empty_string_val) {
- return InternalStringToDouble(str, flags, empty_string_val);
+ const char* end = str + StrLength(str);
+
+ return InternalStringToDouble(str, end, flags, empty_string_val);
}
diff --git a/src/data-flow.cc b/src/data-flow.cc
index fe4b3db..1bc77c0 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -28,6 +28,7 @@
#include "v8.h"
#include "data-flow.h"
+#include "flow-graph.h"
#include "scopes.h"
namespace v8 {
@@ -50,561 +51,6 @@
#endif
-void FlowGraph::AppendInstruction(AstNode* instruction) {
- // Add a (non-null) AstNode to the end of the graph fragment.
- ASSERT(instruction != NULL);
- if (exit()->IsExitNode()) return;
- if (!exit()->IsBlockNode()) AppendNode(new BlockNode());
- BlockNode::cast(exit())->AddInstruction(instruction);
-}
-
-
-void FlowGraph::AppendNode(Node* node) {
- // Add a node to the end of the graph. An empty block is added to
- // maintain edge-split form (that no join nodes or exit nodes as
- // successors to branch nodes).
- ASSERT(node != NULL);
- if (exit()->IsExitNode()) return;
- if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
- AppendNode(new BlockNode());
- }
- exit()->AddSuccessor(node);
- node->AddPredecessor(exit());
- exit_ = node;
-}
-
-
-void FlowGraph::AppendGraph(FlowGraph* graph) {
- // Add a flow graph fragment to the end of this one. An empty block is
- // added to maintain edge-split form (that no join nodes or exit nodes as
- // successors to branch nodes).
- ASSERT(graph != NULL);
- if (exit()->IsExitNode()) return;
- Node* node = graph->entry();
- if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
- AppendNode(new BlockNode());
- }
- exit()->AddSuccessor(node);
- node->AddPredecessor(exit());
- exit_ = graph->exit();
-}
-
-
-void FlowGraph::Split(BranchNode* branch,
- FlowGraph* left,
- FlowGraph* right,
- JoinNode* join) {
- // Add the branch node, left flowgraph, join node.
- AppendNode(branch);
- AppendGraph(left);
- AppendNode(join);
-
- // Splice in the right flowgraph.
- right->AppendNode(join);
- branch->AddSuccessor(right->entry());
- right->entry()->AddPredecessor(branch);
-}
-
-
-void FlowGraph::Loop(JoinNode* join,
- FlowGraph* condition,
- BranchNode* branch,
- FlowGraph* body) {
- // Add the join, condition and branch. Add join's predecessors in
- // left-to-right order.
- AppendNode(join);
- body->AppendNode(join);
- AppendGraph(condition);
- AppendNode(branch);
-
- // Splice in the body flowgraph.
- branch->AddSuccessor(body->entry());
- body->entry()->AddPredecessor(branch);
-}
-
-
-void ExitNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
- preorder->Add(this);
- postorder->Add(this);
-}
-
-
-void BlockNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
- ASSERT(successor_ != NULL);
- preorder->Add(this);
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- successor_->Traverse(mark, preorder, postorder);
- }
- postorder->Add(this);
-}
-
-
-void BranchNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
- ASSERT(successor0_ != NULL && successor1_ != NULL);
- preorder->Add(this);
- if (!successor1_->IsMarkedWith(mark)) {
- successor1_->MarkWith(mark);
- successor1_->Traverse(mark, preorder, postorder);
- }
- if (!successor0_->IsMarkedWith(mark)) {
- successor0_->MarkWith(mark);
- successor0_->Traverse(mark, preorder, postorder);
- }
- postorder->Add(this);
-}
-
-
-void JoinNode::Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) {
- ASSERT(successor_ != NULL);
- preorder->Add(this);
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- successor_->Traverse(mark, preorder, postorder);
- }
- postorder->Add(this);
-}
-
-
-void FlowGraphBuilder::Build(FunctionLiteral* lit) {
- global_exit_ = new ExitNode();
- VisitStatements(lit->body());
-
- if (HasStackOverflow()) return;
-
- // The graph can end with a branch node (if the function ended with a
- // loop). Maintain edge-split form (no join nodes or exit nodes as
- // successors to branch nodes).
- if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode());
- graph_.AppendNode(global_exit_);
-
- // Build preorder and postorder traversal orders. All the nodes in
- // the graph have the same mark flag. For the traversal, use that
- // flag's negation. Traversal will flip all the flags.
- bool mark = graph_.entry()->IsMarkedWith(false);
- graph_.entry()->MarkWith(mark);
- graph_.entry()->Traverse(mark, &preorder_, &postorder_);
-}
-
-
-// This function peels off one iteration of a for-loop. The return value
-// is either a block statement containing the peeled loop or NULL in case
-// there is a stack overflow.
-static Statement* PeelForLoop(ForStatement* stmt) {
- // Mark this for-statement as processed.
- stmt->set_peel_this_loop(false);
-
- // Create new block containing the init statement of the for-loop and
- // an if-statement containing the peeled iteration and the original
- // loop without the init-statement.
- Block* block = new Block(NULL, 2, false);
- if (stmt->init() != NULL) {
- Statement* init = stmt->init();
- // The init statement gets the statement position of the for-loop
- // to make debugging of peeled loops possible.
- init->set_statement_pos(stmt->statement_pos());
- block->AddStatement(init);
- }
-
- // Copy the condition.
- CopyAstVisitor copy_visitor;
- Expression* cond_copy = stmt->cond() != NULL
- ? copy_visitor.DeepCopyExpr(stmt->cond())
- : new Literal(Factory::true_value());
- if (copy_visitor.HasStackOverflow()) return NULL;
-
- // Construct a block with the peeled body and the rest of the for-loop.
- Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
- if (copy_visitor.HasStackOverflow()) return NULL;
-
- Statement* next_copy = stmt->next() != NULL
- ? copy_visitor.DeepCopyStmt(stmt->next())
- : new EmptyStatement();
- if (copy_visitor.HasStackOverflow()) return NULL;
-
- Block* peeled_body = new Block(NULL, 3, false);
- peeled_body->AddStatement(body_copy);
- peeled_body->AddStatement(next_copy);
- peeled_body->AddStatement(stmt);
-
- // Remove the duplicated init statement from the for-statement.
- stmt->set_init(NULL);
-
- // Create new test at the top and add it to the newly created block.
- IfStatement* test = new IfStatement(cond_copy,
- peeled_body,
- new EmptyStatement());
- block->AddStatement(test);
- return block;
-}
-
-
-void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
- for (int i = 0, len = stmts->length(); i < len; i++) {
- stmts->at(i) = ProcessStatement(stmts->at(i));
- }
-}
-
-
-Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
- if (FLAG_loop_peeling &&
- stmt->AsForStatement() != NULL &&
- stmt->AsForStatement()->peel_this_loop()) {
- Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
- if (tmp_stmt == NULL) {
- SetStackOverflow();
- } else {
- stmt = tmp_stmt;
- }
- }
- Visit(stmt);
- return stmt;
-}
-
-
-void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
- UNREACHABLE();
-}
-
-
-void FlowGraphBuilder::VisitBlock(Block* stmt) {
- VisitStatements(stmt->statements());
-}
-
-
-void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
- Visit(stmt->expression());
-}
-
-
-void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
- // Nothing to do.
-}
-
-
-void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
- Visit(stmt->condition());
-
- BranchNode* branch = new BranchNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- stmt->set_then_statement(ProcessStatement(stmt->then_statement()));
-
- FlowGraph left = graph_;
- graph_ = FlowGraph::Empty();
- stmt->set_else_statement(ProcessStatement(stmt->else_statement()));
-
- if (HasStackOverflow()) return;
- JoinNode* join = new JoinNode();
- original.Split(branch, &left, &graph_, join);
- graph_ = original;
-}
-
-
-void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
- if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));
-
- JoinNode* join = new JoinNode();
- FlowGraph original = graph_;
- graph_ = FlowGraph::Empty();
- if (stmt->cond() != NULL) Visit(stmt->cond());
-
- BranchNode* branch = new BranchNode();
- FlowGraph condition = graph_;
- graph_ = FlowGraph::Empty();
- stmt->set_body(ProcessStatement(stmt->body()));
-
- if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));
-
- if (HasStackOverflow()) return;
- original.Loop(join, &condition, branch, &graph_);
- graph_ = original;
-}
-
-
-void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
- SharedFunctionInfoLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitConditional(Conditional* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitSlot(Slot* expr) {
- UNREACHABLE();
-}
-
-
-void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
- graph_.AppendInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitLiteral(Literal* expr) {
- graph_.AppendInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
- Property* prop = expr->target()->AsProperty();
- // Left-hand side can be a variable or property (or reference error) but
- // not both.
- ASSERT(var == NULL || prop == NULL);
- if (var != NULL) {
- if (expr->is_compound()) Visit(expr->target());
- Visit(expr->value());
- if (var->IsStackAllocated()) {
- // The first definition in the body is numbered n, where n is the
- // number of parameters and stack-allocated locals.
- expr->set_num(body_definitions_.length() + variable_count_);
- body_definitions_.Add(expr);
- }
-
- } else if (prop != NULL) {
- Visit(prop->obj());
- if (!prop->key()->IsPropertyName()) Visit(prop->key());
- Visit(expr->value());
- }
-
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitThrow(Throw* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitProperty(Property* expr) {
- Visit(expr->obj());
- if (!expr->key()->IsPropertyName()) Visit(expr->key());
-
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitCall(Call* expr) {
- Visit(expr->expression());
- ZoneList<Expression*>* arguments = expr->arguments();
- for (int i = 0, len = arguments->length(); i < len; i++) {
- Visit(arguments->at(i));
- }
-
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
- SetStackOverflow();
-}
-
-
-void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
- switch (expr->op()) {
- case Token::NOT:
- case Token::BIT_NOT:
- case Token::DELETE:
- case Token::TYPEOF:
- case Token::VOID:
- SetStackOverflow();
- break;
-
- case Token::ADD:
- case Token::SUB:
- Visit(expr->expression());
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
- Visit(expr->expression());
- Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (var != NULL && var->IsStackAllocated()) {
- // The first definition in the body is numbered n, where n is the number
- // of parameters and stack-allocated locals.
- expr->set_num(body_definitions_.length() + variable_count_);
- body_definitions_.Add(expr);
- }
-
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
-}
-
-
-void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
- switch (expr->op()) {
- case Token::COMMA:
- case Token::OR:
- case Token::AND:
- SetStackOverflow();
- break;
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SHL:
- case Token::SHR:
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD:
- case Token::SAR:
- Visit(expr->left());
- Visit(expr->right());
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
- switch (expr->op()) {
- case Token::EQ:
- case Token::NE:
- case Token::EQ_STRICT:
- case Token::NE_STRICT:
- case Token::INSTANCEOF:
- case Token::IN:
- SetStackOverflow();
- break;
-
- case Token::LT:
- case Token::GT:
- case Token::LTE:
- case Token::GTE:
- Visit(expr->left());
- Visit(expr->right());
- if (HasStackOverflow()) return;
- graph_.AppendInstruction(expr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
- SetStackOverflow();
-}
-
-
void AstLabeler::Label(CompilationInfo* info) {
info_ = info;
VisitStatements(info_->function()->body());
@@ -1300,6 +746,387 @@
}
+int ReachingDefinitions::IndexFor(Variable* var, int variable_count) {
+ // Parameters are numbered left-to-right from the beginning of the bit
+ // set. Stack-allocated locals are allocated right-to-left from the end.
+ ASSERT(var != NULL && var->IsStackAllocated());
+ Slot* slot = var->slot();
+ if (slot->type() == Slot::PARAMETER) {
+ return slot->index();
+ } else {
+ return (variable_count - 1) - slot->index();
+ }
+}
+
+
+void Node::InitializeReachingDefinitions(int definition_count,
+ List<BitVector*>* variables,
+ WorkList<Node>* worklist,
+ bool mark) {
+ ASSERT(!IsMarkedWith(mark));
+ rd_.Initialize(definition_count);
+ MarkWith(mark);
+ worklist->Insert(this);
+}
+
+
+void BlockNode::InitializeReachingDefinitions(int definition_count,
+ List<BitVector*>* variables,
+ WorkList<Node>* worklist,
+ bool mark) {
+ ASSERT(!IsMarkedWith(mark));
+ int instruction_count = instructions_.length();
+ int variable_count = variables->length();
+
+ rd_.Initialize(definition_count);
+ // The RD_in set for the entry node has a definition for each parameter
+ // and local.
+ if (predecessor_ == NULL) {
+ for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
+ }
+
+ for (int i = 0; i < instruction_count; i++) {
+ Expression* expr = instructions_[i]->AsExpression();
+ if (expr == NULL) continue;
+ Variable* var = expr->AssignedVariable();
+ if (var == NULL || !var->IsStackAllocated()) continue;
+
+ // All definitions of this variable are killed.
+ BitVector* def_set =
+ variables->at(ReachingDefinitions::IndexFor(var, variable_count));
+ rd_.kill()->Union(*def_set);
+
+ // All previously generated definitions are not generated.
+ rd_.gen()->Subtract(*def_set);
+
+ // This one is generated.
+ rd_.gen()->Add(expr->num());
+ }
+
+ // Add all blocks except the entry node to the worklist.
+ if (predecessor_ != NULL) {
+ MarkWith(mark);
+ worklist->Insert(this);
+ }
+}
+
+
+void ExitNode::ComputeRDOut(BitVector* result) {
+ // Should not be the predecessor of any node.
+ UNREACHABLE();
+}
+
+
+void BlockNode::ComputeRDOut(BitVector* result) {
+ // All definitions reaching this block ...
+ *result = *rd_.rd_in();
+ // ... except those killed by the block ...
+ result->Subtract(*rd_.kill());
+ // ... but including those generated by the block.
+ result->Union(*rd_.gen());
+}
+
+
+void BranchNode::ComputeRDOut(BitVector* result) {
+ // Branch nodes don't kill or generate definitions.
+ *result = *rd_.rd_in();
+}
+
+
+void JoinNode::ComputeRDOut(BitVector* result) {
+ // Join nodes don't kill or generate definitions.
+ *result = *rd_.rd_in();
+}
+
+
+void ExitNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
+ // The exit node has no successors so we can just update in place. New
+ // RD_in is the union over all predecessors.
+ int definition_count = rd_.rd_in()->length();
+ rd_.rd_in()->Clear();
+
+ BitVector temp(definition_count);
+ for (int i = 0, len = predecessors_.length(); i < len; i++) {
+ // Because ComputeRDOut always overwrites temp and its value is
+ // always read out before calling ComputeRDOut again, we do not
+ // have to clear it on each iteration of the loop.
+ predecessors_[i]->ComputeRDOut(&temp);
+ rd_.rd_in()->Union(temp);
+ }
+}
+
+
+void BlockNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
+ // The entry block has no predecessor. Its RD_in does not change.
+ if (predecessor_ == NULL) return;
+
+ BitVector new_rd_in(rd_.rd_in()->length());
+ predecessor_->ComputeRDOut(&new_rd_in);
+
+ if (rd_.rd_in()->Equals(new_rd_in)) return;
+
+ // Update RD_in.
+ *rd_.rd_in() = new_rd_in;
+ // Add the successor to the worklist if not already present.
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ worklist->Insert(successor_);
+ }
+}
+
+
+void BranchNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
+ BitVector new_rd_in(rd_.rd_in()->length());
+ predecessor_->ComputeRDOut(&new_rd_in);
+
+ if (rd_.rd_in()->Equals(new_rd_in)) return;
+
+ // Update RD_in.
+ *rd_.rd_in() = new_rd_in;
+ // Add the successors to the worklist if not already present.
+ if (!successor0_->IsMarkedWith(mark)) {
+ successor0_->MarkWith(mark);
+ worklist->Insert(successor0_);
+ }
+ if (!successor1_->IsMarkedWith(mark)) {
+ successor1_->MarkWith(mark);
+ worklist->Insert(successor1_);
+ }
+}
+
+
+void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
+ int definition_count = rd_.rd_in()->length();
+ BitVector new_rd_in(definition_count);
+
+ // New RD_in is the union over all predecessors.
+ BitVector temp(definition_count);
+ for (int i = 0, len = predecessors_.length(); i < len; i++) {
+ predecessors_[i]->ComputeRDOut(&temp);
+ new_rd_in.Union(temp);
+ }
+
+ if (rd_.rd_in()->Equals(new_rd_in)) return;
+
+ // Update RD_in.
+ *rd_.rd_in() = new_rd_in;
+ // Add the successor to the worklist if not already present.
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ worklist->Insert(successor_);
+ }
+}
+
+
+void Node::PropagateReachingDefinitions(List<BitVector*>* variables) {
+ // Nothing to do.
+}
+
+
+void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
+ // Propagate RD_in from the start of the block to all the variable
+ // references.
+ int variable_count = variables->length();
+ BitVector rd = *rd_.rd_in();
+ for (int i = 0, len = instructions_.length(); i < len; i++) {
+ Expression* expr = instructions_[i]->AsExpression();
+ if (expr == NULL) continue;
+
+ // Look for a variable reference to record its reaching definitions.
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy == NULL) {
+ // Not a VariableProxy? Maybe it's a count operation.
+ CountOperation* count_operation = expr->AsCountOperation();
+ if (count_operation != NULL) {
+ proxy = count_operation->expression()->AsVariableProxy();
+ }
+ }
+ if (proxy == NULL) {
+      // OK, maybe it's a compound assignment.
+ Assignment* assignment = expr->AsAssignment();
+ if (assignment != NULL && assignment->is_compound()) {
+ proxy = assignment->target()->AsVariableProxy();
+ }
+ }
+
+ if (proxy != NULL &&
+ proxy->var()->IsStackAllocated() &&
+ !proxy->var()->is_this()) {
+ // All definitions for this variable.
+ BitVector* definitions =
+ variables->at(ReachingDefinitions::IndexFor(proxy->var(),
+ variable_count));
+ BitVector* reaching_definitions = new BitVector(*definitions);
+ // Intersected with all definitions (of any variable) reaching this
+ // instruction.
+ reaching_definitions->Intersect(rd);
+ proxy->set_reaching_definitions(reaching_definitions);
+ }
+
+ // It may instead (or also) be a definition. If so update the running
+ // value of reaching definitions for the block.
+ Variable* var = expr->AssignedVariable();
+ if (var == NULL || !var->IsStackAllocated()) continue;
+
+ // All definitions of this variable are killed.
+ BitVector* def_set =
+ variables->at(ReachingDefinitions::IndexFor(var, variable_count));
+ rd.Subtract(*def_set);
+ // This definition is generated.
+ rd.Add(expr->num());
+ }
+}
+
+
+void ReachingDefinitions::Compute() {
+ // The definitions in the body plus an implicit definition for each
+ // variable at function entry.
+ int definition_count = body_definitions_->length() + variable_count_;
+ int node_count = postorder_->length();
+
+ // Step 1: For each stack-allocated variable, identify the set of all its
+ // definitions.
+ List<BitVector*> variables;
+ for (int i = 0; i < variable_count_; i++) {
+ // Add the initial definition for each variable.
+ BitVector* initial = new BitVector(definition_count);
+ initial->Add(i);
+ variables.Add(initial);
+ }
+ for (int i = 0, len = body_definitions_->length(); i < len; i++) {
+ // Account for each definition in the body as a definition of the
+ // defined variable.
+ Variable* var = body_definitions_->at(i)->AssignedVariable();
+ variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
+ }
+
+ // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
+ // all nodes, and mark and add all nodes to the worklist in reverse
+ // postorder. All nodes should currently have the same mark.
+ bool mark = postorder_->at(0)->IsMarkedWith(false); // Negation of current.
+ WorkList<Node> worklist(node_count);
+ for (int i = node_count - 1; i >= 0; i--) {
+ postorder_->at(i)->InitializeReachingDefinitions(definition_count,
+ &variables,
+ &worklist,
+ mark);
+ }
+
+  // Step 3: Until the worklist is empty, remove an item, compute and update
+ // its rd_in based on its predecessor's rd_out. If rd_in has changed, add
+ // all necessary successors to the worklist.
+ while (!worklist.is_empty()) {
+ Node* node = worklist.Remove();
+ node->MarkWith(!mark);
+ node->UpdateRDIn(&worklist, mark);
+ }
+
+ // Step 4: Based on RD_in for block nodes, propagate reaching definitions
+ // to all variable uses in the block.
+ for (int i = 0; i < node_count; i++) {
+ postorder_->at(i)->PropagateReachingDefinitions(&variables);
+ }
+}
+
+
+bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
+ if (def_num < param_count_) return false;
+ if (def_num < variable_count_) return true;
+ return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
+}
+
+
+void TypeAnalyzer::Compute() {
+ bool changed;
+ int count = 0;
+
+ do {
+ changed = false;
+
+ if (FLAG_print_graph_text) {
+ PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
+ }
+
+ for (int i = postorder_->length() - 1; i >= 0; --i) {
+ Node* node = postorder_->at(i);
+ if (node->IsBlockNode()) {
+ BlockNode* block = BlockNode::cast(node);
+ for (int j = 0; j < block->instructions()->length(); j++) {
+ Expression* expr = block->instructions()->at(j)->AsExpression();
+ if (expr != NULL) {
+ // For variable uses: Compute new type from reaching definitions.
+ VariableProxy* proxy = expr->AsVariableProxy();
+ if (proxy != NULL && proxy->reaching_definitions() != NULL) {
+ BitVector* rd = proxy->reaching_definitions();
+ bool prim_type = true;
+ // TODO(fsc): A sparse set representation of reaching
+ // definitions would speed up iterating here.
+ for (int k = 0; k < rd->length(); k++) {
+ if (rd->Contains(k) && !IsPrimitiveDef(k)) {
+ prim_type = false;
+ break;
+ }
+ }
+ // Reset changed flag if new type information was computed.
+ if (prim_type != proxy->IsPrimitive()) {
+ changed = true;
+ proxy->SetIsPrimitive(prim_type);
+ }
+ }
+ }
+ }
+ }
+ }
+ } while (changed);
+}
+
+
+void Node::MarkCriticalInstructions(
+ List<AstNode*>* stack,
+ ZoneList<Expression*>* body_definitions,
+ int variable_count) {
+}
+
+
+void BlockNode::MarkCriticalInstructions(
+ List<AstNode*>* stack,
+ ZoneList<Expression*>* body_definitions,
+ int variable_count) {
+ for (int i = instructions_.length() - 1; i >= 0; i--) {
+ // Only expressions can appear in the flow graph for now.
+ Expression* expr = instructions_[i]->AsExpression();
+ if (expr != NULL && !expr->is_live() &&
+ (expr->is_loop_condition() || expr->IsCritical())) {
+ expr->mark_as_live();
+ expr->ProcessNonLiveChildren(stack, body_definitions, variable_count);
+ }
+ }
+}
+
+
+void MarkLiveCode(ZoneList<Node*>* nodes,
+ ZoneList<Expression*>* body_definitions,
+ int variable_count) {
+ List<AstNode*> stack(20);
+
+ // Mark the critical AST nodes as live; mark their dependencies and
+ // add them to the marking stack.
+ for (int i = nodes->length() - 1; i >= 0; i--) {
+ nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions,
+ variable_count);
+ }
+
+ // Continue marking dependencies until no more.
+ while (!stack.is_empty()) {
+ // Only expressions can appear in the flow graph for now.
+ Expression* expr = stack.RemoveLast()->AsExpression();
+ if (expr != NULL) {
+ expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count);
+ }
+ }
+}
+
+
#ifdef DEBUG
// Print a textual representation of an instruction in a flow graph. Using
@@ -1714,389 +1541,7 @@
}
}
-
-#endif // defined(DEBUG)
-
-
-int ReachingDefinitions::IndexFor(Variable* var, int variable_count) {
- // Parameters are numbered left-to-right from the beginning of the bit
- // set. Stack-allocated locals are allocated right-to-left from the end.
- ASSERT(var != NULL && var->IsStackAllocated());
- Slot* slot = var->slot();
- if (slot->type() == Slot::PARAMETER) {
- return slot->index();
- } else {
- return (variable_count - 1) - slot->index();
- }
-}
-
-
-void Node::InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark) {
- ASSERT(!IsMarkedWith(mark));
- rd_.Initialize(definition_count);
- MarkWith(mark);
- worklist->Insert(this);
-}
-
-
-void BlockNode::InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark) {
- ASSERT(!IsMarkedWith(mark));
- int instruction_count = instructions_.length();
- int variable_count = variables->length();
-
- rd_.Initialize(definition_count);
- // The RD_in set for the entry node has a definition for each parameter
- // and local.
- if (predecessor_ == NULL) {
- for (int i = 0; i < variable_count; i++) rd_.rd_in()->Add(i);
- }
-
- for (int i = 0; i < instruction_count; i++) {
- Expression* expr = instructions_[i]->AsExpression();
- if (expr == NULL) continue;
- Variable* var = expr->AssignedVariable();
- if (var == NULL || !var->IsStackAllocated()) continue;
-
- // All definitions of this variable are killed.
- BitVector* def_set =
- variables->at(ReachingDefinitions::IndexFor(var, variable_count));
- rd_.kill()->Union(*def_set);
-
- // All previously generated definitions are not generated.
- rd_.gen()->Subtract(*def_set);
-
- // This one is generated.
- rd_.gen()->Add(expr->num());
- }
-
- // Add all blocks except the entry node to the worklist.
- if (predecessor_ != NULL) {
- MarkWith(mark);
- worklist->Insert(this);
- }
-}
-
-
-void ExitNode::ComputeRDOut(BitVector* result) {
- // Should not be the predecessor of any node.
- UNREACHABLE();
-}
-
-
-void BlockNode::ComputeRDOut(BitVector* result) {
- // All definitions reaching this block ...
- *result = *rd_.rd_in();
- // ... except those killed by the block ...
- result->Subtract(*rd_.kill());
- // ... but including those generated by the block.
- result->Union(*rd_.gen());
-}
-
-
-void BranchNode::ComputeRDOut(BitVector* result) {
- // Branch nodes don't kill or generate definitions.
- *result = *rd_.rd_in();
-}
-
-
-void JoinNode::ComputeRDOut(BitVector* result) {
- // Join nodes don't kill or generate definitions.
- *result = *rd_.rd_in();
-}
-
-
-void ExitNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- // The exit node has no successors so we can just update in place. New
- // RD_in is the union over all predecessors.
- int definition_count = rd_.rd_in()->length();
- rd_.rd_in()->Clear();
-
- BitVector temp(definition_count);
- for (int i = 0, len = predecessors_.length(); i < len; i++) {
- // Because ComputeRDOut always overwrites temp and its value is
- // always read out before calling ComputeRDOut again, we do not
- // have to clear it on each iteration of the loop.
- predecessors_[i]->ComputeRDOut(&temp);
- rd_.rd_in()->Union(temp);
- }
-}
-
-
-void BlockNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- // The entry block has no predecessor. Its RD_in does not change.
- if (predecessor_ == NULL) return;
-
- BitVector new_rd_in(rd_.rd_in()->length());
- predecessor_->ComputeRDOut(&new_rd_in);
-
- if (rd_.rd_in()->Equals(new_rd_in)) return;
-
- // Update RD_in.
- *rd_.rd_in() = new_rd_in;
- // Add the successor to the worklist if not already present.
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- worklist->Insert(successor_);
- }
-}
-
-
-void BranchNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- BitVector new_rd_in(rd_.rd_in()->length());
- predecessor_->ComputeRDOut(&new_rd_in);
-
- if (rd_.rd_in()->Equals(new_rd_in)) return;
-
- // Update RD_in.
- *rd_.rd_in() = new_rd_in;
- // Add the successors to the worklist if not already present.
- if (!successor0_->IsMarkedWith(mark)) {
- successor0_->MarkWith(mark);
- worklist->Insert(successor0_);
- }
- if (!successor1_->IsMarkedWith(mark)) {
- successor1_->MarkWith(mark);
- worklist->Insert(successor1_);
- }
-}
-
-
-void JoinNode::UpdateRDIn(WorkList<Node>* worklist, bool mark) {
- int definition_count = rd_.rd_in()->length();
- BitVector new_rd_in(definition_count);
-
- // New RD_in is the union over all predecessors.
- BitVector temp(definition_count);
- for (int i = 0, len = predecessors_.length(); i < len; i++) {
- predecessors_[i]->ComputeRDOut(&temp);
- new_rd_in.Union(temp);
- }
-
- if (rd_.rd_in()->Equals(new_rd_in)) return;
-
- // Update RD_in.
- *rd_.rd_in() = new_rd_in;
- // Add the successor to the worklist if not already present.
- if (!successor_->IsMarkedWith(mark)) {
- successor_->MarkWith(mark);
- worklist->Insert(successor_);
- }
-}
-
-
-void Node::PropagateReachingDefinitions(List<BitVector*>* variables) {
- // Nothing to do.
-}
-
-
-void BlockNode::PropagateReachingDefinitions(List<BitVector*>* variables) {
- // Propagate RD_in from the start of the block to all the variable
- // references.
- int variable_count = variables->length();
- BitVector rd = *rd_.rd_in();
- for (int i = 0, len = instructions_.length(); i < len; i++) {
- Expression* expr = instructions_[i]->AsExpression();
- if (expr == NULL) continue;
-
- // Look for a variable reference to record its reaching definitions.
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy == NULL) {
- // Not a VariableProxy? Maybe it's a count operation.
- CountOperation* count_operation = expr->AsCountOperation();
- if (count_operation != NULL) {
- proxy = count_operation->expression()->AsVariableProxy();
- }
- }
- if (proxy == NULL) {
- // OK, Maybe it's a compound assignment.
- Assignment* assignment = expr->AsAssignment();
- if (assignment != NULL && assignment->is_compound()) {
- proxy = assignment->target()->AsVariableProxy();
- }
- }
-
- if (proxy != NULL &&
- proxy->var()->IsStackAllocated() &&
- !proxy->var()->is_this()) {
- // All definitions for this variable.
- BitVector* definitions =
- variables->at(ReachingDefinitions::IndexFor(proxy->var(),
- variable_count));
- BitVector* reaching_definitions = new BitVector(*definitions);
- // Intersected with all definitions (of any variable) reaching this
- // instruction.
- reaching_definitions->Intersect(rd);
- proxy->set_reaching_definitions(reaching_definitions);
- }
-
- // It may instead (or also) be a definition. If so update the running
- // value of reaching definitions for the block.
- Variable* var = expr->AssignedVariable();
- if (var == NULL || !var->IsStackAllocated()) continue;
-
- // All definitions of this variable are killed.
- BitVector* def_set =
- variables->at(ReachingDefinitions::IndexFor(var, variable_count));
- rd.Subtract(*def_set);
- // This definition is generated.
- rd.Add(expr->num());
- }
-}
-
-
-void ReachingDefinitions::Compute() {
- // The definitions in the body plus an implicit definition for each
- // variable at function entry.
- int definition_count = body_definitions_->length() + variable_count_;
- int node_count = postorder_->length();
-
- // Step 1: For each stack-allocated variable, identify the set of all its
- // definitions.
- List<BitVector*> variables;
- for (int i = 0; i < variable_count_; i++) {
- // Add the initial definition for each variable.
- BitVector* initial = new BitVector(definition_count);
- initial->Add(i);
- variables.Add(initial);
- }
- for (int i = 0, len = body_definitions_->length(); i < len; i++) {
- // Account for each definition in the body as a definition of the
- // defined variable.
- Variable* var = body_definitions_->at(i)->AssignedVariable();
- variables[IndexFor(var, variable_count_)]->Add(i + variable_count_);
- }
-
- // Step 2: Compute KILL and GEN for each block node, initialize RD_in for
- // all nodes, and mark and add all nodes to the worklist in reverse
- // postorder. All nodes should currently have the same mark.
- bool mark = postorder_->at(0)->IsMarkedWith(false); // Negation of current.
- WorkList<Node> worklist(node_count);
- for (int i = node_count - 1; i >= 0; i--) {
- postorder_->at(i)->InitializeReachingDefinitions(definition_count,
- &variables,
- &worklist,
- mark);
- }
-
- // Step 3: Until the worklist is empty, remove an item compute and update
- // its rd_in based on its predecessor's rd_out. If rd_in has changed, add
- // all necessary successors to the worklist.
- while (!worklist.is_empty()) {
- Node* node = worklist.Remove();
- node->MarkWith(!mark);
- node->UpdateRDIn(&worklist, mark);
- }
-
- // Step 4: Based on RD_in for block nodes, propagate reaching definitions
- // to all variable uses in the block.
- for (int i = 0; i < node_count; i++) {
- postorder_->at(i)->PropagateReachingDefinitions(&variables);
- }
-}
-
-
-bool TypeAnalyzer::IsPrimitiveDef(int def_num) {
- if (def_num < param_count_) return false;
- if (def_num < variable_count_) return true;
- return body_definitions_->at(def_num - variable_count_)->IsPrimitive();
-}
-
-
-void TypeAnalyzer::Compute() {
- bool changed;
- int count = 0;
-
- do {
- changed = false;
-
- if (FLAG_print_graph_text) {
- PrintF("TypeAnalyzer::Compute - iteration %d\n", count++);
- }
-
- for (int i = postorder_->length() - 1; i >= 0; --i) {
- Node* node = postorder_->at(i);
- if (node->IsBlockNode()) {
- BlockNode* block = BlockNode::cast(node);
- for (int j = 0; j < block->instructions()->length(); j++) {
- Expression* expr = block->instructions()->at(j)->AsExpression();
- if (expr != NULL) {
- // For variable uses: Compute new type from reaching definitions.
- VariableProxy* proxy = expr->AsVariableProxy();
- if (proxy != NULL && proxy->reaching_definitions() != NULL) {
- BitVector* rd = proxy->reaching_definitions();
- bool prim_type = true;
- // TODO(fsc): A sparse set representation of reaching
- // definitions would speed up iterating here.
- for (int k = 0; k < rd->length(); k++) {
- if (rd->Contains(k) && !IsPrimitiveDef(k)) {
- prim_type = false;
- break;
- }
- }
- // Reset changed flag if new type information was computed.
- if (prim_type != proxy->IsPrimitive()) {
- changed = true;
- proxy->SetIsPrimitive(prim_type);
- }
- }
- }
- }
- }
- }
- } while (changed);
-}
-
-
-void Node::MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
-}
-
-
-void BlockNode::MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- for (int i = instructions_.length() - 1; i >= 0; i--) {
- // Only expressions can appear in the flow graph for now.
- Expression* expr = instructions_[i]->AsExpression();
- if (expr != NULL && !expr->is_live() &&
- (expr->is_loop_condition() || expr->IsCritical())) {
- expr->mark_as_live();
- expr->ProcessNonLiveChildren(stack, body_definitions, variable_count);
- }
- }
-}
-
-
-void MarkLiveCode(ZoneList<Node*>* nodes,
- ZoneList<Expression*>* body_definitions,
- int variable_count) {
- List<AstNode*> stack(20);
-
- // Mark the critical AST nodes as live; mark their dependencies and
- // add them to the marking stack.
- for (int i = nodes->length() - 1; i >= 0; i--) {
- nodes->at(i)->MarkCriticalInstructions(&stack, body_definitions,
- variable_count);
- }
-
- // Continue marking dependencies until no more.
- while (!stack.is_empty()) {
- // Only expressions can appear in the flow graph for now.
- Expression* expr = stack.RemoveLast()->AsExpression();
- if (expr != NULL) {
- expr->ProcessNonLiveChildren(&stack, body_definitions, variable_count);
- }
- }
-}
+#endif // DEBUG
} } // namespace v8::internal
diff --git a/src/data-flow.h b/src/data-flow.h
index 8046e42..66df635 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -37,6 +37,9 @@
namespace v8 {
namespace internal {
+// Forward declarations.
+class Node;
+
class BitVector: public ZoneObject {
public:
explicit BitVector(int length)
@@ -205,344 +208,6 @@
};
-// Flow-graph nodes.
-class Node: public ZoneObject {
- public:
- Node() : number_(-1), mark_(false) {}
-
- virtual ~Node() {}
-
- virtual bool IsExitNode() { return false; }
- virtual bool IsBlockNode() { return false; }
- virtual bool IsBranchNode() { return false; }
- virtual bool IsJoinNode() { return false; }
-
- virtual void AddPredecessor(Node* predecessor) = 0;
- virtual void AddSuccessor(Node* successor) = 0;
-
- bool IsMarkedWith(bool mark) { return mark_ == mark; }
- void MarkWith(bool mark) { mark_ = mark; }
-
- // Perform a depth first search and record preorder and postorder
- // traversal orders.
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder) = 0;
-
- int number() { return number_; }
- void set_number(int number) { number_ = number; }
-
- // Functions used by data-flow analyses.
- virtual void InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark);
- virtual void ComputeRDOut(BitVector* result) = 0;
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
- virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
-
- // Functions used by dead-code elimination.
- virtual void MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
-
-#ifdef DEBUG
- void AssignNodeNumber();
- void PrintReachingDefinitions();
- virtual void PrintText() = 0;
-#endif
-
- protected:
- ReachingDefinitionsData rd_;
-
- private:
- int number_;
- bool mark_;
-
- DISALLOW_COPY_AND_ASSIGN(Node);
-};
-
-
-// An exit node has a arbitrarily many predecessors and no successors.
-class ExitNode: public Node {
- public:
- ExitNode() : predecessors_(4) {}
-
- virtual bool IsExitNode() { return true; }
-
- virtual void AddPredecessor(Node* predecessor) {
- ASSERT(predecessor != NULL);
- predecessors_.Add(predecessor);
- }
-
- virtual void AddSuccessor(Node* successor) { UNREACHABLE(); }
-
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
-
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
-
-#ifdef DEBUG
- virtual void PrintText();
-#endif
-
- private:
- ZoneList<Node*> predecessors_;
-
- DISALLOW_COPY_AND_ASSIGN(ExitNode);
-};
-
-
-// Block nodes have a single successor and predecessor and a list of
-// instructions.
-class BlockNode: public Node {
- public:
- BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}
-
- static BlockNode* cast(Node* node) {
- ASSERT(node->IsBlockNode());
- return reinterpret_cast<BlockNode*>(node);
- }
-
- virtual bool IsBlockNode() { return true; }
-
- bool is_empty() { return instructions_.is_empty(); }
-
- ZoneList<AstNode*>* instructions() { return &instructions_; }
-
- virtual void AddPredecessor(Node* predecessor) {
- ASSERT(predecessor_ == NULL && predecessor != NULL);
- predecessor_ = predecessor;
- }
-
- virtual void AddSuccessor(Node* successor) {
- ASSERT(successor_ == NULL && successor != NULL);
- successor_ = successor;
- }
-
- void AddInstruction(AstNode* instruction) {
- instructions_.Add(instruction);
- }
-
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
-
- virtual void InitializeReachingDefinitions(int definition_count,
- List<BitVector*>* variables,
- WorkList<Node>* worklist,
- bool mark);
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
- virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
-
- virtual void MarkCriticalInstructions(
- List<AstNode*>* stack,
- ZoneList<Expression*>* body_definitions,
- int variable_count);
-
-#ifdef DEBUG
- virtual void PrintText();
-#endif
-
- private:
- Node* predecessor_;
- Node* successor_;
- ZoneList<AstNode*> instructions_;
-
- DISALLOW_COPY_AND_ASSIGN(BlockNode);
-};
-
-
-// Branch nodes have a single predecessor and a pair of successors.
-class BranchNode: public Node {
- public:
- BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
-
- virtual bool IsBranchNode() { return true; }
-
- virtual void AddPredecessor(Node* predecessor) {
- ASSERT(predecessor_ == NULL && predecessor != NULL);
- predecessor_ = predecessor;
- }
-
- virtual void AddSuccessor(Node* successor) {
- ASSERT(successor1_ == NULL && successor != NULL);
- if (successor0_ == NULL) {
- successor0_ = successor;
- } else {
- successor1_ = successor;
- }
- }
-
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
-
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
-
-#ifdef DEBUG
- virtual void PrintText();
-#endif
-
- private:
- Node* predecessor_;
- Node* successor0_;
- Node* successor1_;
-
- DISALLOW_COPY_AND_ASSIGN(BranchNode);
-};
-
-
-// Join nodes have arbitrarily many predecessors and a single successor.
-class JoinNode: public Node {
- public:
- JoinNode() : predecessors_(2), successor_(NULL) {}
-
- static JoinNode* cast(Node* node) {
- ASSERT(node->IsJoinNode());
- return reinterpret_cast<JoinNode*>(node);
- }
-
- virtual bool IsJoinNode() { return true; }
-
- virtual void AddPredecessor(Node* predecessor) {
- ASSERT(predecessor != NULL);
- predecessors_.Add(predecessor);
- }
-
- virtual void AddSuccessor(Node* successor) {
- ASSERT(successor_ == NULL && successor != NULL);
- successor_ = successor;
- }
-
- virtual void Traverse(bool mark,
- ZoneList<Node*>* preorder,
- ZoneList<Node*>* postorder);
-
- virtual void ComputeRDOut(BitVector* result);
- virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
-
-#ifdef DEBUG
- virtual void PrintText();
-#endif
-
- private:
- ZoneList<Node*> predecessors_;
- Node* successor_;
-
- DISALLOW_COPY_AND_ASSIGN(JoinNode);
-};
-
-
-// Flow graphs have a single entry and single exit. The empty flowgraph is
-// represented by both entry and exit being NULL.
-class FlowGraph BASE_EMBEDDED {
- public:
- static FlowGraph Empty() {
- FlowGraph graph;
- graph.entry_ = new BlockNode();
- graph.exit_ = graph.entry_;
- return graph;
- }
-
- bool is_empty() const {
- return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
- }
- Node* entry() const { return entry_; }
- Node* exit() const { return exit_; }
-
- // Add a single instruction to the end of this flowgraph.
- void AppendInstruction(AstNode* instruction);
-
- // Add a single node to the end of this flow graph.
- void AppendNode(Node* node);
-
- // Add a flow graph fragment to the end of this one.
- void AppendGraph(FlowGraph* graph);
-
- // Concatenate an if-then-else flow-graph to this one. Control is split
- // and merged, so the graph remains single-entry, single-exit.
- void Split(BranchNode* branch,
- FlowGraph* left,
- FlowGraph* right,
- JoinNode* merge);
-
- // Concatenate a forward loop (e.g., while or for loop) flow-graph to this
- // one. Control is split by the condition and merged back from the back
- // edge at end of the body to the beginning of the condition. The single
- // (free) exit of the result graph is the right (false) arm of the branch
- // node.
- void Loop(JoinNode* merge,
- FlowGraph* condition,
- BranchNode* branch,
- FlowGraph* body);
-
-#ifdef DEBUG
- void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
-#endif
-
- private:
- FlowGraph() : entry_(NULL), exit_(NULL) {}
-
- Node* entry_;
- Node* exit_;
-};
-
-
-// Construct a flow graph from a function literal. Build pre- and postorder
-// traversal orders as a byproduct.
-class FlowGraphBuilder: public AstVisitor {
- public:
- explicit FlowGraphBuilder(int variable_count)
- : graph_(FlowGraph::Empty()),
- global_exit_(NULL),
- preorder_(4),
- postorder_(4),
- variable_count_(variable_count),
- body_definitions_(4) {
- }
-
- void Build(FunctionLiteral* lit);
-
- FlowGraph* graph() { return &graph_; }
- ZoneList<Node*>* preorder() { return &preorder_; }
- ZoneList<Node*>* postorder() { return &postorder_; }
- ZoneList<Expression*>* body_definitions() { return &body_definitions_; }
-
- private:
- ExitNode* global_exit() { return global_exit_; }
-
- // Helpers to allow tranforming the ast during flow graph construction.
- void VisitStatements(ZoneList<Statement*>* stmts);
- Statement* ProcessStatement(Statement* stmt);
-
- // AST node visit functions.
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
- AST_NODE_LIST(DECLARE_VISIT)
-#undef DECLARE_VISIT
-
- FlowGraph graph_;
- ExitNode* global_exit_;
- ZoneList<Node*> preorder_;
- ZoneList<Node*> postorder_;
-
- // The flow graph builder collects a list of explicit definitions
- // (assignments and count operations) to stack-allocated variables to use
- // for reaching definitions analysis. It does not count the implicit
- // definition at function entry. AST node numbers in the AST are used to
- // refer into this list.
- int variable_count_;
- ZoneList<Expression*> body_definitions_;
-
- DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
-};
-
-
// This class is used to number all expressions in the AST according to
// their evaluation order (post-order left-to-right traversal).
class AstLabeler: public AstVisitor {
diff --git a/src/flow-graph.cc b/src/flow-graph.cc
new file mode 100644
index 0000000..bd9602f
--- /dev/null
+++ b/src/flow-graph.cc
@@ -0,0 +1,588 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "flow-graph.h"
+
+namespace v8 {
+namespace internal {
+
+void FlowGraph::AppendInstruction(AstNode* instruction) {
+ // Add a (non-null) AstNode to the end of the graph fragment.
+ ASSERT(instruction != NULL);
+ if (exit()->IsExitNode()) return;
+ if (!exit()->IsBlockNode()) AppendNode(new BlockNode());
+ BlockNode::cast(exit())->AddInstruction(instruction);
+}
+
+
+void FlowGraph::AppendNode(Node* node) {
+ // Add a node to the end of the graph. An empty block is added to
+  // maintain edge-split form (so that no join node or exit node is a
+  // successor of a branch node).
+ ASSERT(node != NULL);
+ if (exit()->IsExitNode()) return;
+ if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
+ AppendNode(new BlockNode());
+ }
+ exit()->AddSuccessor(node);
+ node->AddPredecessor(exit());
+ exit_ = node;
+}
+
+
+void FlowGraph::AppendGraph(FlowGraph* graph) {
+ // Add a flow graph fragment to the end of this one. An empty block is
+  // added to maintain edge-split form (so that no join node or exit node
+  // is a successor of a branch node).
+ ASSERT(graph != NULL);
+ if (exit()->IsExitNode()) return;
+ Node* node = graph->entry();
+ if (exit()->IsBranchNode() && (node->IsJoinNode() || node->IsExitNode())) {
+ AppendNode(new BlockNode());
+ }
+ exit()->AddSuccessor(node);
+ node->AddPredecessor(exit());
+ exit_ = graph->exit();
+}
+
+
+void FlowGraph::Split(BranchNode* branch,
+ FlowGraph* left,
+ FlowGraph* right,
+ JoinNode* join) {
+ // Add the branch node, left flowgraph, join node.
+ AppendNode(branch);
+ AppendGraph(left);
+ AppendNode(join);
+
+ // Splice in the right flowgraph.
+ right->AppendNode(join);
+ branch->AddSuccessor(right->entry());
+ right->entry()->AddPredecessor(branch);
+}
+
+
+void FlowGraph::Loop(JoinNode* join,
+ FlowGraph* condition,
+ BranchNode* branch,
+ FlowGraph* body) {
+ // Add the join, condition and branch. Add join's predecessors in
+ // left-to-right order.
+ AppendNode(join);
+ body->AppendNode(join);
+ AppendGraph(condition);
+ AppendNode(branch);
+
+ // Splice in the body flowgraph.
+ branch->AddSuccessor(body->entry());
+ body->entry()->AddPredecessor(branch);
+}
+
+
+void ExitNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ preorder->Add(this);
+ postorder->Add(this);
+}
+
+
+void BlockNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ ASSERT(successor_ != NULL);
+ preorder->Add(this);
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ successor_->Traverse(mark, preorder, postorder);
+ }
+ postorder->Add(this);
+}
+
+
+void BranchNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ ASSERT(successor0_ != NULL && successor1_ != NULL);
+ preorder->Add(this);
+ if (!successor1_->IsMarkedWith(mark)) {
+ successor1_->MarkWith(mark);
+ successor1_->Traverse(mark, preorder, postorder);
+ }
+ if (!successor0_->IsMarkedWith(mark)) {
+ successor0_->MarkWith(mark);
+ successor0_->Traverse(mark, preorder, postorder);
+ }
+ postorder->Add(this);
+}
+
+
+void JoinNode::Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) {
+ ASSERT(successor_ != NULL);
+ preorder->Add(this);
+ if (!successor_->IsMarkedWith(mark)) {
+ successor_->MarkWith(mark);
+ successor_->Traverse(mark, preorder, postorder);
+ }
+ postorder->Add(this);
+}
+
+
+void FlowGraphBuilder::Build(FunctionLiteral* lit) {
+ global_exit_ = new ExitNode();
+ VisitStatements(lit->body());
+
+ if (HasStackOverflow()) return;
+
+ // The graph can end with a branch node (if the function ended with a
+  // loop).  Maintain edge-split form (no join node or exit node as a
+  // successor of a branch node).
+ if (graph_.exit()->IsBranchNode()) graph_.AppendNode(new BlockNode());
+ graph_.AppendNode(global_exit_);
+
+ // Build preorder and postorder traversal orders. All the nodes in
+ // the graph have the same mark flag. For the traversal, use that
+ // flag's negation. Traversal will flip all the flags.
+ bool mark = graph_.entry()->IsMarkedWith(false);
+ graph_.entry()->MarkWith(mark);
+ graph_.entry()->Traverse(mark, &preorder_, &postorder_);
+}
+
+
+// This function peels off one iteration of a for-loop. The return value
+// is either a block statement containing the peeled loop or NULL in case
+// there is a stack overflow.
+static Statement* PeelForLoop(ForStatement* stmt) {
+ // Mark this for-statement as processed.
+ stmt->set_peel_this_loop(false);
+
+ // Create new block containing the init statement of the for-loop and
+ // an if-statement containing the peeled iteration and the original
+ // loop without the init-statement.
+ Block* block = new Block(NULL, 2, false);
+ if (stmt->init() != NULL) {
+ Statement* init = stmt->init();
+ // The init statement gets the statement position of the for-loop
+ // to make debugging of peeled loops possible.
+ init->set_statement_pos(stmt->statement_pos());
+ block->AddStatement(init);
+ }
+
+ // Copy the condition.
+ CopyAstVisitor copy_visitor;
+ Expression* cond_copy = stmt->cond() != NULL
+ ? copy_visitor.DeepCopyExpr(stmt->cond())
+ : new Literal(Factory::true_value());
+ if (copy_visitor.HasStackOverflow()) return NULL;
+
+ // Construct a block with the peeled body and the rest of the for-loop.
+ Statement* body_copy = copy_visitor.DeepCopyStmt(stmt->body());
+ if (copy_visitor.HasStackOverflow()) return NULL;
+
+ Statement* next_copy = stmt->next() != NULL
+ ? copy_visitor.DeepCopyStmt(stmt->next())
+ : new EmptyStatement();
+ if (copy_visitor.HasStackOverflow()) return NULL;
+
+ Block* peeled_body = new Block(NULL, 3, false);
+ peeled_body->AddStatement(body_copy);
+ peeled_body->AddStatement(next_copy);
+ peeled_body->AddStatement(stmt);
+
+ // Remove the duplicated init statement from the for-statement.
+ stmt->set_init(NULL);
+
+ // Create new test at the top and add it to the newly created block.
+ IfStatement* test = new IfStatement(cond_copy,
+ peeled_body,
+ new EmptyStatement());
+ block->AddStatement(test);
+ return block;
+}
+
+
+void FlowGraphBuilder::VisitStatements(ZoneList<Statement*>* stmts) {
+ for (int i = 0, len = stmts->length(); i < len; i++) {
+ stmts->at(i) = ProcessStatement(stmts->at(i));
+ }
+}
+
+
+Statement* FlowGraphBuilder::ProcessStatement(Statement* stmt) {
+ if (FLAG_loop_peeling &&
+ stmt->AsForStatement() != NULL &&
+ stmt->AsForStatement()->peel_this_loop()) {
+ Statement* tmp_stmt = PeelForLoop(stmt->AsForStatement());
+ if (tmp_stmt == NULL) {
+ SetStackOverflow();
+ } else {
+ stmt = tmp_stmt;
+ }
+ }
+ Visit(stmt);
+ return stmt;
+}
+
+
+void FlowGraphBuilder::VisitDeclaration(Declaration* decl) {
+ UNREACHABLE();
+}
+
+
+void FlowGraphBuilder::VisitBlock(Block* stmt) {
+ VisitStatements(stmt->statements());
+}
+
+
+void FlowGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+ Visit(stmt->expression());
+}
+
+
+void FlowGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+ // Nothing to do.
+}
+
+
+void FlowGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+ Visit(stmt->condition());
+
+ BranchNode* branch = new BranchNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ stmt->set_then_statement(ProcessStatement(stmt->then_statement()));
+
+ FlowGraph left = graph_;
+ graph_ = FlowGraph::Empty();
+ stmt->set_else_statement(ProcessStatement(stmt->else_statement()));
+
+ if (HasStackOverflow()) return;
+ JoinNode* join = new JoinNode();
+ original.Split(branch, &left, &graph_, join);
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitWithEnterStatement(WithEnterStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitWithExitStatement(WithExitStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitForStatement(ForStatement* stmt) {
+ if (stmt->init() != NULL) stmt->set_init(ProcessStatement(stmt->init()));
+
+ JoinNode* join = new JoinNode();
+ FlowGraph original = graph_;
+ graph_ = FlowGraph::Empty();
+ if (stmt->cond() != NULL) Visit(stmt->cond());
+
+ BranchNode* branch = new BranchNode();
+ FlowGraph condition = graph_;
+ graph_ = FlowGraph::Empty();
+ stmt->set_body(ProcessStatement(stmt->body()));
+
+ if (stmt->next() != NULL) stmt->set_next(ProcessStatement(stmt->next()));
+
+ if (HasStackOverflow()) return;
+ original.Loop(join, &condition, branch, &graph_);
+ graph_ = original;
+}
+
+
+void FlowGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitSharedFunctionInfoLiteral(
+ SharedFunctionInfoLiteral* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitConditional(Conditional* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitSlot(Slot* expr) {
+ UNREACHABLE();
+}
+
+
+void FlowGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitLiteral(Literal* expr) {
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitCatchExtensionObject(CatchExtensionObject* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitAssignment(Assignment* expr) {
+ Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+ Property* prop = expr->target()->AsProperty();
+ // Left-hand side can be a variable or property (or reference error) but
+ // not both.
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ if (expr->is_compound()) Visit(expr->target());
+ Visit(expr->value());
+ if (var->IsStackAllocated()) {
+ // The first definition in the body is numbered n, where n is the
+ // number of parameters and stack-allocated locals.
+ expr->set_num(body_definitions_.length() + variable_count_);
+ body_definitions_.Add(expr);
+ }
+
+ } else if (prop != NULL) {
+ Visit(prop->obj());
+ if (!prop->key()->IsPropertyName()) Visit(prop->key());
+ Visit(expr->value());
+ }
+
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitThrow(Throw* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitProperty(Property* expr) {
+ Visit(expr->obj());
+ if (!expr->key()->IsPropertyName()) Visit(expr->key());
+
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCall(Call* expr) {
+ Visit(expr->expression());
+ ZoneList<Expression*>* arguments = expr->arguments();
+ for (int i = 0, len = arguments->length(); i < len; i++) {
+ Visit(arguments->at(i));
+ }
+
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitCallNew(CallNew* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+ SetStackOverflow();
+}
+
+
+void FlowGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::NOT:
+ case Token::BIT_NOT:
+ case Token::DELETE:
+ case Token::TYPEOF:
+ case Token::VOID:
+ SetStackOverflow();
+ break;
+
+ case Token::ADD:
+ case Token::SUB:
+ Visit(expr->expression());
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FlowGraphBuilder::VisitCountOperation(CountOperation* expr) {
+ Visit(expr->expression());
+ Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
+ if (var != NULL && var->IsStackAllocated()) {
+ // The first definition in the body is numbered n, where n is the number
+ // of parameters and stack-allocated locals.
+ expr->set_num(body_definitions_.length() + variable_count_);
+ body_definitions_.Add(expr);
+ }
+
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+}
+
+
+void FlowGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+ switch (expr->op()) {
+ case Token::COMMA:
+ case Token::OR:
+ case Token::AND:
+ SetStackOverflow();
+ break;
+
+ case Token::BIT_OR:
+ case Token::BIT_XOR:
+ case Token::BIT_AND:
+ case Token::SHL:
+ case Token::SHR:
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV:
+ case Token::MOD:
+ case Token::SAR:
+ Visit(expr->left());
+ Visit(expr->right());
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FlowGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+ switch (expr->op()) {
+ case Token::EQ:
+ case Token::NE:
+ case Token::EQ_STRICT:
+ case Token::NE_STRICT:
+ case Token::INSTANCEOF:
+ case Token::IN:
+ SetStackOverflow();
+ break;
+
+ case Token::LT:
+ case Token::GT:
+ case Token::LTE:
+ case Token::GTE:
+ Visit(expr->left());
+ Visit(expr->right());
+ if (HasStackOverflow()) return;
+ graph_.AppendInstruction(expr);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FlowGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+ SetStackOverflow();
+}
+
+
+} } // namespace v8::internal
diff --git a/src/flow-graph.h b/src/flow-graph.h
new file mode 100644
index 0000000..183b71d
--- /dev/null
+++ b/src/flow-graph.h
@@ -0,0 +1,379 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_FLOW_GRAPH_H_
+#define V8_FLOW_GRAPH_H_
+
+#include "v8.h"
+
+#include "data-flow.h"
+#include "zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Flow-graph nodes.
+class Node: public ZoneObject {
+ public:
+ Node() : number_(-1), mark_(false) {}
+
+ virtual ~Node() {}
+
+ virtual bool IsExitNode() { return false; }
+ virtual bool IsBlockNode() { return false; }
+ virtual bool IsBranchNode() { return false; }
+ virtual bool IsJoinNode() { return false; }
+
+ virtual void AddPredecessor(Node* predecessor) = 0;
+ virtual void AddSuccessor(Node* successor) = 0;
+
+ bool IsMarkedWith(bool mark) { return mark_ == mark; }
+ void MarkWith(bool mark) { mark_ = mark; }
+
+ // Perform a depth first search and record preorder and postorder
+ // traversal orders.
+ virtual void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder) = 0;
+
+ int number() { return number_; }
+ void set_number(int number) { number_ = number; }
+
+ // Functions used by data-flow analyses.
+ virtual void InitializeReachingDefinitions(int definition_count,
+ List<BitVector*>* variables,
+ WorkList<Node>* worklist,
+ bool mark);
+ virtual void ComputeRDOut(BitVector* result) = 0;
+ virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark) = 0;
+ virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
+
+ // Functions used by dead-code elimination.
+ virtual void MarkCriticalInstructions(
+ List<AstNode*>* stack,
+ ZoneList<Expression*>* body_definitions,
+ int variable_count);
+
+#ifdef DEBUG
+ void AssignNodeNumber();
+ void PrintReachingDefinitions();
+ virtual void PrintText() = 0;
+#endif
+
+ protected:
+ ReachingDefinitionsData rd_;
+
+ private:
+ int number_;
+ bool mark_;
+
+ DISALLOW_COPY_AND_ASSIGN(Node);
+};
+
+
+// An exit node has arbitrarily many predecessors and no successors.
+class ExitNode: public Node {
+ public:
+ ExitNode() : predecessors_(4) {}
+
+ virtual bool IsExitNode() { return true; }
+
+ virtual void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor != NULL);
+ predecessors_.Add(predecessor);
+ }
+
+ virtual void AddSuccessor(Node* successor) { UNREACHABLE(); }
+
+ virtual void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+ virtual void ComputeRDOut(BitVector* result);
+ virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+
+#ifdef DEBUG
+ virtual void PrintText();
+#endif
+
+ private:
+ ZoneList<Node*> predecessors_;
+
+ DISALLOW_COPY_AND_ASSIGN(ExitNode);
+};
+
+
+// Block nodes have a single successor and predecessor and a list of
+// instructions.
+class BlockNode: public Node {
+ public:
+ BlockNode() : predecessor_(NULL), successor_(NULL), instructions_(4) {}
+
+ static BlockNode* cast(Node* node) {
+ ASSERT(node->IsBlockNode());
+ return reinterpret_cast<BlockNode*>(node);
+ }
+
+ virtual bool IsBlockNode() { return true; }
+
+ bool is_empty() { return instructions_.is_empty(); }
+
+ ZoneList<AstNode*>* instructions() { return &instructions_; }
+
+ virtual void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor_ == NULL && predecessor != NULL);
+ predecessor_ = predecessor;
+ }
+
+ virtual void AddSuccessor(Node* successor) {
+ ASSERT(successor_ == NULL && successor != NULL);
+ successor_ = successor;
+ }
+
+ void AddInstruction(AstNode* instruction) {
+ instructions_.Add(instruction);
+ }
+
+ virtual void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+ virtual void InitializeReachingDefinitions(int definition_count,
+ List<BitVector*>* variables,
+ WorkList<Node>* worklist,
+ bool mark);
+ virtual void ComputeRDOut(BitVector* result);
+ virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+ virtual void PropagateReachingDefinitions(List<BitVector*>* variables);
+
+ virtual void MarkCriticalInstructions(
+ List<AstNode*>* stack,
+ ZoneList<Expression*>* body_definitions,
+ int variable_count);
+
+#ifdef DEBUG
+ virtual void PrintText();
+#endif
+
+ private:
+ Node* predecessor_;
+ Node* successor_;
+ ZoneList<AstNode*> instructions_;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockNode);
+};
+
+
+// Branch nodes have a single predecessor and a pair of successors.
+class BranchNode: public Node {
+ public:
+ BranchNode() : predecessor_(NULL), successor0_(NULL), successor1_(NULL) {}
+
+ virtual bool IsBranchNode() { return true; }
+
+ virtual void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor_ == NULL && predecessor != NULL);
+ predecessor_ = predecessor;
+ }
+
+ virtual void AddSuccessor(Node* successor) {
+ ASSERT(successor1_ == NULL && successor != NULL);
+ if (successor0_ == NULL) {
+ successor0_ = successor;
+ } else {
+ successor1_ = successor;
+ }
+ }
+
+ virtual void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+ virtual void ComputeRDOut(BitVector* result);
+ virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+
+#ifdef DEBUG
+ virtual void PrintText();
+#endif
+
+ private:
+ Node* predecessor_;
+ Node* successor0_;
+ Node* successor1_;
+
+ DISALLOW_COPY_AND_ASSIGN(BranchNode);
+};
+
+
+// Join nodes have arbitrarily many predecessors and a single successor.
+class JoinNode: public Node {
+ public:
+ JoinNode() : predecessors_(2), successor_(NULL) {}
+
+ static JoinNode* cast(Node* node) {
+ ASSERT(node->IsJoinNode());
+ return reinterpret_cast<JoinNode*>(node);
+ }
+
+ virtual bool IsJoinNode() { return true; }
+
+ virtual void AddPredecessor(Node* predecessor) {
+ ASSERT(predecessor != NULL);
+ predecessors_.Add(predecessor);
+ }
+
+ virtual void AddSuccessor(Node* successor) {
+ ASSERT(successor_ == NULL && successor != NULL);
+ successor_ = successor;
+ }
+
+ virtual void Traverse(bool mark,
+ ZoneList<Node*>* preorder,
+ ZoneList<Node*>* postorder);
+
+ virtual void ComputeRDOut(BitVector* result);
+ virtual void UpdateRDIn(WorkList<Node>* worklist, bool mark);
+
+#ifdef DEBUG
+ virtual void PrintText();
+#endif
+
+ private:
+ ZoneList<Node*> predecessors_;
+ Node* successor_;
+
+ DISALLOW_COPY_AND_ASSIGN(JoinNode);
+};
+
+
+// Flow graphs have a single entry and single exit. The empty flowgraph is
+// represented by both entry and exit being NULL.
+class FlowGraph BASE_EMBEDDED {
+ public:
+ static FlowGraph Empty() {
+ FlowGraph graph;
+ graph.entry_ = new BlockNode();
+ graph.exit_ = graph.entry_;
+ return graph;
+ }
+
+ bool is_empty() const {
+ return entry_ == exit_ && BlockNode::cast(entry_)->is_empty();
+ }
+ Node* entry() const { return entry_; }
+ Node* exit() const { return exit_; }
+
+ // Add a single instruction to the end of this flowgraph.
+ void AppendInstruction(AstNode* instruction);
+
+ // Add a single node to the end of this flow graph.
+ void AppendNode(Node* node);
+
+ // Add a flow graph fragment to the end of this one.
+ void AppendGraph(FlowGraph* graph);
+
+ // Concatenate an if-then-else flow-graph to this one. Control is split
+ // and merged, so the graph remains single-entry, single-exit.
+ void Split(BranchNode* branch,
+ FlowGraph* left,
+ FlowGraph* right,
+ JoinNode* merge);
+
+ // Concatenate a forward loop (e.g., while or for loop) flow-graph to this
+ // one. Control is split by the condition and merged back from the back
+ // edge at end of the body to the beginning of the condition. The single
+ // (free) exit of the result graph is the right (false) arm of the branch
+ // node.
+ void Loop(JoinNode* merge,
+ FlowGraph* condition,
+ BranchNode* branch,
+ FlowGraph* body);
+
+#ifdef DEBUG
+ void PrintText(FunctionLiteral* fun, ZoneList<Node*>* postorder);
+#endif
+
+ private:
+ FlowGraph() : entry_(NULL), exit_(NULL) {}
+
+ Node* entry_;
+ Node* exit_;
+};
+
+
+// Construct a flow graph from a function literal. Build pre- and postorder
+// traversal orders as a byproduct.
+class FlowGraphBuilder: public AstVisitor {
+ public:
+ explicit FlowGraphBuilder(int variable_count)
+ : graph_(FlowGraph::Empty()),
+ global_exit_(NULL),
+ preorder_(4),
+ postorder_(4),
+ variable_count_(variable_count),
+ body_definitions_(4) {
+ }
+
+ void Build(FunctionLiteral* lit);
+
+ FlowGraph* graph() { return &graph_; }
+ ZoneList<Node*>* preorder() { return &preorder_; }
+ ZoneList<Node*>* postorder() { return &postorder_; }
+ ZoneList<Expression*>* body_definitions() { return &body_definitions_; }
+
+ private:
+ ExitNode* global_exit() { return global_exit_; }
+
+  // Helpers to allow transforming the AST during flow graph construction.
+ void VisitStatements(ZoneList<Statement*>* stmts);
+ Statement* ProcessStatement(Statement* stmt);
+
+ // AST node visit functions.
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+ AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ FlowGraph graph_;
+ ExitNode* global_exit_;
+ ZoneList<Node*> preorder_;
+ ZoneList<Node*> postorder_;
+
+ // The flow graph builder collects a list of explicit definitions
+ // (assignments and count operations) to stack-allocated variables to use
+ // for reaching definitions analysis. It does not count the implicit
+ // definition at function entry. AST node numbers in the AST are used to
+ // refer into this list.
+ int variable_count_;
+ ZoneList<Expression*> body_definitions_;
+
+ DISALLOW_COPY_AND_ASSIGN(FlowGraphBuilder);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_FLOW_GRAPH_H_
diff --git a/src/handles.cc b/src/handles.cc
index f8a679b..d4c593f 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -541,7 +541,7 @@
void CustomArguments::IterateInstance(ObjectVisitor* v) {
- v->VisitPointers(values_, values_ + 4);
+ v->VisitPointers(values_, values_ + ARRAY_SIZE(values_));
}
diff --git a/src/heap.cc b/src/heap.cc
index a9754ce..1f1599a 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2663,7 +2663,7 @@
FixedArray* elements = FixedArray::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
- if (elements->length()> 0) {
+ if (elements->length() > 0) {
Object* elem = CopyFixedArray(elements);
if (elem->IsFailure()) return elem;
JSObject::cast(clone)->set_elements(FixedArray::cast(elem));
diff --git a/src/heap.h b/src/heap.h
index d37641c..2a0de23 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -938,6 +938,8 @@
static void RecordStats(HeapStats* stats);
+ static int MaxObjectSizeInNewSpace() { return kMaxObjectSizeInNewSpace; }
+
private:
static int reserved_semispace_size_;
static int max_semispace_size_;
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 20b6463..200e3ef 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -7424,10 +7424,8 @@
}
}
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = node->op();
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// According to ECMA-262 section 11.11, page 58, the binary logical
// operators must yield the result of one of the two expressions
// before any ToBoolean() conversions. This means that the value
@@ -7437,7 +7435,7 @@
// control flow), we force the right hand side to do the same. This
// is necessary because we assume that if we get control flow on the
// last path out of an expression we got it on all paths.
- if (op == Token::AND) {
+ if (node->op() == Token::AND) {
ASSERT(!in_safe_int32_mode());
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
@@ -7501,7 +7499,8 @@
exit.Bind();
}
- } else if (op == Token::OR) {
+ } else {
+ ASSERT(node->op() == Token::OR);
ASSERT(!in_safe_int32_mode());
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
@@ -7563,7 +7562,15 @@
// Exit (always with a materialized value).
exit.Bind();
}
+ }
+}
+
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+
+ if (node->op() == Token::AND || node->op() == Token::OR) {
+ GenerateLogicalBooleanOperation(node);
} else if (in_safe_int32_mode()) {
Visit(node->left());
Visit(node->right());
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index ca4a44b..9fcc466 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -489,6 +489,9 @@
// control destination.
void ToBoolean(ControlDestination* destination);
+ // Generate code that computes a shortcutting logical operation.
+ void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
void GenericBinaryOperation(
Token::Value op,
StaticType* type,
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 8d6c346..cf521a2 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -73,11 +73,10 @@
// Check for the absence of an interceptor.
// Load the map into r0.
__ mov(r0, FieldOperand(receiver, JSObject::kMapOffset));
- // Test the has_named_interceptor bit in the map.
- __ test(FieldOperand(r0, Map::kInstanceAttributesOffset),
- Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
- // Jump to miss if the interceptor bit is set.
+ // Bail out if the receiver has a named interceptor.
+ __ test(FieldOperand(r0, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNamedInterceptor));
__ j(not_zero, miss_label, not_taken);
// Bail out if we have a JS global proxy object.
@@ -202,17 +201,10 @@
__ xor_(r0, Operand(r1));
// Compute capacity mask.
- const int kCapacityOffset =
- NumberDictionary::kHeaderSize +
- NumberDictionary::kCapacityIndex * kPointerSize;
- __ mov(r1, FieldOperand(elements, kCapacityOffset));
+ __ mov(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
__ shr(r1, kSmiTagSize); // convert smi to int
__ dec(r1);
- const int kElementsStartOffset =
- NumberDictionary::kHeaderSize +
- NumberDictionary::kElementsStartIndex * kPointerSize;
-
// Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
@@ -232,7 +224,7 @@
__ cmp(key, FieldOperand(elements,
r2,
times_pointer_size,
- kElementsStartOffset));
+ NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
__ j(equal, &done, taken);
} else {
@@ -242,14 +234,16 @@
__ bind(&done);
// Check that the value is a normal propety.
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
__ test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ j(not_zero, miss);
// Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
__ mov(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index e554a31..315600b 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -276,14 +276,15 @@
Register holder,
Register name,
JSObject* holder_obj) {
- __ push(receiver);
- __ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
ASSERT(!Heap::InNewSpace(interceptor));
- __ mov(receiver, Immediate(Handle<Object>(interceptor)));
+ Register scratch = name;
+ __ mov(scratch, Immediate(Handle<Object>(interceptor)));
+ __ push(scratch);
__ push(receiver);
- __ push(FieldOperand(receiver, InterceptorInfo::kDataOffset));
+ __ push(holder);
+ __ push(FieldOperand(scratch, InterceptorInfo::kDataOffset));
}
@@ -1045,17 +1046,16 @@
__ push(receiver); // receiver
__ push(reg); // holder
__ mov(other, Immediate(callback_handle));
- __ push(other);
__ push(FieldOperand(other, AccessorInfo::kDataOffset)); // data
__ push(name_reg); // name
// Save a pointer to where we pushed the arguments pointer.
- // This will be passed as the const Arguments& to the C++ callback.
+ // This will be passed as the const AccessorInfo& to the C++ callback.
__ mov(eax, esp);
- __ add(Operand(eax), Immediate(5 * kPointerSize));
+ __ add(Operand(eax), Immediate(4 * kPointerSize));
__ mov(ebx, esp);
// Do call through the api.
- ASSERT_EQ(6, ApiGetterEntryStub::kStackSpace);
+ ASSERT_EQ(5, ApiGetterEntryStub::kStackSpace);
Address getter_address = v8::ToCData<Address>(callback->getter());
ApiFunction fun(getter_address);
ApiGetterEntryStub stub(callback_handle, &fun);
@@ -1251,8 +1251,7 @@
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_rset_update,
- attempt_to_grow_elements, finish_push;
+ Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1278,8 +1277,6 @@
__ mov(ecx, Operand(esp, argc * kPointerSize));
__ mov(Operand(edx, 0), ecx);
- __ bind(&finish_push);
-
// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &with_rset_update);
@@ -1318,10 +1315,13 @@
// We fit and could grow elements.
__ mov(Operand::StaticVariable(new_space_allocation_top), ecx);
__ mov(ecx, Operand(esp, argc * kPointerSize));
+
+ // Push the argument...
__ mov(Operand(edx, 0), ecx);
+ // ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
- Immediate(Factory::undefined_value()));
+ Immediate(Factory::the_hole_value()));
}
// Restore receiver to edx as finish sequence assumes it's here.
@@ -1332,7 +1332,8 @@
Immediate(kAllocationDelta));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- __ jmp(&finish_push);
+ // Elements are in new space, so no remembered set updates are necessary.
+ __ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
}
diff --git a/src/oprofile-agent.cc b/src/oprofile-agent.cc
index 8aa3937..6df8f50 100644
--- a/src/oprofile-agent.cc
+++ b/src/oprofile-agent.cc
@@ -32,10 +32,6 @@
namespace v8 {
namespace internal {
-#ifdef ENABLE_OPROFILE_AGENT
-op_agent_t OProfileAgent::handle_ = NULL;
-#endif
-
bool OProfileAgent::Initialize() {
#ifdef ENABLE_OPROFILE_AGENT
@@ -70,47 +66,43 @@
}
+#ifdef ENABLE_OPROFILE_AGENT
+op_agent_t OProfileAgent::handle_ = NULL;
+
+
void OProfileAgent::CreateNativeCodeRegion(const char* name,
const void* ptr, unsigned int size) {
-#ifdef ENABLE_OPROFILE_AGENT
- if (handle_ == NULL) return;
op_write_native_code(handle_, name, (uint64_t)ptr, ptr, size);
-#endif
}
void OProfileAgent::CreateNativeCodeRegion(String* name,
const void* ptr, unsigned int size) {
-#ifdef ENABLE_OPROFILE_AGENT
- if (handle_ != NULL) {
- const char* func_name;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- func_name = name->length() > 0 ? *str : "<anonymous>";
- CreateNativeCodeRegion(func_name, ptr, size);
- }
-#endif
+ const char* func_name;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ func_name = name->length() > 0 ? *str : "<anonymous>";
+ CreateNativeCodeRegion(func_name, ptr, size);
}
void OProfileAgent::CreateNativeCodeRegion(String* name, String* source,
int line_num, const void* ptr, unsigned int size) {
-#ifdef ENABLE_OPROFILE_AGENT
- if (handle_ != NULL) {
- Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
- const char* func_name;
- SmartPointer<char> str =
- name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- func_name = name->length() > 0 ? *str : "<anonymous>";
- SmartPointer<char> source_str =
- source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
- func_name, *source_str, line_num) != -1) {
- CreateNativeCodeRegion(buf.start(), ptr, size);
- } else {
- CreateNativeCodeRegion("<script/func name too long>", ptr, size);
- }
+ Vector<char> buf = Vector<char>::New(OProfileAgent::kFormattingBufSize);
+ const char* func_name;
+ SmartPointer<char> str =
+ name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ func_name = name->length() > 0 ? *str : "<anonymous>";
+ SmartPointer<char> source_str =
+ source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+ if (v8::internal::OS::SNPrintF(buf, "%s %s:%d",
+ func_name, *source_str, line_num) != -1) {
+ CreateNativeCodeRegion(buf.start(), ptr, size);
+ } else {
+ CreateNativeCodeRegion("<script/func name too long>", ptr, size);
}
-#endif
}
-} }
+
+#endif // ENABLE_OPROFILE_AGENT
+
+} } // namespace v8::internal
diff --git a/src/oprofile-agent.h b/src/oprofile-agent.h
index 4c299bf..4c50f0f 100644
--- a/src/oprofile-agent.h
+++ b/src/oprofile-agent.h
@@ -37,6 +37,14 @@
// system headers (they have __uint64_t), but is defined
// in V8's headers.
#include <opagent.h> // NOLINT
+
+#define OPROFILE(Call) \
+ do { \
+ if (v8::internal::OProfileAgent::is_enabled()) \
+ v8::internal::OProfileAgent::Call; \
+ } while (false)
+#else
+#define OPROFILE(Call) ((void) 0)
#endif
namespace v8 {
@@ -46,13 +54,13 @@
public:
static bool Initialize();
static void TearDown();
+#ifdef ENABLE_OPROFILE_AGENT
static void CreateNativeCodeRegion(const char* name,
const void* ptr, unsigned int size);
static void CreateNativeCodeRegion(String* name,
const void* ptr, unsigned int size);
static void CreateNativeCodeRegion(String* name, String* source, int line_num,
const void* ptr, unsigned int size);
-#ifdef ENABLE_OPROFILE_AGENT
static bool is_enabled() { return handle_ != NULL; }
private:
diff --git a/src/platform.h b/src/platform.h
index f124cf1..76028e6 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -505,7 +505,6 @@
};
-#ifdef ENABLE_LOGGING_AND_PROFILING
// ----------------------------------------------------------------------------
// Sampler
//
@@ -533,6 +532,7 @@
int frames_count; // Number of captured frames.
};
+#ifdef ENABLE_LOGGING_AND_PROFILING
class Sampler {
public:
// Initialize sampler.
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index c942d9d..ce587bc 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -782,6 +782,10 @@
return *value;
}
+
+static const int kAccessorInfoOffsetInInterceptorArgs = 2;
+
+
/**
* Attempts to load a property with an interceptor (which must be present),
* but doesn't search the prototype chain.
@@ -790,11 +794,12 @@
* provide any value for the given name.
*/
Object* LoadPropertyWithInterceptorOnly(Arguments args) {
- JSObject* receiver_handle = JSObject::cast(args[0]);
- JSObject* holder_handle = JSObject::cast(args[1]);
- Handle<String> name_handle = args.at<String>(2);
- Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(3);
- Object* data_handle = args[4];
+ Handle<String> name_handle = args.at<String>(0);
+ Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1);
+ ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
+ ASSERT(args[2]->IsJSObject()); // Receiver.
+ ASSERT(args[3]->IsJSObject()); // Holder.
+ ASSERT(args.length() == 5); // Last arg is data object.
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
@@ -803,8 +808,8 @@
{
// Use the interceptor getter.
- CustomArguments args(data_handle, receiver_handle, holder_handle);
- v8::AccessorInfo info(args.end());
+ v8::AccessorInfo info(args.arguments() -
+ kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope;
v8::Handle<v8::Value> r;
{
@@ -842,11 +847,12 @@
static Object* LoadWithInterceptor(Arguments* args,
PropertyAttributes* attrs) {
- Handle<JSObject> receiver_handle = args->at<JSObject>(0);
- Handle<JSObject> holder_handle = args->at<JSObject>(1);
- Handle<String> name_handle = args->at<String>(2);
- Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(3);
- Handle<Object> data_handle = args->at<Object>(4);
+ Handle<String> name_handle = args->at<String>(0);
+ Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1);
+ ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2);
+ Handle<JSObject> receiver_handle = args->at<JSObject>(2);
+ Handle<JSObject> holder_handle = args->at<JSObject>(3);
+ ASSERT(args->length() == 5); // Last arg is data object.
Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
v8::NamedPropertyGetter getter =
@@ -855,8 +861,8 @@
{
// Use the interceptor getter.
- CustomArguments args(*data_handle, *receiver_handle, *holder_handle);
- v8::AccessorInfo info(args.end());
+ v8::AccessorInfo info(args->arguments() -
+ kAccessorInfoOffsetInInterceptorArgs);
HandleScope scope;
v8::Handle<v8::Value> r;
{
@@ -891,7 +897,7 @@
// If the property is present, return it.
if (attr != ABSENT) return result;
- return ThrowReferenceError(String::cast(args[2]));
+ return ThrowReferenceError(String::cast(args[0]));
}
diff --git a/src/utils.h b/src/utils.h
index 89cc5ef..8ff1f9b 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -581,11 +581,12 @@
#endif
#if defined(__GNUC__) && defined(STOS)
- asm("cld;"
+ asm volatile(
+ "cld;"
"rep ; " STOS
- : /* no output */
- : "c" (counter), "a" (value), "D" (dest)
- : /* no clobbered list as all inputs are considered clobbered */);
+ : "+&c" (counter), "+&D" (dest)
+ : "a" (value)
+ : "memory", "cc");
#else
for (int i = 0; i < counter; i++) {
dest[i] = value;
diff --git a/src/version.cc b/src/version.cc
index f7116d1..e3d0887 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 1
-#define BUILD_NUMBER 9
-#define PATCH_LEVEL 1
+#define BUILD_NUMBER 10
+#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
 // Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 108384c..dd8015f 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -3281,13 +3281,7 @@
}
-void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
- // TODO(X64): This code was copied verbatim from codegen-ia32.
- // Either find a reason to change it or move it to a shared location.
-
- Comment cmnt(masm_, "[ BinaryOperation");
- Token::Value op = node->op();
-
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// According to ECMA-262 section 11.11, page 58, the binary logical
// operators must yield the result of one of the two expressions
// before any ToBoolean() conversions. This means that the value
@@ -3297,7 +3291,7 @@
// control flow), we force the right hand side to do the same. This
// is necessary because we assume that if we get control flow on the
// last path out of an expression we got it on all paths.
- if (op == Token::AND) {
+ if (node->op() == Token::AND) {
JumpTarget is_true;
ControlDestination dest(&is_true, destination()->false_target(), true);
LoadCondition(node->left(), &dest, false);
@@ -3360,7 +3354,8 @@
exit.Bind();
}
- } else if (op == Token::OR) {
+ } else {
+ ASSERT(node->op() == Token::OR);
JumpTarget is_false;
ControlDestination dest(destination()->true_target(), &is_false, false);
LoadCondition(node->left(), &dest, false);
@@ -3421,7 +3416,14 @@
// Exit (always with a materialized value).
exit.Bind();
}
+ }
+}
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ Comment cmnt(masm_, "[ BinaryOperation");
+
+ if (node->op() == Token::AND || node->op() == Token::OR) {
+ GenerateLogicalBooleanOperation(node);
} else {
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 3f33918..8eea479 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -454,6 +454,9 @@
// control destination.
void ToBoolean(ControlDestination* destination);
+ // Generate code that computes a shortcutting logical operation.
+ void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
void GenericBinaryOperation(
Token::Value op,
StaticType* type,
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 77043ce..7212c05 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -72,11 +72,10 @@
// Check for the absence of an interceptor.
// Load the map into r0.
__ movq(r0, FieldOperand(r1, JSObject::kMapOffset));
- // Test the has_named_interceptor bit in the map.
- __ testl(FieldOperand(r0, Map::kInstanceAttributesOffset),
- Immediate(1 << (Map::kHasNamedInterceptor + (3 * 8))));
- // Jump to miss if the interceptor bit is set.
+ // Bail out if the receiver has a named interceptor.
+ __ testl(FieldOperand(r0, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNamedInterceptor));
__ j(not_zero, miss_label);
// Bail out if we have a JS global proxy object.
@@ -201,17 +200,10 @@
__ xorl(r0, r1);
// Compute capacity mask.
- const int kCapacityOffset =
- StringDictionary::kHeaderSize +
- StringDictionary::kCapacityIndex * kPointerSize;
- __ movq(r1, FieldOperand(elements, kCapacityOffset));
+ __ movq(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
__ SmiToInteger32(r1, r1);
__ decl(r1);
- const int kElementsStartOffset =
- NumberDictionary::kHeaderSize +
- NumberDictionary::kElementsStartIndex * kPointerSize;
-
// Generate an unrolled loop that performs a few probes before giving up.
const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
@@ -231,7 +223,7 @@
__ cmpq(key, FieldOperand(elements,
r2,
times_pointer_size,
- kElementsStartOffset));
+ NumberDictionary::kElementsStartOffset));
if (i != (kProbes - 1)) {
__ j(equal, &done);
} else {
@@ -241,14 +233,16 @@
__ bind(&done);
   // Check that the value is a normal property.
- const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
ASSERT_EQ(NORMAL, 0);
__ Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss);
// Get the value at the masked, scaled index.
- const int kValueOffset = kElementsStartOffset + kPointerSize;
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
__ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
@@ -1404,7 +1398,7 @@
// Check for non-global object that requires access check.
__ testl(FieldOperand(rbx, Map::kBitFieldOffset),
- Immediate(1 << Map::kIsAccessCheckNeeded));
+ Immediate(1 << Map::kIsAccessCheckNeeded));
__ j(not_zero, &miss);
// Search the dictionary placing the result in rax.
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index aa620fc..03b21a5 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -138,14 +138,13 @@
Register holder,
Register name,
JSObject* holder_obj) {
- __ push(receiver);
- __ push(holder);
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
ASSERT(!Heap::InNewSpace(interceptor));
- __ movq(kScratchRegister, Handle<Object>(interceptor),
- RelocInfo::EMBEDDED_OBJECT);
+ __ Move(kScratchRegister, Handle<Object>(interceptor));
__ push(kScratchRegister);
+ __ push(receiver);
+ __ push(holder);
__ push(FieldOperand(kScratchRegister, InterceptorInfo::kDataOffset));
}