Improved string hash-code distribution by excluding bit-field bits from the hash-code.
Changed string search algorithm used in indexOf from KMP to Boyer-Moore.
Improved the generated code for the instanceof operator.
Improved performance of slow-case string equality checks by specializing the code based on the string representation.
Improved the handling of out-of-memory situations (issue 70).
Improved performance of strict equality checks.
Improved profiler output to make it easier to see anonymous functions.
Improved performance of slow-case keyed loads.
Improved property access performance by allocating a number of properties in the front object.
Changed the toString behavior on the built-in object constructors to print [native code] instead of the actual source. Some web applications do not like constructors with complex toString results.
git-svn-id: http://v8.googlecode.com/svn/trunk@511 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/AUTHORS b/AUTHORS
index 7bdd7f7..cc96cd1 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -9,3 +9,4 @@
Rafal Krypa <rafal@krypa.net>
Jay Freeman <saurik@saurik.com>
Daniel James <dnljms@gmail.com>
+Paolo Giarrusso <p.giarrusso@gmail.com>
diff --git a/ChangeLog b/ChangeLog
index 3fa5f67..943b657 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,34 @@
+2008-10-16: Version 0.3.5
+
+ Improved string hash-code distribution by excluding bit-field bits
+ from the hash-code.
+
+ Changed string search algorithm used in indexOf from KMP to
+ Boyer-Moore.
+
+ Improved the generated code for the instanceof operator.
+
+ Improved performance of slow-case string equality checks by
+ specializing the code based on the string representation.
+
+        Improved the handling of out-of-memory situations (issue 70).
+
+ Improved performance of strict equality checks.
+
+ Improved profiler output to make it easier to see anonymous
+ functions.
+
+ Improved performance of slow-case keyed loads.
+
+ Improved property access performance by allocating a number of
+ properties in the front object.
+
+ Changed the toString behavior on the built-in object constructors
+ to print [native code] instead of the actual source. Some web
+ applications do not like constructors with complex toString
+ results.
+
+
2008-10-06: Version 0.3.4
Changed Array.prototype.sort to use quick sort.
diff --git a/SConstruct b/SConstruct
index 96ce466..7789518 100644
--- a/SConstruct
+++ b/SConstruct
@@ -52,7 +52,7 @@
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG']
},
'mode:release': {
- 'CCFLAGS': ['-O3']
+ 'CCFLAGS': ['-O3', '-fomit-frame-pointer']
},
'wordsize:64': {
'CCFLAGS': ['-m32'],
diff --git a/include/v8.h b/include/v8.h
index 2842c32..eba21f5 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -78,7 +78,7 @@
// Setup for Linux shared library export. There is no need to destinguish
// neither between building or using the V8 shared library nor between using
// the shared or static V8 library as there is on Windows. Therefore there is
-// on checking of BUILDING_V8_SHARED and USING_V8_SHARED.
+// no checking of BUILDING_V8_SHARED and USING_V8_SHARED.
#if defined(__GNUC__) && (__GNUC__ >= 4)
#define EXPORT __attribute__ ((visibility("default")))
#define EXPORT_INLINE __attribute__ ((visibility("default")))
diff --git a/src/api.cc b/src/api.cc
index 7b22484..a181687 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -2216,7 +2216,7 @@
const char* v8::V8::GetVersion() {
- return "0.3.4.1";
+ return "0.3.5";
}
diff --git a/src/assembler-ia32.cc b/src/assembler-ia32.cc
index 27ed246..bacee5a 100644
--- a/src/assembler-ia32.cc
+++ b/src/assembler-ia32.cc
@@ -720,6 +720,23 @@
void Assembler::add(const Operand& dst, const Immediate& x) {
+ ASSERT(reloc_info_writer.last_pc() != NULL);
+ if (FLAG_push_pop_elimination && (reloc_info_writer.last_pc() <= last_pc_)) {
+ byte instr = last_pc_[0];
+ if ((instr & 0xf8) == 0x50) {
+ // Last instruction was a push. Check whether this is a pop without a
+ // result.
+ if ((dst.is_reg(esp)) &&
+ (x.x_ == kPointerSize) && (x.rmode_ == RelocInfo::NONE)) {
+ pc_ = last_pc_;
+ last_pc_ = NULL;
+ if (FLAG_print_push_pop_elimination) {
+ PrintF("%d push/pop(noreg) eliminated\n", pc_offset());
+ }
+ return;
+ }
+ }
+ }
EnsureSpace ensure_space(this);
last_pc_ = pc_;
emit_arith(0, dst, x);
diff --git a/src/ast.h b/src/ast.h
index e613636..fe946d9 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -140,9 +140,6 @@
};
-class Reference;
-enum InitState { CONST_INIT, NOT_CONST_INIT };
-
class Expression: public Node {
public:
virtual Expression* AsExpression() { return this; }
@@ -153,17 +150,6 @@
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
virtual void MarkAsStatement() { /* do nothing */ }
-
- // Generate code to store into an expression evaluated as the left-hand
- // side of an assignment. The code will expect the stored value on top of
- // the expression stack, and a reference containing the expression
- // immediately below that. This function is overridden for expression
- // types that can be stored into.
- virtual void GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- UNREACHABLE();
- }
};
@@ -758,13 +744,6 @@
// Bind this proxy to the variable var.
void BindTo(Variable* var);
- // Generate code to store into an expression evaluated as the left-hand
- // side of an assignment. The code will expect the stored value on top of
- // the expression stack, and a reference containing the expression
- // immediately below that.
- virtual void GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state);
protected:
Handle<String> name_;
Variable* var_; // resolved variable, or NULL
@@ -840,13 +819,6 @@
Type type() const { return type_; }
int index() const { return index_; }
- // Generate code to store into an expression evaluated as the left-hand
- // side of an assignment. The code will expect the stored value on top of
- // the expression stack, and a reference containing the expression
- // immediately below that.
- virtual void GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state);
private:
Variable* var_;
Type type_;
@@ -874,13 +846,6 @@
// during preparsing.
static Property* this_property() { return &this_property_; }
- // Generate code to store into an expression evaluated as the left-hand
- // side of an assignment. The code will expect the stored value on top of
- // the expression stack, and a reference containing the expression
- // immediately below that.
- virtual void GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state);
private:
Expression* obj_;
Expression* key_;
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 925c8ff..0c5650a 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -63,7 +63,7 @@
bool Lookup(Vector<const char> name, Handle<JSFunction>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
- AsciiString* str = AsciiString::cast(cache_->get(i));
+ SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
if (str->IsEqualTo(name)) {
*handle = Handle<JSFunction>(JSFunction::cast(cache_->get(i + 1)));
return true;
@@ -751,7 +751,7 @@
// function and insert it into the cache.
if (!cache->Lookup(name, &boilerplate)) {
#ifdef DEBUG
- ASSERT(source->IsAscii());
+ ASSERT(source->IsAsciiRepresentation());
#endif
Handle<String> script_name = Factory::NewStringFromUtf8(name);
boilerplate =
@@ -1161,7 +1161,7 @@
HandleScope inner;
Handle<String> key = Handle<String>(stream.GetKey());
int index = stream.GetFieldIndex();
- Handle<Object> value = Handle<Object>(from->properties()->get(index));
+ Handle<Object> value = Handle<Object>(from->FastPropertyAt(index));
SetProperty(to, key, value, details.attributes());
break;
}
diff --git a/src/builtins-arm.cc b/src/builtins-arm.cc
index c030cde..68b5f1d 100644
--- a/src/builtins-arm.cc
+++ b/src/builtins-arm.cc
@@ -58,8 +58,8 @@
// -- sp[...]: constructor arguments
// -----------------------------------
- // Enter an internal frame.
- __ EnterInternalFrame();
+ // Enter a construct frame.
+ __ EnterConstructFrame();
// Preserve the two incoming parameters
__ mov(r0, Operand(r0, LSL, kSmiTagSize));
@@ -116,10 +116,8 @@
// Call the function.
// r0: number of arguments
// r1: constructor function
- Label return_site;
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION);
- __ bind(&return_site);
// Pop the function from the stack.
// sp[0]: constructor function
@@ -168,15 +166,10 @@
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
__ ldr(r1, MemOperand(sp, 2 * kPointerSize));
- __ LeaveInternalFrame();
+ __ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ mov(pc, Operand(lr));
-
- // Compute the offset from the beginning of the JSConstructCall
- // builtin code object to the return address after the call.
- ASSERT(return_site.is_bound());
- construct_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
}
@@ -539,7 +532,7 @@
}
-static void ExitArgumentsAdaptorFrame(MacroAssembler* masm) {
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : result being passed through
// -----------------------------------
@@ -641,20 +634,13 @@
}
// Call the entry point.
- Label return_site;
__ bind(&invoke);
-
__ Call(r3);
- __ bind(&return_site);
- ExitArgumentsAdaptorFrame(masm);
+ // Exit frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
__ mov(pc, lr);
- // Compute the offset from the beginning of the ArgumentsAdaptorTrampoline
- // builtin code object to the return address after the call.
- ASSERT(return_site.is_bound());
- arguments_adaptor_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
-
// -------------------------------------------
// Dont adapt arguments.
diff --git a/src/builtins-ia32.cc b/src/builtins-ia32.cc
index e049fbe..28122cf 100644
--- a/src/builtins-ia32.cc
+++ b/src/builtins-ia32.cc
@@ -56,8 +56,8 @@
// -- edi: constructor function
// -----------------------------------
- // Enter an internal frame.
- __ EnterInternalFrame();
+ // Enter a construct frame.
+ __ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
__ shl(eax, kSmiTagSize);
@@ -111,6 +111,7 @@
// edi: constructor
// eax: initial map
__ movzx_b(edi, FieldOperand(eax, Map::kInstanceSizeOffset));
+ __ shl(edi, kPointerSizeLog2);
// Make sure that the maximum heap object size will never cause us
// problem here, because it is always greater than the maximum
// instance size that can be represented in a byte.
@@ -163,8 +164,11 @@
// ebx: JSObject
// edi: start of next object
__ movzx_b(edx, FieldOperand(eax, Map::kUnusedPropertyFieldsOffset));
+ __ movzx_b(ecx, FieldOperand(eax, Map::kInObjectPropertiesOffset));
+ // Calculate unused properties past the end of the in-object properties.
+ __ sub(edx, Operand(ecx));
__ test(edx, Operand(edx));
- // Done if no unused properties are to be allocated.
+ // Done if no extra properties are to be allocated.
__ j(zero, &allocated);
// Scale the number of elements by pointer size and add the header for
@@ -265,10 +269,8 @@
__ j(greater_equal, &loop);
// Call the function.
- Label return_site;
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION);
- __ bind(&return_site);
// Restore context from the frame.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -294,10 +296,10 @@
__ bind(&use_receiver);
__ mov(eax, Operand(esp, 0));
- // Restore the arguments count and exit the internal frame.
+ // Restore the arguments count and leave the construct frame.
__ bind(&exit);
__ mov(ebx, Operand(esp, kPointerSize)); // get arguments count
- __ LeaveInternalFrame();
+ __ LeaveConstructFrame();
// Remove caller arguments from the stack and return.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
@@ -305,11 +307,6 @@
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
-
- // Compute the offset from the beginning of the JSConstructCall
- // builtin code object to the return address after the call.
- ASSERT(return_site.is_bound());
- construct_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
}
@@ -662,7 +659,7 @@
}
-static void ExitArgumentsAdaptorFrame(MacroAssembler* masm) {
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
// Retrieve the number of arguments from the stack.
__ mov(ebx, Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
@@ -742,20 +739,13 @@
}
// Call the entry point.
- Label return_site;
__ bind(&invoke);
__ call(Operand(edx));
- __ bind(&return_site);
- ExitArgumentsAdaptorFrame(masm);
+ // Leave frame and return.
+ LeaveArgumentsAdaptorFrame(masm);
__ ret(0);
- // Compute the offset from the beginning of the ArgumentsAdaptorTrampoline
- // builtin code object to the return address after the call.
- ASSERT(return_site.is_bound());
- arguments_adaptor_call_pc_offset_ = return_site.pos() + Code::kHeaderSize;
-
-
// -------------------------------------------
// Dont adapt arguments.
// -------------------------------------------
diff --git a/src/builtins.cc b/src/builtins.cc
index 43049f4..c8c428b 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -94,33 +94,13 @@
ASSERT(it.frame()->is_exit());
it.Advance();
StackFrame* frame = it.frame();
- return frame->is_internal() &&
- InternalFrame::cast(frame)->is_construct_trampoline();
+ return frame->is_construct();
}
// ----------------------------------------------------------------------------
-int Builtins::construct_call_pc_offset_ = 0;
-int Builtins::arguments_adaptor_call_pc_offset_ = 0;
-
-
-// Check if the builtin was called in a 'new' call.
-bool Builtins::IsConstructCall(Address pc) {
- ASSERT(construct_call_pc_offset_ > 0);
- int offset = pc - builtin(JSConstructCall)->address();
- return offset == construct_call_pc_offset_;
-}
-
-
-bool Builtins::IsArgumentsAdaptorCall(Address pc) {
- ASSERT(arguments_adaptor_call_pc_offset_ > 0);
- int offset = pc - builtin(ArgumentsAdaptorTrampoline)->address();
- return offset == arguments_adaptor_call_pc_offset_;
-}
-
-
Handle<Code> Builtins::GetCode(JavaScript id, bool* resolved) {
Code* code = Builtins::builtin(Builtins::Illegal);
*resolved = false;
@@ -674,10 +654,15 @@
masm.GetCode(&desc);
Code::Flags flags = functions[i].flags;
Object* code = Heap::CreateCode(desc, NULL, flags);
- if (code->IsRetryAfterGC()) {
- CHECK(Heap::CollectGarbage(Failure::cast(code)->requested(),
- Failure::cast(code)->allocation_space()));
- code = Heap::CreateCode(desc, NULL, flags);
+ if (code->IsFailure()) {
+ if (code->IsRetryAfterGC()) {
+ CHECK(Heap::CollectGarbage(Failure::cast(code)->requested(),
+ Failure::cast(code)->allocation_space()));
+ code = Heap::CreateCode(desc, NULL, flags);
+ }
+ if (code->IsFailure()) {
+ v8::internal::V8::FatalProcessOutOfMemory("CreateCode");
+ }
}
// Add any unresolved jumps or calls to the fixup list in the
// bootstrapper.
diff --git a/src/builtins.h b/src/builtins.h
index 14d4ee6..80a897d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -167,9 +167,6 @@
id_count
};
- static bool IsConstructCall(Address pc);
- static bool IsArgumentsAdaptorCall(Address pc);
-
static Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
@@ -206,12 +203,6 @@
static const char* javascript_names_[id_count];
static int javascript_argc_[id_count];
- // The offset from the beginning of the JSConstructCall builtin code
- // object to the return address after the call. Used for determining
- // if a call is a constructor invocation.
- static int construct_call_pc_offset_;
- static int arguments_adaptor_call_pc_offset_;
-
static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
static void Generate_JSConstructCall(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm);
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index d337124..2ead1b3 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -110,6 +110,8 @@
return "RevertToNumber";
case ToBoolean:
return "ToBoolean";
+ case Instanceof:
+ return "Instanceof";
case CounterOp:
return "CounterOp";
case ArgumentsAccess:
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 6e8c024..d21b0ee 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -44,6 +44,7 @@
UnarySub,
RevertToNumber,
ToBoolean,
+ Instanceof,
CounterOp,
ArgumentsAccess,
Runtime,
@@ -82,7 +83,7 @@
virtual int MinorKey() = 0;
// Returns a name for logging/debugging purposes.
- virtual const char* GetName() = 0;
+ virtual const char* GetName() { return MajorName(MajorKey()); }
#ifdef DEBUG
virtual void Print() { PrintF("%s\n", GetName()); }
diff --git a/src/codegen-arm.cc b/src/codegen-arm.cc
index 89fc0db..92206dd 100644
--- a/src/codegen-arm.cc
+++ b/src/codegen-arm.cc
@@ -30,320 +30,17 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
-#include "prettyprinter.h"
-#include "scopeinfo.h"
#include "scopes.h"
#include "runtime.h"
namespace v8 { namespace internal {
-class ArmCodeGenerator;
-
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
-class Reference BASE_EMBEDDED {
- public:
- enum Type { ILLEGAL = -1, EMPTY = 0, NAMED = 1, KEYED = 2 };
- Reference(ArmCodeGenerator* cgen, Expression* expression);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
- type_ = value;
- }
- int size() const { return type_; }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
-
- private:
- ArmCodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
-};
-
-
-// -------------------------------------------------------------------------
-// Code generation state
-
-// The state is passed down the AST by the code generator. It is passed
-// implicitly (in a member variable) to the non-static code generator member
-// functions, and explicitly (as an argument) to the static member functions
-// and the AST node member functions.
-//
-// The state is threaded through the call stack. Constructing a state
-// implicitly pushes it on the owning code generator's stack of states, and
-// destroying one implicitly pops it.
-
-class CodeGenState BASE_EMBEDDED {
- public:
- enum AccessType {
- UNDEFINED,
- LOAD,
- LOAD_TYPEOF_EXPR
- };
-
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(ArmCodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own access type and pair of branch
- // labels, and no reference.
- CodeGenState(ArmCodeGenerator* owner,
- AccessType access,
- Label* true_target,
- Label* false_target);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has an access type of LOAD, its own reference,
- // and inherits the pair of branch labels of the current state.
- CodeGenState(ArmCodeGenerator* owner, Reference* ref);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- AccessType access() const { return access_; }
- Reference* ref() const { return ref_; }
- Label* true_target() const { return true_target_; }
- Label* false_target() const { return false_target_; }
-
- private:
- ArmCodeGenerator* owner_;
- AccessType access_;
- Reference* ref_;
- Label* true_target_;
- Label* false_target_;
- CodeGenState* previous_;
-};
-
-
-// -----------------------------------------------------------------------------
-// ArmCodeGenerator
-
-class ArmCodeGenerator: public CodeGenerator {
- public:
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval);
-
- MacroAssembler* masm() { return masm_; }
-
- Scope* scope() const { return scope_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- private:
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- // Code generation state
- Scope* scope_;
- Condition cc_reg_;
- CodeGenState* state_;
- int break_stack_height_;
-
- // Labels
- Label function_return_;
-
- // Construction/destruction
- ArmCodeGenerator(int buffer_size,
- Handle<Script> script,
- bool is_eval);
-
- virtual ~ArmCodeGenerator() { delete masm_; }
-
- // Main code generation function
- void GenCode(FunctionLiteral* fun);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- // State
- bool has_cc() const { return cc_reg_ != al; }
- CodeGenState::AccessType access() const { return state_->access(); }
- Reference* ref() const { return state_->ref(); }
- bool is_referenced() const { return state_->ref() != NULL; }
- Label* true_target() const { return state_->true_target(); }
- Label* false_target() const { return state_->false_target(); }
-
-
- // Expressions
- MemOperand GlobalObject() const {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
- }
-
- static MemOperand ContextOperand(Register context, int index) {
- return MemOperand(context, Context::SlotOffset(index));
- }
-
- static MemOperand ParameterOperand(const CodeGenerator* cgen, int index) {
- int num_parameters = cgen->scope()->num_parameters();
- // index -2 corresponds to the activated closure, -1 corresponds
- // to the receiver
- ASSERT(-2 <= index && index < num_parameters);
- int offset = (1 + num_parameters - index) * kPointerSize;
- return MemOperand(fp, offset);
- }
-
- MemOperand ParameterOperand(int index) const {
- return ParameterOperand(this, index);
- }
-
- MemOperand FunctionOperand() const {
- return MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset);
- }
-
- static MemOperand SlotOperand(CodeGenerator* cgen,
- Slot* slot,
- Register tmp);
-
- MemOperand SlotOperand(Slot* slot, Register tmp) {
- return SlotOperand(this, slot, tmp);
- }
-
- void LoadCondition(Expression* x, CodeGenState::AccessType access,
- Label* true_target, Label* false_target, bool force_cc);
- void Load(Expression* x,
- CodeGenState::AccessType access = CodeGenState::LOAD);
- void LoadGlobal();
-
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
-
-
- // References
-
- // Generate code to fetch the value of a reference. The reference is
- // expected to be on top of the expression stack. It is left in place and
- // its value is pushed on top of it.
- void GetValue(Reference* ref) {
- ASSERT(!has_cc());
- ASSERT(!ref->is_illegal());
- CodeGenState new_state(this, ref);
- Visit(ref->expression());
- }
-
- // Generate code to store a value in a reference. The stored value is
- // expected on top of the expression stack, with the reference immediately
- // below it. The expression stack is left unchanged.
- void SetValue(Reference* ref) {
- ASSERT(!has_cc());
- ASSERT(!ref->is_illegal());
- ref->expression()->GenerateStoreCode(this, ref, NOT_CONST_INIT);
- }
-
- // Generate code to store a value in a reference. The stored value is
- // expected on top of the expression stack, with the reference immediately
- // below it. The expression stack is left unchanged.
- void InitConst(Reference* ref) {
- ASSERT(!has_cc());
- ASSERT(!ref->is_illegal());
- ref->expression()->GenerateStoreCode(this, ref, CONST_INIT);
- }
-
- // Generate code to fetch a value from a property of a reference. The
- // reference is expected on top of the expression stack. It is left in
- // place and its value is pushed on top of it.
- void GetReferenceProperty(Expression* key);
-
- // Generate code to store a value in a property of a reference. The
- // stored value is expected on top of the expression stack, with the
- // reference immediately below it. The expression stack is left
- // unchanged.
- static void SetReferenceProperty(CodeGenerator* cgen,
- Reference* ref,
- Expression* key);
-
-
- void ToBoolean(Label* true_target, Label* false_target);
-
- void GenericBinaryOperation(Token::Value op);
- void Comparison(Condition cc, bool strict = false);
-
- void SmiOperation(Token::Value op, Handle<Object> value, bool reversed);
-
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- virtual void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function boilerplate.
- void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
-
- // Control flow
- void Branch(bool if_true, Label* L);
- void CheckStack();
- void CleanStack(int num_bytes);
-
- // Node visitors
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Fast-case switch
- static const int kFastCaseSwitchMaxOverheadFactor = 10;
- static const int kFastCaseSwitchMinCaseCount = 5;
- virtual int FastCaseSwitchMaxOverheadFactor();
- virtual int FastCaseSwitchMinCaseCount();
- virtual void GenerateFastCaseSwitchJumpTable(
- SwitchStatement* node, int min_index, int range, Label *fail_label,
- SmartPointer<Label*> &case_targets, SmartPointer<Label> &case_labels);
-
- void RecordStatementPosition(Node* node);
-
- // Activation frames
- void EnterJSFrame();
- void ExitJSFrame();
-
- virtual void GenerateIsSmi(ZoneList<Expression*>* args);
- virtual void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- virtual void GenerateIsArray(ZoneList<Expression*>* args);
-
- virtual void GenerateArgumentsLength(ZoneList<Expression*>* args);
- virtual void GenerateArgumentsAccess(ZoneList<Expression*>* args);
-
- virtual void GenerateValueOf(ZoneList<Expression*>* args);
- virtual void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
-
- virtual void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- friend class Reference;
- friend class Property;
- friend class VariableProxy;
- friend class Slot;
-};
-
-
// -------------------------------------------------------------------------
// CodeGenState implementation.
-CodeGenState::CodeGenState(ArmCodeGenerator* owner)
+CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- access_(UNDEFINED),
- ref_(NULL),
+ typeof_state_(NOT_INSIDE_TYPEOF),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
@@ -351,13 +48,12 @@
}
-CodeGenState::CodeGenState(ArmCodeGenerator* owner,
- AccessType access,
+CodeGenState::CodeGenState(CodeGenerator* owner,
+ TypeofState typeof_state,
Label* true_target,
Label* false_target)
: owner_(owner),
- access_(access),
- ref_(NULL),
+ typeof_state_(typeof_state),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
@@ -365,17 +61,6 @@
}
-CodeGenState::CodeGenState(ArmCodeGenerator* owner, Reference* ref)
- : owner_(owner),
- access_(LOAD),
- ref_(ref),
- true_target_(owner->state()->true_target_),
- false_target_(owner->state()->false_target_),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
CodeGenState::~CodeGenState() {
ASSERT(owner_->state() == this);
owner_->set_state(previous_);
@@ -383,98 +68,15 @@
// -----------------------------------------------------------------------------
-// ArmCodeGenerator implementation
+// CodeGenerator implementation
#define __ masm_->
-Handle<Code> ArmCodeGenerator::MakeCode(FunctionLiteral* flit,
- Handle<Script> script,
- bool is_eval) {
-#ifdef ENABLE_DISASSEMBLER
- bool print_code = FLAG_print_code && !Bootstrapper::IsActive();
-#endif // ENABLE_DISASSEMBLER
-
-#ifdef DEBUG
- bool print_source = false;
- bool print_ast = false;
- const char* ftype;
-
- if (Bootstrapper::IsActive()) {
- print_source = FLAG_print_builtin_source;
- print_ast = FLAG_print_builtin_ast;
- print_code = FLAG_print_builtin_code;
- ftype = "builtin";
- } else {
- print_source = FLAG_print_source;
- print_ast = FLAG_print_ast;
- ftype = "user-defined";
- }
-
- if (FLAG_trace_codegen || print_source || print_ast) {
- PrintF("*** Generate code for %s function: ", ftype);
- flit->name()->ShortPrint();
- PrintF(" ***\n");
- }
-
- if (print_source) {
- PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
- }
-
- if (print_ast) {
- PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
- }
-#endif // DEBUG
-
- // Generate code.
- const int initial_buffer_size = 4 * KB;
- ArmCodeGenerator cgen(initial_buffer_size, script, is_eval);
- cgen.GenCode(flit);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
- }
-
- // Process any deferred code.
- cgen.ProcessDeferred();
-
- // Allocate and install the code.
- CodeDesc desc;
- cgen.masm()->GetCode(&desc);
- ScopeInfo<> sinfo(flit->scope());
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
- Handle<Code> code = Factory::NewCode(desc, &sinfo, flags);
-
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(*code, cgen.masm());
-
-#ifdef ENABLE_DISASSEMBLER
- if (print_code) {
- // Print the source code if available.
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(flit->start_position());
- // flit->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len = flit->end_position() - flit->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
- }
- PrintF("\n\n");
- }
- PrintF("--- Code ---\n");
- code->Disassemble();
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
-}
-
-
-ArmCodeGenerator::ArmCodeGenerator(int buffer_size,
- Handle<Script> script,
- bool is_eval)
- : CodeGenerator(is_eval, script),
+CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+ bool is_eval)
+ : is_eval_(is_eval),
+ script_(script),
+ deferred_(8),
masm_(new MacroAssembler(NULL, buffer_size)),
scope_(NULL),
cc_reg_(al),
@@ -491,7 +93,7 @@
// pp: caller's parameter pointer
// cp: callee's context
-void ArmCodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::GenCode(FunctionLiteral* fun) {
Scope* scope = fun->scope();
ZoneList<Statement*>* body = fun->body();
@@ -597,9 +199,9 @@
__ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
__ CallStub(&stub);
__ push(r0);
- SetValue(&arguments_ref);
+ arguments_ref.SetValue(NOT_CONST_INIT);
}
- SetValue(&shadow_ref);
+ shadow_ref.SetValue(NOT_CONST_INIT);
}
__ pop(r0); // Value is no longer needed.
}
@@ -681,21 +283,73 @@
}
+MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return ParameterOperand(index);
+
+ case Slot::LOCAL: {
+ ASSERT(0 <= index && index < scope()->num_stack_slots());
+ const int kLocalOffset = JavaScriptFrameConstants::kLocal0Offset;
+ return MemOperand(fp, kLocalOffset - index * kPointerSize);
+ }
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(cp)); // do not overwrite context register
+ Register context = cp;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = chain_length; i-- > 0;) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+ // (In fact this mov may never be needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return MemOperand(r0, 0);
+ }
+}
+
+
// Loads a value on the stack. If it is a boolean value, the result may have
// been (partially) translated into branches, or it may have set the condition
// code register. If force_cc is set, the value is forced to set the condition
// code register and no value is pushed. If the condition code register was set,
// has_cc() is true and cc_reg_ contains the condition to test for 'true'.
-void ArmCodeGenerator::LoadCondition(Expression* x,
- CodeGenState::AccessType access,
+void CodeGenerator::LoadCondition(Expression* x,
+ TypeofState typeof_state,
Label* true_target,
Label* false_target,
bool force_cc) {
- ASSERT(access == CodeGenState::LOAD ||
- access == CodeGenState::LOAD_TYPEOF_EXPR);
- ASSERT(!has_cc() && !is_referenced());
+ ASSERT(!has_cc());
- { CodeGenState new_state(this, access, true_target, false_target);
+ { CodeGenState new_state(this, typeof_state, true_target, false_target);
Visit(x);
}
if (force_cc && !has_cc()) {
@@ -706,13 +360,10 @@
}
-void ArmCodeGenerator::Load(Expression* x, CodeGenState::AccessType access) {
- ASSERT(access == CodeGenState::LOAD ||
- access == CodeGenState::LOAD_TYPEOF_EXPR);
-
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
Label true_target;
Label false_target;
- LoadCondition(x, access, &true_target, &false_target, false);
+ LoadCondition(x, typeof_state, &true_target, &false_target, false);
if (has_cc()) {
// convert cc_reg_ into a bool
@@ -758,16 +409,16 @@
}
-void ArmCodeGenerator::LoadGlobal() {
+void CodeGenerator::LoadGlobal() {
__ ldr(r0, GlobalObject());
__ push(r0);
}
// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the LOAD_TYPEOF_EXPR access type. => Need to handle
-// global variables w/o reference errors elsewhere.
-void ArmCodeGenerator::LoadTypeofExpression(Expression* x) {
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
Variable* variable = x->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// NOTE: This is somewhat nasty. We force the compiler to load
@@ -780,12 +431,12 @@
Property property(&global, &key, RelocInfo::kNoPosition);
Load(&property);
} else {
- Load(x, CodeGenState::LOAD_TYPEOF_EXPR);
+ Load(x, INSIDE_TYPEOF);
}
}
-Reference::Reference(ArmCodeGenerator* cgen, Expression* expression)
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this);
}
@@ -796,44 +447,52 @@
}
-void ArmCodeGenerator::LoadReference(Reference* ref) {
+void CodeGenerator::LoadReference(Reference* ref) {
+ Comment cmnt(masm_, "[ LoadReference");
+
Expression* e = ref->expression();
Property* property = e->AsProperty();
Variable* var = e->AsVariableProxy()->AsVariable();
if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
Load(property->obj());
- // Used a named reference if the key is a literal symbol.
- // We don't use a named reference if they key is a string that can be
- // legally parsed as an integer. This is because, otherwise we don't
- // get into the slow case code that handles [] on String objects.
+ // We use a named reference if the key is a literal symbol, unless it is
+ // a string that can be legally parsed as an integer. This is because
+ // otherwise we will not get into the slow case code that handles [] on
+ // String objects.
Literal* literal = property->key()->AsLiteral();
uint32_t dummy;
- if (literal != NULL && literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (literal != NULL &&
+ literal->handle()->IsSymbol() &&
+ !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
ref->set_type(Reference::NAMED);
} else {
Load(property->key());
ref->set_type(Reference::KEYED);
}
} else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
if (var->is_global()) {
- // global variable
LoadGlobal();
ref->set_type(Reference::NAMED);
} else {
- // local variable
- ref->set_type(Reference::EMPTY);
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
}
} else {
+ // Anything else is a runtime error.
Load(e);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ push(r0);
}
}
-void ArmCodeGenerator::UnloadReference(Reference* ref) {
+void CodeGenerator::UnloadReference(Reference* ref) {
+ Comment cmnt(masm_, "[ UnloadReference");
+
int size = ref->size();
if (size <= 0) {
// Do nothing. No popping is necessary.
@@ -848,7 +507,7 @@
// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
// register to a boolean in the condition code register. The code
// may jump to 'false_target' in case the register converts to 'false'.
-void ArmCodeGenerator::ToBoolean(Label* true_target,
+void CodeGenerator::ToBoolean(Label* true_target,
Label* false_target) {
// Note: The generated code snippet does not change stack variables.
// Only the condition code should be set.
@@ -893,8 +552,6 @@
Major MajorKey() { return GetProperty; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "GetPropertyStub"; }
};
@@ -906,8 +563,6 @@
Major MajorKey() { return SetProperty; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "GetPropertyStub"; }
};
@@ -957,8 +612,6 @@
int MinorKey() { return (argc_ << 3) | static_cast<int>(kind_); }
void Generate(MacroAssembler* masm);
- const char* GetName() { return "InvokeBuiltinStub"; }
-
#ifdef DEBUG
void Print() {
PrintF("InvokeBuiltinStub (kind %d, argc, %d)\n",
@@ -969,45 +622,7 @@
};
-void ArmCodeGenerator::GetReferenceProperty(Expression* key) {
- ASSERT(!ref()->is_illegal());
- Reference::Type type = ref()->type();
-
- // TODO(1241834): Make sure that this it is safe to ignore the distinction
- // between access types LOAD and LOAD_TYPEOF_EXPR. If there is a chance
- // that reference errors can be thrown below, we must distinguish between
- // the two kinds of loads (typeof expression loads must not throw a
- // reference error).
- if (type == Reference::NAMED) {
- // Compute the name of the property.
- Literal* literal = key->AsLiteral();
- Handle<String> name(String::cast(*literal->handle()));
-
- // Call the appropriate IC code.
- // Setup the name register.
- __ mov(r2, Operand(name));
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- Variable* var = ref()->expression()->AsVariableProxy()->AsVariable();
- if (var != NULL) {
- ASSERT(var->is_global());
- __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ Call(ic, RelocInfo::CODE_TARGET);
- }
-
- } else {
- // Access keyed property.
- ASSERT(type == Reference::KEYED);
-
- // TODO(1224671): Implement inline caching for keyed loads as on ia32.
- GetPropertyStub stub;
- __ CallStub(&stub);
- }
- __ push(r0);
-}
-
-
-void ArmCodeGenerator::GenericBinaryOperation(Token::Value op) {
+void CodeGenerator::GenericBinaryOperation(Token::Value op) {
// sp[0] : y
// sp[1] : x
// result : r0
@@ -1132,7 +747,7 @@
};
-void ArmCodeGenerator::SmiOperation(Token::Value op,
+void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed) {
// NOTE: This is an attempt to inline (a bit) more of the code for
@@ -1267,7 +882,7 @@
}
-void ArmCodeGenerator::Comparison(Condition cc, bool strict) {
+void CodeGenerator::Comparison(Condition cc, bool strict) {
// sp[0] : y
// sp[1] : x
// result : cc register
@@ -1338,8 +953,6 @@
private:
int argc_;
- const char* GetName() { return "CallFuntionStub"; }
-
#if defined(DEBUG)
void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); }
#endif // defined(DEBUG)
@@ -1350,7 +963,7 @@
// Call the function on the stack with the given arguments.
-void ArmCodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
// Push the arguments ("left-to-right") on the stack.
for (int i = 0; i < args->length(); i++) {
@@ -1370,7 +983,7 @@
}
-void ArmCodeGenerator::Branch(bool if_true, Label* L) {
+void CodeGenerator::Branch(bool if_true, Label* L) {
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
__ b(cc, L);
@@ -1378,7 +991,7 @@
}
-void ArmCodeGenerator::CheckStack() {
+void CodeGenerator::CheckStack() {
if (FLAG_check_stack) {
Comment cmnt(masm_, "[ check stack");
StackCheckStub stub;
@@ -1387,7 +1000,7 @@
}
-void ArmCodeGenerator::VisitBlock(Block* node) {
+void CodeGenerator::VisitBlock(Block* node) {
Comment cmnt(masm_, "[ Block");
if (FLAG_debug_info) RecordStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -1396,7 +1009,7 @@
}
-void ArmCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ mov(r0, Operand(pairs));
__ push(r0);
__ push(cp);
@@ -1407,7 +1020,7 @@
}
-void ArmCodeGenerator::VisitDeclaration(Declaration* node) {
+void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
@@ -1442,9 +1055,8 @@
__ mov(r0, Operand(0)); // no initial value!
__ push(r0);
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 5);
- __ push(r0);
-
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ // Ignore the return value (declarations are statements).
return;
}
@@ -1461,15 +1073,19 @@
if (val != NULL) {
// Set initial value.
Reference target(this, node->proxy());
+ ASSERT(target.is_slot());
Load(val);
- SetValue(&target);
- // Get rid of the assigned value (declarations are statements).
+ target.SetValue(NOT_CONST_INIT);
+ // Get rid of the assigned value (declarations are statements). It's
+ // safe to pop the value lying on top of the reference before unloading
+ // the reference itself (which preserves the top of stack) because we
+ // know it is a zero-sized reference.
__ pop();
}
}
-void ArmCodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
Comment cmnt(masm_, "[ ExpressionStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
Expression* expression = node->expression();
@@ -1479,13 +1095,13 @@
}
-void ArmCodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
Comment cmnt(masm_, "// EmptyStatement");
// nothing to do
}
-void ArmCodeGenerator::VisitIfStatement(IfStatement* node) {
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which
// parts of the if statement are present or not.
@@ -1500,7 +1116,7 @@
Label then;
Label else_;
// if (cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
Branch(false, &else_);
// then
__ bind(&then);
@@ -1515,7 +1131,7 @@
ASSERT(!has_else_stm);
Label then;
// if (cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &then, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &exit, true);
Branch(false, &exit);
// then
__ bind(&then);
@@ -1526,7 +1142,7 @@
ASSERT(!has_then_stm);
Label else_;
// if (!cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &else_, true);
Branch(true, &exit);
// else
__ bind(&else_);
@@ -1536,7 +1152,7 @@
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &exit, false);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &exit, false);
if (has_cc()) {
cc_reg_ = al;
} else {
@@ -1549,7 +1165,7 @@
}
-void ArmCodeGenerator::CleanStack(int num_bytes) {
+void CodeGenerator::CleanStack(int num_bytes) {
ASSERT(num_bytes >= 0);
if (num_bytes > 0) {
__ add(sp, sp, Operand(num_bytes));
@@ -1557,7 +1173,7 @@
}
-void ArmCodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
Comment cmnt(masm_, "[ ContinueStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
@@ -1565,7 +1181,7 @@
}
-void ArmCodeGenerator::VisitBreakStatement(BreakStatement* node) {
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
Comment cmnt(masm_, "[ BreakStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
@@ -1573,7 +1189,7 @@
}
-void ArmCodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
Comment cmnt(masm_, "[ ReturnStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
Load(node->expression());
@@ -1584,7 +1200,7 @@
}
-void ArmCodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
Comment cmnt(masm_, "[ WithEnterStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
Load(node->expression());
@@ -1601,7 +1217,7 @@
}
-void ArmCodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
Comment cmnt(masm_, "[ WithExitStatement");
// Pop context.
__ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX));
@@ -1610,16 +1226,16 @@
}
-int ArmCodeGenerator::FastCaseSwitchMaxOverheadFactor() {
- return kFastCaseSwitchMaxOverheadFactor;
+int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
+ return kFastSwitchMaxOverheadFactor;
}
-int ArmCodeGenerator::FastCaseSwitchMinCaseCount() {
- return kFastCaseSwitchMinCaseCount;
+int CodeGenerator::FastCaseSwitchMinCaseCount() {
+ return kFastSwitchMinCaseCount;
}
-void ArmCodeGenerator::GenerateFastCaseSwitchJumpTable(
+void CodeGenerator::GenerateFastCaseSwitchJumpTable(
SwitchStatement* node, int min_index, int range, Label *fail_label,
SmartPointer<Label*> &case_targets, SmartPointer<Label> &case_labels) {
@@ -1652,7 +1268,7 @@
}
-void ArmCodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
Comment cmnt(masm_, "[ SwitchStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -1720,7 +1336,7 @@
}
-void ArmCodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
Comment cmnt(masm_, "[ LoopStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -1779,7 +1395,7 @@
case DONT_KNOW:
CheckStack(); // TODO(1222600): ignore if body contains calls.
LoadCondition(node->cond(),
- CodeGenState::LOAD,
+ NOT_INSIDE_TYPEOF,
&loop,
node->break_target(),
true);
@@ -1792,7 +1408,7 @@
}
-void ArmCodeGenerator::VisitForInStatement(ForInStatement* node) {
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
Comment cmnt(masm_, "[ ForInStatement");
if (FLAG_debug_info) RecordStatementPosition(node);
@@ -1948,13 +1564,23 @@
__ ldr(r0, MemOperand(sp, kPointerSize * each.size()));
__ push(r0);
}
- SetValue(&each);
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (e.g., r3 pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
if (each.size() > 0) {
- __ pop(r0); // discard the value
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // i.e., now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
+ __ pop(r0);
}
}
}
- __ pop(); // pop the i'th entry pushed above
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ __ pop();
CheckStack(); // TODO(1222600): ignore if body contains calls.
__ jmp(&loop);
@@ -1970,7 +1596,7 @@
}
-void ArmCodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
Comment cmnt(masm_, "[ TryCatch");
Label try_block, exit;
@@ -1982,11 +1608,11 @@
// Store the caught exception in the catch variable.
__ push(r0);
{ Reference ref(this, node->catch_var());
- // Load the exception to the top of the stack.
- __ ldr(r0, MemOperand(sp, ref.size() * kPointerSize));
- __ push(r0);
- SetValue(&ref);
- __ pop(r0);
+ ASSERT(ref.is_slot());
+ // Here we make use of the convenient property that it doesn't matter
+ // whether a value is immediately on top of or underneath a zero-sized
+ // reference.
+ ref.SetValue(NOT_CONST_INIT);
}
// Remove the exception from the stack.
@@ -2059,7 +1685,7 @@
}
-void ArmCodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
Comment cmnt(masm_, "[ TryFinally");
// State: Used to keep track of reason for entering the finally
@@ -2192,7 +1818,7 @@
}
-void ArmCodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
Comment cmnt(masm_, "[ DebuggerStatament");
if (FLAG_debug_info) RecordStatementPosition(node);
__ CallRuntime(Runtime::kDebugBreak, 1);
@@ -2200,7 +1826,7 @@
}
-void ArmCodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
ASSERT(boilerplate->IsBoilerplate());
// Push the boilerplate on the stack.
@@ -2214,7 +1840,7 @@
}
-void ArmCodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -2225,56 +1851,52 @@
}
-void ArmCodeGenerator::VisitFunctionBoilerplateLiteral(
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
InstantiateBoilerplate(node->boilerplate());
}
-void ArmCodeGenerator::VisitConditional(Conditional* node) {
+void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
Label then, else_, exit;
- LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
Branch(false, &else_);
__ bind(&then);
- Load(node->then_expression(), access());
+ Load(node->then_expression(), typeof_state());
__ b(&exit);
__ bind(&else_);
- Load(node->else_expression(), access());
+ Load(node->else_expression(), typeof_state());
__ bind(&exit);
}
-void ArmCodeGenerator::VisitSlot(Slot* node) {
- ASSERT(access() != CodeGenState::UNDEFINED);
- Comment cmnt(masm_, "[ Slot");
-
- if (node->type() == Slot::LOOKUP) {
- ASSERT(node->var()->mode() == Variable::DYNAMIC);
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->mode() == Variable::DYNAMIC);
// For now, just do a runtime call.
__ push(cp);
- __ mov(r0, Operand(node->var()->name()));
+ __ mov(r0, Operand(slot->var()->name()));
__ push(r0);
- if (access() == CodeGenState::LOAD) {
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- } else {
- ASSERT(access() == CodeGenState::LOAD_TYPEOF_EXPR);
+ if (typeof_state == INSIDE_TYPEOF) {
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
}
__ push(r0);
} else {
// Note: We would like to keep the assert below, but it fires because of
// some nasty code in LoadTypeofExpression() which should be removed...
- // ASSERT(node->var()->mode() != Variable::DYNAMIC);
+ // ASSERT(slot->var()->mode() != Variable::DYNAMIC);
// Special handling for locals allocated in registers.
- __ ldr(r0, SlotOperand(node, r2));
+ __ ldr(r0, SlotOperand(slot, r2));
__ push(r0);
- if (node->var()->mode() == Variable::CONST) {
+ if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
// value.
@@ -2288,36 +1910,35 @@
}
-void ArmCodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var_node = node->var();
+void CodeGenerator::VisitSlot(Slot* node) {
+ Comment cmnt(masm_, "[ Slot");
+ LoadFromSlot(node, typeof_state());
+}
- Expression* expr = var_node->rewrite();
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+ Comment cmnt(masm_, "[ VariableProxy");
+
+ Variable* var = node->var();
+ Expression* expr = var->rewrite();
if (expr != NULL) {
Visit(expr);
} else {
- ASSERT(var_node->is_global());
- if (is_referenced()) {
- if (var_node->AsProperty() != NULL) {
- __ RecordPosition(var_node->AsProperty()->position());
- }
- GetReferenceProperty(new Literal(var_node->name()));
- } else {
- Reference property(this, node);
- GetValue(&property);
- }
+ ASSERT(var->is_global());
+ Reference ref(this, node);
+ ref.GetValue(typeof_state());
}
}
-void ArmCodeGenerator::VisitLiteral(Literal* node) {
+void CodeGenerator::VisitLiteral(Literal* node) {
Comment cmnt(masm_, "[ Literal");
__ mov(r0, Operand(node->handle()));
__ push(r0);
}
-void ArmCodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
Comment cmnt(masm_, "[ RexExp Literal");
// Retrieve the literal array and check the allocated entry.
@@ -2388,7 +2009,7 @@
}
-void ArmCodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
@@ -2461,7 +2082,7 @@
}
-void ArmCodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
// Call runtime to create the array literal.
@@ -2505,7 +2126,7 @@
}
-void ArmCodeGenerator::VisitAssignment(Assignment* node) {
+void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
if (FLAG_debug_info) RecordStatementPosition(node);
@@ -2518,7 +2139,7 @@
Load(node->value());
} else {
- GetValue(&target);
+ target.GetValue(NOT_INSIDE_TYPEOF);
Literal* literal = node->value()->AsLiteral();
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(), literal->handle(), false);
@@ -2543,15 +2164,15 @@
// Dynamic constant initializations must use the function context
// and initialize the actual constant declared. Dynamic variable
// initializations are simply assignments and use SetValue.
- InitConst(&target);
+ target.SetValue(CONST_INIT);
} else {
- SetValue(&target);
+ target.SetValue(NOT_CONST_INIT);
}
}
}
-void ArmCodeGenerator::VisitThrow(Throw* node) {
+void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
Load(node->exception());
@@ -2561,21 +2182,15 @@
}
-void ArmCodeGenerator::VisitProperty(Property* node) {
+void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
- if (is_referenced()) {
- __ RecordPosition(node->position());
- GetReferenceProperty(node->key());
- } else {
- Reference property(this, node);
- __ RecordPosition(node->position());
- GetValue(&property);
- }
+ Reference property(this, node);
+ property.GetValue(typeof_state());
}
-void ArmCodeGenerator::VisitCall(Call* node) {
+void CodeGenerator::VisitCall(Call* node) {
Comment cmnt(masm_, "[ Call");
ZoneList<Expression*>* args = node->arguments();
@@ -2675,7 +2290,7 @@
// Load the function to call from the property through a reference.
Reference ref(this, property);
- GetValue(&ref); // receiver
+ ref.GetValue(NOT_INSIDE_TYPEOF); // receiver
// Pass receiver to called function.
__ ldr(r0, MemOperand(sp, ref.size() * kPointerSize));
@@ -2701,7 +2316,7 @@
}
-void ArmCodeGenerator::VisitCallNew(CallNew* node) {
+void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2736,7 +2351,7 @@
}
-void ArmCodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Label leave;
Load(args->at(0));
@@ -2757,7 +2372,7 @@
}
-void ArmCodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Label leave;
Load(args->at(0)); // Load the object.
@@ -2784,7 +2399,7 @@
}
-void ArmCodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
__ pop(r0);
@@ -2793,7 +2408,7 @@
}
-void ArmCodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
__ pop(r0);
@@ -2805,14 +2420,14 @@
// This should generate code that performs a charCodeAt() call or returns
// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
// It is not yet implemented on ARM, so it always goes to the slow case.
-void ArmCodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
__ mov(r0, Operand(Factory::undefined_value()));
__ push(r0);
}
-void ArmCodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Label answer;
@@ -2833,7 +2448,7 @@
}
-void ArmCodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// Seed the result with the formal parameters count, which will be used
@@ -2847,7 +2462,7 @@
}
-void ArmCodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// Satisfy contract with ArgumentsAccessStub:
@@ -2863,7 +2478,7 @@
}
-void ArmCodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
@@ -2876,7 +2491,7 @@
}
-void ArmCodeGenerator::VisitCallRuntime(CallRuntime* node) {
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) return;
ZoneList<Expression*>* args = node->arguments();
@@ -2912,14 +2527,14 @@
}
-void ArmCodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
if (op == Token::NOT) {
LoadCondition(node->expression(),
- CodeGenState::LOAD,
+ NOT_INSIDE_TYPEOF,
false_target(),
true_target(),
true);
@@ -3037,7 +2652,7 @@
}
-void ArmCodeGenerator::VisitCountOperation(CountOperation* node) {
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@@ -3054,7 +2669,7 @@
{ Reference target(this, node->expression());
if (target.is_illegal()) return;
- GetValue(&target);
+ target.GetValue(NOT_INSIDE_TYPEOF);
__ pop(r0);
Label slow, exit;
@@ -3109,7 +2724,7 @@
// Store the new value in the target if not const.
__ bind(&exit);
__ push(r0);
- if (!is_const) SetValue(&target);
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
}
// Postfix: Discard the new value and use the old.
@@ -3117,7 +2732,7 @@
}
-void ArmCodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -3136,7 +2751,7 @@
if (op == Token::AND) {
Label is_true;
LoadCondition(node->left(),
- CodeGenState::LOAD,
+ NOT_INSIDE_TYPEOF,
&is_true,
false_target(),
false);
@@ -3146,7 +2761,7 @@
// Evaluate right side expression.
__ bind(&is_true);
LoadCondition(node->right(),
- CodeGenState::LOAD,
+ NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3177,7 +2792,7 @@
} else if (op == Token::OR) {
Label is_false;
LoadCondition(node->left(),
- CodeGenState::LOAD,
+ NOT_INSIDE_TYPEOF,
true_target(),
&is_false,
false);
@@ -3187,7 +2802,7 @@
// Evaluate right side expression.
__ bind(&is_false);
LoadCondition(node->right(),
- CodeGenState::LOAD,
+ NOT_INSIDE_TYPEOF,
true_target(),
false_target(),
false);
@@ -3239,13 +2854,13 @@
}
-void ArmCodeGenerator::VisitThisFunction(ThisFunction* node) {
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
__ ldr(r0, FunctionOperand());
__ push(r0);
}
-void ArmCodeGenerator::VisitCompareOperation(CompareOperation* node) {
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Comment cmnt(masm_, "[ CompareOperation");
// Get the expressions from the node.
@@ -3430,7 +3045,8 @@
case Token::INSTANCEOF:
__ mov(r0, Operand(1)); // not counting receiver
__ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
- __ push(r0);
+ __ tst(r0, Operand(r0));
+ cc_reg_ = eq;
break;
default:
@@ -3439,7 +3055,7 @@
}
-void ArmCodeGenerator::RecordStatementPosition(Node* node) {
+void CodeGenerator::RecordStatementPosition(Node* node) {
if (FLAG_debug_info) {
int statement_pos = node->statement_pos();
if (statement_pos == RelocInfo::kNoPosition) return;
@@ -3448,7 +3064,7 @@
}
-void ArmCodeGenerator::EnterJSFrame() {
+void CodeGenerator::EnterJSFrame() {
#if defined(DEBUG)
{ Label done, fail;
__ tst(r1, Operand(kSmiTagMask));
@@ -3458,7 +3074,7 @@
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(eq, &done);
__ bind(&fail);
- __ stop("ArmCodeGenerator::EnterJSFrame - r1 not a function");
+ __ stop("CodeGenerator::EnterJSFrame - r1 not a function");
__ bind(&done);
}
#endif // DEBUG
@@ -3468,7 +3084,7 @@
}
-void ArmCodeGenerator::ExitJSFrame() {
+void CodeGenerator::ExitJSFrame() {
// Drop the execution stack down to the frame pointer and restore the caller
// frame pointer and return address.
__ mov(sp, fp);
@@ -3479,177 +3095,201 @@
#undef __
#define __ masm->
-MemOperand ArmCodeGenerator::SlotOperand(CodeGenerator* cgen,
- Slot* slot,
- Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- return ParameterOperand(cgen, index);
+Handle<String> Reference::GetName() {
+ ASSERT(type_ == NAMED);
+ Property* property = expression_->AsProperty();
+ if (property == NULL) {
+ // Global variable reference treated as a named property reference.
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+ return proxy->name();
+ } else {
+ Literal* raw_name = property->key()->AsLiteral();
+ ASSERT(raw_name != NULL);
+ return Handle<String>(String::cast(*raw_name->handle()));
+ }
+}
- case Slot::LOCAL: {
- ASSERT(0 <= index &&
- index < cgen->scope()->num_stack_slots() &&
- index >= 0);
- int local_offset = JavaScriptFrameConstants::kLocal0Offset -
- index * kPointerSize;
- return MemOperand(fp, local_offset);
+
+void Reference::GetValue(TypeofState typeof_state) {
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ MacroAssembler* masm = cgen_->masm();
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ __ RecordPosition(property->position());
+ }
+
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Load from Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->LoadFromSlot(slot, typeof_state);
+ break;
}
- case Slot::CONTEXT: {
- MacroAssembler* masm = cgen->masm();
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(cp)); // do not overwrite context register
- Register context = cp;
- int chain_length =
- cgen->scope()->ContextChainLength(slot->var()->scope());
- for (int i = chain_length; i-- > 0;) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
+ case NAMED: {
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof. If
+ // there is a chance that reference errors can be thrown below, we
+ // must distinguish between the two kinds of loads (typeof expression
+ // loads must not throw a reference error).
+ Comment cmnt(masm, "[ Load from named Property");
+ // Setup the name register.
+ Handle<String> name(GetName());
+ __ mov(r2, Operand(name));
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ if (var != NULL) {
+ ASSERT(var->is_global());
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ } else {
+ __ Call(ic, RelocInfo::CODE_TARGET);
}
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
+ __ push(r0);
+ break;
+ }
+
+ case KEYED: {
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof.
+ Comment cmnt(masm, "[ Load from keyed Property");
+ ASSERT(property != NULL);
+ // TODO(1224671): Implement inline caching for keyed loads as on ia32.
+ GetPropertyStub stub;
+ __ CallStub(&stub);
+ __ push(r0);
+ break;
}
default:
UNREACHABLE();
- return MemOperand(r0, 0);
}
}
-void Property::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to Property");
- __ RecordPosition(position());
- ArmCodeGenerator::SetReferenceProperty(cgen, ref, key());
-}
-
-
-void VariableProxy::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to VariableProxy");
- Variable* node = var();
-
- Expression* expr = node->rewrite();
- if (expr != NULL) {
- expr->GenerateStoreCode(cgen, ref, init_state);
- } else {
- ASSERT(node->is_global());
- if (node->AsProperty() != NULL) {
- __ RecordPosition(node->AsProperty()->position());
- }
- Expression* key = new Literal(node->name());
- ArmCodeGenerator::SetReferenceProperty(cgen, ref, key);
+void Reference::SetValue(InitState init_state) {
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ MacroAssembler* masm = cgen_->masm();
+ Property* property = expression_->AsProperty();
+ if (property != NULL) {
+ __ RecordPosition(property->position());
}
-}
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Store to Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->mode() == Variable::DYNAMIC);
-void Slot::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to Slot");
+ // For now, just do a runtime call.
+ __ push(cp);
+ __ mov(r0, Operand(slot->var()->name()));
+ __ push(r0);
- if (type() == Slot::LOOKUP) {
- ASSERT(var()->mode() == Variable::DYNAMIC);
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize
+ // const properties (introduced via eval("const foo = (some
+ // expr);")). Also, uses the current function context instead of
+ // the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the
+ // same time, because the const declaration may be at the end of
+ // the eval code (sigh...) and the const variable may have been
+ // used before (where its value is 'undefined'). Thus, we can only
+ // do the initialization when we actually encounter the expression
+ // and when the expression operands are defined and valid, and
+ // thus we need the split into 2 operations: declaration of the
+ // context slot followed by initialization.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling assignment expressions.
+ __ push(r0);
- // For now, just do a runtime call.
- __ push(cp);
- __ mov(r0, Operand(var()->name()));
- __ push(r0);
+ } else {
+ ASSERT(slot->var()->mode() != Variable::DYNAMIC);
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- __ push(r0);
+ Label exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is
+ // executed, the code is identical to a normal store (see below).
+ Comment cmnt(masm, "[ Init const");
+ __ ldr(r2, cgen_->SlotOperand(slot, r2));
+ __ cmp(r2, Operand(Factory::the_hole_value()));
+ __ b(ne, &exit);
+ }
- } else {
- ASSERT(var()->mode() != Variable::DYNAMIC);
-
- Label exit;
- if (init_state == CONST_INIT) {
- ASSERT(var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- Comment cmnt(masm, "[ Init const");
- __ ldr(r2, ArmCodeGenerator::SlotOperand(cgen, this, r2));
- __ cmp(r2, Operand(Factory::the_hole_value()));
- __ b(ne, &exit);
+ // We must execute the store. Storing a variable must keep the
+ // (new) value on the stack. This is necessary for compiling
+ // assignment expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end up
+ // calling this code. r2 may be loaded with context; used below in
+ // RecordWrite.
+ __ pop(r0);
+ __ str(r0, cgen_->SlotOperand(slot, r2));
+ __ push(r0);
+ if (slot->type() == Slot::CONTEXT) {
+ // Skip write barrier if the written value is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+ // r2 is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, r1);
+ }
+ // If we definitely did not jump over the assignment, we do not need
+ // to bind the exit label. Doing so can defeat peephole
+ // optimization.
+ if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+ __ bind(&exit);
+ }
+ }
+ break;
}
- // We must execute the store.
- // r2 may be loaded with context; used below in RecordWrite.
- // Storing a variable must keep the (new) value on the stack. This is
- // necessary for compiling assignment expressions.
- //
- // Note: We will reach here even with var()->mode() == Variable::CONST
- // because of const declarations which will initialize consts to 'the
- // hole' value and by doing so, end up calling this code. r2 may be
- // loaded with context; used below in RecordWrite.
- __ pop(r0);
- __ str(r0, ArmCodeGenerator::SlotOperand(cgen, this, r2));
- __ push(r0);
+ case NAMED: {
+ Comment cmnt(masm, "[ Store to named Property");
+ // Call the appropriate IC code.
+ __ pop(r0); // value
+ // Setup the name register.
+ Handle<String> name(GetName());
+ __ mov(r2, Operand(name));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ __ push(r0);
+ break;
+ }
- if (type() == Slot::CONTEXT) {
- // Skip write barrier if the written value is a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
- // r2 is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + index() * kPointerSize;
- __ mov(r3, Operand(offset));
- __ RecordWrite(r2, r3, r1);
+ case KEYED: {
+ Comment cmnt(masm, "[ Store to keyed Property");
+ Property* property = expression_->AsProperty();
+ ASSERT(property != NULL);
+ __ RecordPosition(property->position());
+ __ pop(r0); // value
+ SetPropertyStub stub;
+ __ CallStub(&stub);
+ __ push(r0);
+ break;
}
- // If we definitely did not jump over the assignment, we do not need to
- // bind the exit label. Doing so can defeat peephole optimization.
- if (init_state == CONST_INIT || type() == Slot::CONTEXT) {
- __ bind(&exit);
- }
+
+ default:
+ UNREACHABLE();
}
}
@@ -4403,132 +4043,96 @@
}
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
- // ----------- S t a t e -------------
- // -- r0: formal number of parameters for the calling function
- // -- r1: key (if value access)
- // -- lr: return address
- // -----------------------------------
-
- // If we're reading an element we need to check that the key is a smi.
- Label slow;
- if (type_ == READ_ELEMENT) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &slow);
- }
-
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
- // r0: formal number of parameters
- // r1: key (if access)
Label adaptor;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
__ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
- if (type_ == NEW_OBJECT) {
- __ b(ne, &slow);
- } else {
- __ b(eq, &adaptor);
- }
+ __ b(eq, &adaptor);
- static const int kParamDisplacement =
- StandardFrameConstants::kCallerSPOffset - kPointerSize;
+ // Nothing to do: The formal number of parameters has already been
+ // passed in register r0 by calling function. Just return it.
+ __ mov(pc, lr);
- if (type_ == READ_LENGTH) {
- // Nothing to do: The formal number of parameters has already been
- // passed in register r0 by calling function. Just return it.
- __ mov(pc, lr);
- } else if (type_ == READ_ELEMENT) {
- // Check index against formal parameter count. Use unsigned comparison to
- // get the negative check for free.
- // r0: formal number of parameters
- // r1: index
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the current frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kParamDisplacement));
- __ mov(pc, lr);
- } else {
- ASSERT(type_ == NEW_OBJECT);
- // Do nothing here.
- }
-
- // An arguments adaptor frame is present. Find the length or the actual
- // argument in the calling frame.
- // r0: formal number of parameters
- // r1: key
- // r2: adaptor frame pointer
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame and return it.
__ bind(&adaptor);
- // Read the arguments length from the adaptor frame. This is the result if
- // only accessing the length, otherwise it is used in accessing the value
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-
- if (type_ == READ_LENGTH) {
- // Return the length in r0.
- __ mov(pc, lr);
- } else if (type_ == READ_ELEMENT) {
- // Check index against actual arguments count. Use unsigned comparison to
- // get the negative check for free.
- // r0: actual number of parameter
- // r1: index
- // r2: adaptor frame point
- __ cmp(r1, r0);
- __ b(cs, &slow);
-
- // Read the argument from the adaptor frame and return it.
- __ sub(r3, r0, r1);
- __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r0, MemOperand(r3, kParamDisplacement));
- __ mov(pc, lr);
- } else {
- ASSERT(type_ == NEW_OBJECT);
- // Patch the arguments.length and the parameters pointer.
- __ str(r0, MemOperand(sp, 0 * kPointerSize));
- __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ add(r3, r3, Operand(kParamDisplacement + 1 * kPointerSize));
- __ str(r3, MemOperand(sp, 1 * kPointerSize));
- __ bind(&slow);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
- }
-
- // Return to the calling function.
- if (type_ == READ_ELEMENT) {
- __ bind(&slow);
- __ push(r1);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
- }
+ __ mov(pc, lr);
}
-void ArmCodeGenerator::SetReferenceProperty(CodeGenerator* cgen,
- Reference* ref,
- Expression* key) {
- ASSERT(!ref->is_illegal());
- MacroAssembler* masm = cgen->masm();
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is the offset of the last parameter (if any)
+ // relative to the frame pointer.
+ static const int kDisplacement =
+ StandardFrameConstants::kCallerSPOffset - kPointerSize;
- if (ref->type() == Reference::NAMED) {
- // Compute the name of the property.
- Literal* literal = key->AsLiteral();
- Handle<String> name(String::cast(*literal->handle()));
+ // Check that the key is a smi.
+ Label slow;
+ __ tst(r1, Operand(kSmiTagMask));
+ __ b(ne, &slow);
- // Call the appropriate IC code.
- __ pop(r0); // value
- // Setup the name register.
- __ mov(r2, Operand(name));
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ b(eq, &adaptor);
- } else {
- // Access keyed property.
- ASSERT(ref->type() == Reference::KEYED);
+ // Check index against formal parameters count limit passed in
+ // through register r0. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
- __ pop(r0); // value
- SetPropertyStub stub;
- __ CallStub(&stub);
- }
- __ push(r0);
+ // Read the argument from the stack and return it.
+ __ sub(r3, r0, r1);
+ __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ mov(pc, lr);
+
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
+ __ bind(&adaptor);
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(r1, r0);
+ __ b(cs, &slow);
+
+ // Read the argument from the adaptor frame and return it.
+ __ sub(r3, r0, r1);
+ __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(r0, MemOperand(r3, kDisplacement));
+ __ mov(pc, lr);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ __ bind(&slow);
+ __ push(r1);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
+ __ cmp(r3, Operand(ArgumentsAdaptorFrame::SENTINEL));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r0, MemOperand(sp, 0 * kPointerSize));
+ __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
}
@@ -4562,21 +4166,4 @@
#undef __
-// -----------------------------------------------------------------------------
-// CodeGenerator interface
-
-// MakeCode() is just a wrapper for CodeGenerator::MakeCode()
-// so we don't have to expose the entire CodeGenerator class in
-// the .h file.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval) {
- Handle<Code> code = ArmCodeGenerator::MakeCode(fun, script, is_eval);
- if (!code.is_null()) {
- Counters::total_compiled_code_size.Increment(code->instruction_size());
- }
- return code;
-}
-
-
} } // namespace v8::internal
diff --git a/src/codegen-arm.h b/src/codegen-arm.h
new file mode 100644
index 0000000..5342b5a
--- /dev/null
+++ b/src/codegen-arm.h
@@ -0,0 +1,371 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODEGEN_ARM_H_
+#define V8_CODEGEN_ARM_H_
+
+#include "scopes.h"
+
+namespace v8 { namespace internal {
+
+// Forward declarations
+class DeferredCode;
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
+
+// -----------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+class Reference BASE_EMBEDDED {
+ public:
+ // The values of the types are important, see size().
+ enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen, Expression* expression);
+ ~Reference();
+
+ Expression* expression() const { return expression_; }
+ Type type() const { return type_; }
+ void set_type(Type value) {
+ ASSERT(type_ == ILLEGAL);
+ type_ = value;
+ }
+
+ // The size of the reference or -1 if the reference is illegal.
+ int size() const { return type_; }
+
+ bool is_illegal() const { return type_ == ILLEGAL; }
+ bool is_slot() const { return type_ == SLOT; }
+ bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+ // Return the name. Only valid for named property references.
+ Handle<String> GetName();
+
+ // Generate code to push the value of the reference on top of the
+ // expression stack. The reference is expected to be already on top of
+ // the expression stack, and it is left in place with its value above it.
+ void GetValue(TypeofState typeof_state);
+
+ // Generate code to store the value on top of the expression stack in the
+ // reference. The reference is expected to be immediately below the value
+ // on the expression stack. The stored value is left in place (with the
+ // reference intact below it) to support chained assignments.
+ void SetValue(InitState init_state);
+
+ private:
+ CodeGenerator* cgen_;
+ Expression* expression_;
+ Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair). It is threaded through the
+// call stack. Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+ // Create an initial code generator state. Destroying the initial state
+ // leaves the code generator with a NULL state.
+ explicit CodeGenState(CodeGenerator* owner);
+
+ // Create a code generator state based on a code generator's current
+ // state. The new state has its own typeof state and pair of branch
+ // labels.
+ CodeGenState(CodeGenerator* owner,
+ TypeofState typeof_state,
+ Label* true_target,
+ Label* false_target);
+
+ // Destroy a code generator state and restore the owning code generator's
+ // previous state.
+ ~CodeGenState();
+
+ TypeofState typeof_state() const { return typeof_state_; }
+ Label* true_target() const { return true_target_; }
+ Label* false_target() const { return false_target_; }
+
+ private:
+ CodeGenerator* owner_;
+ TypeofState typeof_state_;
+ Label* true_target_;
+ Label* false_target_;
+ CodeGenState* previous_;
+};
+
+
+// -----------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public Visitor {
+ public:
+ // Takes a function literal, generates code for it. This function should only
+ // be called by compiler.cc.
+ static Handle<Code> MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval);
+
+ static void SetFunctionInfo(Handle<JSFunction> fun,
+ int length,
+ int function_token_position,
+ int start_position,
+ int end_position,
+ bool is_expression,
+ bool is_toplevel,
+ Handle<Script> script);
+
+ // Accessors
+ MacroAssembler* masm() { return masm_; }
+
+ CodeGenState* state() { return state_; }
+ void set_state(CodeGenState* state) { state_ = state; }
+
+ void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+ private:
+ // Construction/Destruction
+ CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+ virtual ~CodeGenerator() { delete masm_; }
+
+ // Accessors
+ Scope* scope() const { return scope_; }
+
+ void ProcessDeferred();
+
+ bool is_eval() { return is_eval_; }
+
+ // State
+ bool has_cc() const { return cc_reg_ != al; }
+ TypeofState typeof_state() const { return state_->typeof_state(); }
+ Label* true_target() const { return state_->true_target(); }
+ Label* false_target() const { return state_->false_target(); }
+
+
+ // Node visitors.
+#define DEF_VISIT(type) \
+ void Visit##type(type* node);
+ NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ // Main code generation function
+ void GenCode(FunctionLiteral* fun);
+
+ // The following are used by class Reference.
+ void LoadReference(Reference* ref);
+ void UnloadReference(Reference* ref);
+
+ // Support functions for accessing parameters and other operands.
+ MemOperand ParameterOperand(int index) const {
+ int num_parameters = scope()->num_parameters();
+ // index -2 corresponds to the activated closure, -1 corresponds
+ // to the receiver
+ ASSERT(-2 <= index && index < num_parameters);
+ int offset = (1 + num_parameters - index) * kPointerSize;
+ return MemOperand(fp, offset);
+ }
+
+ MemOperand FunctionOperand() const {
+ return MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset);
+ }
+
+ MemOperand ContextOperand(Register context, int index) const {
+ return MemOperand(context, Context::SlotOffset(index));
+ }
+
+ MemOperand SlotOperand(Slot* slot, Register tmp);
+
+ // Expressions
+ MemOperand GlobalObject() const {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+ }
+
+ void LoadCondition(Expression* x,
+ TypeofState typeof_state,
+ Label* true_target,
+ Label* false_target,
+ bool force_cc);
+ void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void LoadGlobal();
+
+ // Read a value from a slot and leave it on top of the expression stack.
+ void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+
+ // Special code for typeof expressions: Unfortunately, we must
+ // be careful when loading the expression in 'typeof'
+ // expressions. We are not allowed to throw reference errors for
+ // non-existing properties of the global object, so we must make it
+ // look like an explicit property access, instead of an access
+ // through the context chain.
+ void LoadTypeofExpression(Expression* x);
+
+ void ToBoolean(Label* true_target, Label* false_target);
+
+ void GenericBinaryOperation(Token::Value op);
+ void Comparison(Condition cc, bool strict = false);
+
+ void SmiOperation(Token::Value op, Handle<Object> value, bool reversed);
+
+ void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+ // Control flow
+ void Branch(bool if_true, Label* L);
+ void CheckStack();
+ void CleanStack(int num_bytes);
+
+ bool CheckForInlineRuntimeCall(CallRuntime* node);
+ Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+ void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+ Handle<Code> ComputeCallInitialize(int argc);
+
+ // Declare global variables and functions in the given array of
+ // name/value pairs.
+ void DeclareGlobals(Handle<FixedArray> pairs);
+
+ // Instantiate the function boilerplate.
+ void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+ // Support for type checks.
+ void GenerateIsSmi(ZoneList<Expression*>* args);
+ void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+ void GenerateIsArray(ZoneList<Expression*>* args);
+
+ // Support for arguments.length and arguments[?].
+ void GenerateArgumentsLength(ZoneList<Expression*>* args);
+ void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+ // Support for accessing the value field of an object (used by Date).
+ void GenerateValueOf(ZoneList<Expression*>* args);
+ void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+ // Fast support for charCodeAt(n).
+ void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+ // Fast support for object equality testing.
+ void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+ // Methods and constants for fast case switch statement support.
+ //
+ // Only allow fast-case switch if the range of labels is at most
+ // this factor times the number of case labels.
+ // Value is derived from comparing the size of code generated by the normal
+ // switch code for Smi-labels to the size of a single pointer. If code
+ // quality increases this number should be decreased to match.
+ static const int kFastSwitchMaxOverheadFactor = 10;
+
+ // Minimal number of switch cases required before we allow jump-table
+ // optimization.
+ static const int kFastSwitchMinCaseCount = 5;
+
+ // The limit of the range of a fast-case switch, as a factor of the number
+ // of cases of the switch. Each platform should return a value that
+ // is optimal compared to the default code generated for a switch statement
+ // on that platform.
+ int FastCaseSwitchMaxOverheadFactor();
+
+ // The minimal number of cases in a switch before the fast-case switch
+ // optimization is enabled. Each platform should return a value that
+ // is optimal compared to the default code generated for a switch statement
+ // on that platform.
+ int FastCaseSwitchMinCaseCount();
+
+ // Allocate a jump table and create code to jump through it.
+ // Should call GenerateFastCaseSwitchCases to generate the code for
+ // all the cases at the appropriate point.
+ void GenerateFastCaseSwitchJumpTable(SwitchStatement* node, int min_index,
+ int range, Label *fail_label,
+ SmartPointer<Label*> &case_targets,
+ SmartPointer<Label>& case_labels);
+
+ // Generate the code for cases for the fast case switch.
+ // Called by GenerateFastCaseSwitchJumpTable.
+ void GenerateFastCaseSwitchCases(SwitchStatement* node,
+ SmartPointer<Label> &case_labels);
+
+ // Fast support for constant-Smi switches.
+ void GenerateFastCaseSwitchStatement(SwitchStatement *node, int min_index,
+ int range, int default_index);
+
+ // Fast support for constant-Smi switches. Tests whether switch statement
+ // permits optimization and calls GenerateFastCaseSwitch if it does.
+ // Returns true if the fast-case switch was generated, and false if not.
+ bool TryGenerateFastCaseSwitchStatement(SwitchStatement *node);
+
+
+ // Bottle-neck interface to call the Assembler to generate the statement
+ // position. This allows us to easily control whether statement positions
+ // should be generated or not.
+ void RecordStatementPosition(Node* node);
+
+ // Activation frames.
+ void EnterJSFrame();
+ void ExitJSFrame();
+
+
+ bool is_eval_; // Tells whether code is generated for eval.
+ Handle<Script> script_;
+ List<DeferredCode*> deferred_;
+
+ // Assembler
+ MacroAssembler* masm_; // to generate code
+
+ // Code generation state
+ Scope* scope_;
+ Condition cc_reg_;
+ CodeGenState* state_;
+ bool is_inside_try_;
+ int break_stack_height_;
+
+ // Labels
+ Label function_return_;
+
+ friend class Reference;
+ friend class Property;
+ friend class VariableProxy;
+ friend class Slot;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+} } // namespace v8::internal
+
+#endif // V8_CODEGEN_ARM_H_
diff --git a/src/codegen-ia32.cc b/src/codegen-ia32.cc
index 9f97626..54ff293 100644
--- a/src/codegen-ia32.cc
+++ b/src/codegen-ia32.cc
@@ -30,348 +30,111 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
-#include "prettyprinter.h"
-#include "scopeinfo.h"
#include "scopes.h"
#include "runtime.h"
namespace v8 { namespace internal {
-#define TOS (Operand(esp, 0))
-
-
-class Ia32CodeGenerator;
-
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-
-
-// -----------------------------------------------------------------------------
-// Reference support
-
-// A reference is a C++ stack-allocated object that keeps an ECMA
-// reference on the execution stack while in scope. For variables
-// the reference is empty, indicating that it isn't necessary to
-// store state on the stack for keeping track of references to those.
-// For properties, we keep either one (named) or two (indexed) values
-// on the execution stack to represent the reference.
-
-class Reference BASE_EMBEDDED {
- public:
- enum Type { ILLEGAL = -1, EMPTY = 0, NAMED = 1, KEYED = 2 };
- Reference(Ia32CodeGenerator* cgen, Expression* expression);
- ~Reference();
-
- Expression* expression() const { return expression_; }
- Type type() const { return type_; }
- void set_type(Type value) {
- ASSERT(type_ == ILLEGAL);
- type_ = value;
- }
- int size() const { return type_; }
-
- bool is_illegal() const { return type_ == ILLEGAL; }
-
- private:
- Ia32CodeGenerator* cgen_;
- Expression* expression_;
- Type type_;
-};
-
+#define __ masm_->
// -------------------------------------------------------------------------
-// Code generation state
+// VirtualFrame implementation.
-// The state is passed down the AST by the code generator. It is passed
-// implicitly (in a member variable) to the non-static code generator member
-// functions, and explicitly (as an argument) to the static member functions
-// and the AST node member functions.
-//
-// The state is threaded through the call stack. Constructing a state
-// implicitly pushes it on the owning code generator's stack of states, and
-// destroying one implicitly pops it.
+VirtualFrame::VirtualFrame(CodeGenerator* cgen) {
+ ASSERT(cgen->scope() != NULL);
-class CodeGenState BASE_EMBEDDED {
- public:
- enum AccessType {
- UNDEFINED,
- LOAD,
- LOAD_TYPEOF_EXPR
- };
-
- // Create an initial code generator state. Destroying the initial state
- // leaves the code generator with a NULL state.
- explicit CodeGenState(Ia32CodeGenerator* owner);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has its own access type and pair of branch
- // labels, and no reference.
- CodeGenState(Ia32CodeGenerator* owner,
- AccessType access,
- Label* true_target,
- Label* false_target);
-
- // Create a code generator state based on a code generator's current
- // state. The new state has an access type of LOAD, its own reference,
- // and inherits the pair of branch labels of the current state.
- CodeGenState(Ia32CodeGenerator* owner, Reference* ref);
-
- // Destroy a code generator state and restore the owning code generator's
- // previous state.
- ~CodeGenState();
-
- AccessType access() const { return access_; }
- Reference* ref() const { return ref_; }
- Label* true_target() const { return true_target_; }
- Label* false_target() const { return false_target_; }
-
- private:
- Ia32CodeGenerator* owner_;
- AccessType access_;
- Reference* ref_;
- Label* true_target_;
- Label* false_target_;
- CodeGenState* previous_;
-};
+ masm_ = cgen->masm();
+ frame_local_count_ = cgen->scope()->num_stack_slots();
+ parameter_count_ = cgen->scope()->num_parameters();
+}
-// -----------------------------------------------------------------------------
-// Ia32CodeGenerator
+void VirtualFrame::Enter() {
+ Comment cmnt(masm_, "[ Enter JS frame");
+ __ push(ebp);
+ __ mov(ebp, Operand(esp));
-class Ia32CodeGenerator: public CodeGenerator {
- public:
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval);
+ // Store the context and the function in the frame.
+ __ push(esi);
+ __ push(edi);
- MacroAssembler* masm() { return masm_; }
-
- Scope* scope() const { return scope_; }
-
- CodeGenState* state() { return state_; }
- void set_state(CodeGenState* state) { state_ = state; }
-
- private:
- // Assembler
- MacroAssembler* masm_; // to generate code
-
- // Code generation state
- Scope* scope_;
- Condition cc_reg_;
- CodeGenState* state_;
- bool is_inside_try_;
- int break_stack_height_;
-
- // Labels
- Label function_return_;
-
- // Construction/destruction
- Ia32CodeGenerator(int buffer_size,
- Handle<Script> script,
- bool is_eval);
- virtual ~Ia32CodeGenerator() { delete masm_; }
-
- // Main code generation function
- void GenCode(FunctionLiteral* fun);
-
- // The following are used by class Reference.
- void LoadReference(Reference* ref);
- void UnloadReference(Reference* ref);
-
- // State
- bool has_cc() const { return cc_reg_ >= 0; }
- CodeGenState::AccessType access() const { return state_->access(); }
- Reference* ref() const { return state_->ref(); }
- bool is_referenced() const { return state_->ref() != NULL; }
- Label* true_target() const { return state_->true_target(); }
- Label* false_target() const { return state_->false_target(); }
-
- // Expressions
- Operand GlobalObject() const {
- return ContextOperand(esi, Context::GLOBAL_INDEX);
+ // Clear the function slot when generating debug code.
+ if (FLAG_debug_code) {
+ __ Set(edi, Immediate(reinterpret_cast<int>(kZapValue)));
}
+}
- // Support functions for accessing parameters. Static versions can
- // require some code generator state to be passed in as arguments.
- static Operand ParameterOperand(const CodeGenerator* cgen, int index) {
- int num_parameters = cgen->scope()->num_parameters();
- ASSERT(-2 <= index && index < num_parameters);
- return Operand(ebp, (1 + num_parameters - index) * kPointerSize);
+
+void VirtualFrame::Exit() {
+ Comment cmnt(masm_, "[ Exit JS frame");
+ // Record the location of the JS exit code for patching when setting
+ // break point.
+ __ RecordJSReturn();
+
+ // Avoid using the leave instruction here, because it is too
+ // short. We need the return sequence to be a least the size of a
+ // call instruction to support patching the exit code in the
+ // debugger. See VisitReturnStatement for the full return sequence.
+ __ mov(esp, Operand(ebp));
+ __ pop(ebp);
+}
+
+
+void VirtualFrame::AllocateLocals() {
+ if (frame_local_count_ > 0) {
+ Comment cmnt(masm_, "[ Allocate space for locals");
+ __ Set(eax, Immediate(Factory::undefined_value()));
+ for (int i = 0; i < frame_local_count_; i++) {
+ __ push(eax);
+ }
}
+}
- Operand ParameterOperand(int index) const {
- return ParameterOperand(this, index);
+
+void VirtualFrame::Drop(int count) {
+ ASSERT(count >= 0);
+ if (count > 0) {
+ __ add(Operand(esp), Immediate(count * kPointerSize));
}
+}
- Operand ReceiverOperand() const { return ParameterOperand(-1); }
- Operand FunctionOperand() const {
- return Operand(ebp, JavaScriptFrameConstants::kFunctionOffset);
- }
+void VirtualFrame::Pop() {
+ __ add(Operand(esp), Immediate(kPointerSize));
+}
- static Operand ContextOperand(Register context, int index) {
- return Operand(context, Context::SlotOffset(index));
- }
- static Operand SlotOperand(CodeGenerator* cgen,
- Slot* slot,
- Register tmp);
+void VirtualFrame::Pop(Register reg) {
+ __ pop(reg);
+}
- Operand SlotOperand(Slot* slot, Register tmp) {
- return SlotOperand(this, slot, tmp);
- }
- void LoadCondition(Expression* x,
- CodeGenState::AccessType access,
- Label* true_target,
- Label* false_target,
- bool force_cc);
- void Load(Expression* x,
- CodeGenState::AccessType access = CodeGenState::LOAD);
- void LoadGlobal();
+void VirtualFrame::Pop(Operand operand) {
+ __ pop(operand);
+}
- // Special code for typeof expressions: Unfortunately, we must
- // be careful when loading the expression in 'typeof'
- // expressions. We are not allowed to throw reference errors for
- // non-existing properties of the global object, so we must make it
- // look like an explicit property access, instead of an access
- // through the context chain.
- void LoadTypeofExpression(Expression* x);
- // References
+void VirtualFrame::Push(Register reg) {
+ __ push(reg);
+}
- // Generate code to fetch the value of a reference. The reference is
- // expected to be on top of the expression stack. It is left in place and
- // its value is pushed on top of it.
- void GetValue(Reference* ref) {
- ASSERT(!has_cc());
- ASSERT(!ref->is_illegal());
- CodeGenState new_state(this, ref);
- Visit(ref->expression());
- }
- // Generate code to store a value in a reference. The stored value is
- // expected on top of the expression stack, with the reference immediately
- // below it. The expression stack is left unchanged.
- void SetValue(Reference* ref) {
- ASSERT(!has_cc());
- ASSERT(!ref->is_illegal());
- ref->expression()->GenerateStoreCode(this, ref, NOT_CONST_INIT);
- }
+void VirtualFrame::Push(Operand operand) {
+ __ push(operand);
+}
- // Same as SetValue, used to set the initial value of a constant.
- void InitConst(Reference* ref) {
- ASSERT(!has_cc());
- ASSERT(!ref->is_illegal());
- ref->expression()->GenerateStoreCode(this, ref, CONST_INIT);
- }
- // Generate code to fetch a value from a property of a reference. The
- // reference is expected on top of the expression stack. It is left in
- // place and its value is pushed on top of it.
- void GetReferenceProperty(Expression* key);
-
- // Generate code to store a value in a property of a reference. The
- // stored value is expected on top of the expression stack, with the
- // reference immediately below it. The expression stack is left
- // unchanged.
- static void SetReferenceProperty(CodeGenerator* cgen,
- Reference* ref,
- Expression* key);
-
- void ToBoolean(Label* true_target, Label* false_target);
-
- void GenericBinaryOperation(
- Token::Value op,
- const OverwriteMode overwrite_mode = NO_OVERWRITE);
- void Comparison(Condition cc, bool strict = false);
-
- // Inline small integer literals. To prevent long attacker-controlled byte
- // sequences, we only inline small Smi:s.
- static const int kMaxSmiInlinedBits = 16;
- bool IsInlineSmi(Literal* literal);
- void SmiComparison(Condition cc, Handle<Object> value, bool strict = false);
- void SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode);
-
- void CallWithArguments(ZoneList<Expression*>* arguments, int position);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- virtual void DeclareGlobals(Handle<FixedArray> pairs);
-
- // Instantiate the function boilerplate.
- void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
-
- // Control flow
- void Branch(bool if_true, Label* L);
- void CheckStack();
- void CleanStack(int num_bytes);
-
- // Node visitors
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node);
- NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- // Only allow fast-case switch if the range of labels is at most
- // this factor times the number of case labels.
- // Value is derived from comparing the size of code generated by the normal
- // switch code for Smi-labels to the size of a single pointer. If code
- // quality increases this number should be decreased to match.
- static const int kFastSwitchMaxOverheadFactor = 5;
-
- // Minimal number of switch cases required before we allow jump-table
- // optimization.
- static const int kFastSwitchMinCaseCount = 5;
-
- virtual int FastCaseSwitchMaxOverheadFactor();
- virtual int FastCaseSwitchMinCaseCount();
-
- // Generate a computed jump with an empty jump table.
- // Binds a label to the start of the jump table. This table must
- // be populated later when the addresses of the targets are known.
- // Used by GenerateFastCaseSwitchStatement.
- virtual void GenerateFastCaseSwitchJumpTable(
- SwitchStatement* node, int min_index, int range, Label *fail_label,
- SmartPointer<Label*> &case_targets, SmartPointer<Label> &case_labels);
-
- void RecordStatementPosition(Node* node);
-
- // Activation frames.
- void EnterJSFrame();
- void ExitJSFrame();
-
- virtual void GenerateIsSmi(ZoneList<Expression*>* args);
- virtual void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
- virtual void GenerateIsArray(ZoneList<Expression*>* args);
-
- virtual void GenerateArgumentsLength(ZoneList<Expression*>* args);
- virtual void GenerateArgumentsAccess(ZoneList<Expression*>* args);
-
- virtual void GenerateValueOf(ZoneList<Expression*>* args);
- virtual void GenerateSetValueOf(ZoneList<Expression*>* args);
-
- virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
-
- virtual void GenerateObjectEquals(ZoneList<Expression*>* args);
-
- friend class Reference;
- friend class Property;
- friend class VariableProxy;
- friend class Slot;
-};
+void VirtualFrame::Push(Immediate immediate) {
+ __ push(immediate);
+}
// -------------------------------------------------------------------------
// CodeGenState implementation.
-CodeGenState::CodeGenState(Ia32CodeGenerator* owner)
+CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- access_(UNDEFINED),
- ref_(NULL),
+ typeof_state_(NOT_INSIDE_TYPEOF),
true_target_(NULL),
false_target_(NULL),
previous_(NULL) {
@@ -379,13 +142,12 @@
}
-CodeGenState::CodeGenState(Ia32CodeGenerator* owner,
- AccessType access,
+CodeGenState::CodeGenState(CodeGenerator* owner,
+ TypeofState typeof_state,
Label* true_target,
Label* false_target)
: owner_(owner),
- access_(access),
- ref_(NULL),
+ typeof_state_(typeof_state),
true_target_(true_target),
false_target_(false_target),
previous_(owner->state()) {
@@ -393,118 +155,23 @@
}
-CodeGenState::CodeGenState(Ia32CodeGenerator* owner, Reference* ref)
- : owner_(owner),
- access_(LOAD),
- ref_(ref),
- true_target_(owner->state()->true_target_),
- false_target_(owner->state()->false_target_),
- previous_(owner->state()) {
- owner_->set_state(this);
-}
-
-
CodeGenState::~CodeGenState() {
ASSERT(owner_->state() == this);
owner_->set_state(previous_);
}
-// -----------------------------------------------------------------------------
-// Ia32CodeGenerator implementation
+// -------------------------------------------------------------------------
+// CodeGenerator implementation
-#define __ masm_->
-
-Handle<Code> Ia32CodeGenerator::MakeCode(FunctionLiteral* flit,
- Handle<Script> script,
- bool is_eval) {
-#ifdef ENABLE_DISASSEMBLER
- bool print_code = FLAG_print_code && !Bootstrapper::IsActive();
-#endif
-
-#ifdef DEBUG
- bool print_source = false;
- bool print_ast = false;
- const char* ftype;
-
- if (Bootstrapper::IsActive()) {
- print_source = FLAG_print_builtin_source;
- print_ast = FLAG_print_builtin_ast;
- print_code = FLAG_print_builtin_code;
- ftype = "builtin";
- } else {
- print_source = FLAG_print_source;
- print_ast = FLAG_print_ast;
- ftype = "user-defined";
- }
-
- if (FLAG_trace_codegen || print_source || print_ast) {
- PrintF("*** Generate code for %s function: ", ftype);
- flit->name()->ShortPrint();
- PrintF(" ***\n");
- }
-
- if (print_source) {
- PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
- }
-
- if (print_ast) {
- PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
- }
-#endif // DEBUG
-
- // Generate code.
- const int initial_buffer_size = 4 * KB;
- Ia32CodeGenerator cgen(initial_buffer_size, script, is_eval);
- cgen.GenCode(flit);
- if (cgen.HasStackOverflow()) {
- ASSERT(!Top::has_pending_exception());
- return Handle<Code>::null();
- }
-
- // Process any deferred code.
- cgen.ProcessDeferred();
-
- // Allocate and install the code.
- CodeDesc desc;
- cgen.masm()->GetCode(&desc);
- ScopeInfo<> sinfo(flit->scope());
- Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
- Handle<Code> code = Factory::NewCode(desc, &sinfo, flags);
-
- // Add unresolved entries in the code to the fixup list.
- Bootstrapper::AddFixup(*code, cgen.masm());
-
-#ifdef ENABLE_DISASSEMBLER
- if (print_code) {
- // Print the source code if available.
- if (!script->IsUndefined() && !script->source()->IsUndefined()) {
- PrintF("--- Raw source ---\n");
- StringInputBuffer stream(String::cast(script->source()));
- stream.Seek(flit->start_position());
- // flit->end_position() points to the last character in the stream. We
- // need to compensate by adding one to calculate the length.
- int source_len = flit->end_position() - flit->start_position() + 1;
- for (int i = 0; i < source_len; i++) {
- if (stream.has_more()) PrintF("%c", stream.GetNext());
- }
- PrintF("\n\n");
- }
- PrintF("--- Code ---\n");
- code->Disassemble();
- }
-#endif // ENABLE_DISASSEMBLER
-
- return code;
-}
-
-
-Ia32CodeGenerator::Ia32CodeGenerator(int buffer_size,
- Handle<Script> script,
- bool is_eval)
- : CodeGenerator(is_eval, script),
+CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script,
+ bool is_eval)
+ : is_eval_(is_eval),
+ script_(script),
+ deferred_(8),
masm_(new MacroAssembler(NULL, buffer_size)),
scope_(NULL),
+ frame_(NULL),
cc_reg_(no_condition),
state_(NULL),
is_inside_try_(false),
@@ -518,17 +185,21 @@
// edi: caller's parameter pointer
// esi: callee's context
-void Ia32CodeGenerator::GenCode(FunctionLiteral* fun) {
+void CodeGenerator::GenCode(FunctionLiteral* fun) {
// Record the position for debugging purposes.
__ RecordPosition(fun->start_position());
- Scope* scope = fun->scope();
ZoneList<Statement*>* body = fun->body();
// Initialize state.
- { CodeGenState state(this);
- scope_ = scope;
- cc_reg_ = no_condition;
+ ASSERT(scope_ == NULL);
+ scope_ = fun->scope();
+ ASSERT(frame_ == NULL);
+ VirtualFrame virtual_frame(this);
+ frame_ = &virtual_frame;
+ cc_reg_ = no_condition;
+ {
+ CodeGenState state(this);
// Entry
// stack: function, receiver, arguments, return address
@@ -537,9 +208,7 @@
// edi: caller's parameter pointer
// esi: callee's context
- { Comment cmnt(masm_, "[ enter JS frame");
- EnterJSFrame();
- }
+ frame_->Enter();
// tos: code slot
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
@@ -561,36 +230,32 @@
// Allocate arguments object.
// The arguments object pointer needs to be saved in ecx, since we need
// to store arguments into the context.
- if (scope->arguments() != NULL) {
- ASSERT(scope->arguments_shadow() != NULL);
+ if (scope_->arguments() != NULL) {
+ ASSERT(scope_->arguments_shadow() != NULL);
Comment cmnt(masm_, "[ allocate arguments object");
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ lea(eax, ReceiverOperand());
- __ push(FunctionOperand());
- __ push(eax);
- __ push(Immediate(Smi::FromInt(scope->num_parameters())));
+ __ lea(eax, frame_->Receiver());
+ frame_->Push(frame_->Function());
+ frame_->Push(eax);
+ frame_->Push(Immediate(Smi::FromInt(scope_->num_parameters())));
__ CallStub(&stub);
__ mov(ecx, Operand(eax));
arguments_object_allocated = true;
}
// Allocate space for locals and initialize them.
- if (scope->num_stack_slots() > 0) {
- Comment cmnt(masm_, "[ allocate space for locals");
- __ Set(eax, Immediate(Factory::undefined_value()));
- for (int i = scope->num_stack_slots(); i-- > 0; ) __ push(eax);
- }
+ frame_->AllocateLocals();
- if (scope->num_heap_slots() > 0) {
+ if (scope_->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Save the arguments object pointer, if any.
if (arguments_object_allocated && !arguments_object_saved) {
- __ push(Operand(ecx));
+ frame_->Push(ecx);
arguments_object_saved = true;
}
// Allocate local context.
// Get outer context and create a new context based on it.
- __ push(FunctionOperand());
+ frame_->Push(frame_->Function());
__ CallRuntime(Runtime::kNewContext, 1); // eax holds the result
if (kDebug) {
@@ -603,7 +268,7 @@
}
// Update context local.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ __ mov(frame_->Context(), esi);
// Restore the arguments array pointer, if any.
}
@@ -622,17 +287,17 @@
// order: such a parameter is copied repeatedly into the same
// context location and thus the last value is what is seen inside
// the function.
- for (int i = 0; i < scope->num_parameters(); i++) {
- Variable* par = scope->parameter(i);
+ for (int i = 0; i < scope_->num_parameters(); i++) {
+ Variable* par = scope_->parameter(i);
Slot* slot = par->slot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
// Save the arguments object pointer, if any.
if (arguments_object_allocated && !arguments_object_saved) {
- __ push(Operand(ecx));
+ frame_->Push(ecx);
arguments_object_saved = true;
}
- ASSERT(!scope->is_global_scope()); // no parameters in global scope
- __ mov(eax, ParameterOperand(i));
+ ASSERT(!scope_->is_global_scope()); // no parameters in global scope
+ __ mov(eax, frame_->Parameter(i));
// Loads ecx with context; used below in RecordWrite.
__ mov(SlotOperand(slot, ecx), eax);
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
@@ -649,42 +314,42 @@
// This must happen after context initialization because
// the arguments object may be stored in the context
if (arguments_object_allocated) {
- ASSERT(scope->arguments() != NULL);
- ASSERT(scope->arguments_shadow() != NULL);
+ ASSERT(scope_->arguments() != NULL);
+ ASSERT(scope_->arguments_shadow() != NULL);
Comment cmnt(masm_, "[ store arguments object");
- { Reference shadow_ref(this, scope->arguments_shadow());
- { Reference arguments_ref(this, scope->arguments());
+ { Reference shadow_ref(this, scope_->arguments_shadow());
+ ASSERT(shadow_ref.is_slot());
+ { Reference arguments_ref(this, scope_->arguments());
+ ASSERT(arguments_ref.is_slot());
// If the newly-allocated arguments object is already on the
- // stack, we make use of the property that references representing
- // variables take up no space on the expression stack (ie, it
- // doesn't matter that the stored value is actually below the
- // reference).
- ASSERT(arguments_ref.size() == 0);
- ASSERT(shadow_ref.size() == 0);
-
- // If the newly-allocated argument object is not already on the
- // stack, we rely on the property that loading a
- // (zero-sized) reference will not clobber the ecx register.
+ // stack, we make use of the convenient property that references
+ // representing slots take up no space on the expression stack
+ // (ie, it doesn't matter that the stored value is actually below
+ // the reference).
+ //
+ // If the newly-allocated argument object is not already on
+ // the stack, we rely on the property that loading a
+ // zero-sized reference will not clobber the ecx register.
if (!arguments_object_saved) {
- __ push(ecx);
+ frame_->Push(ecx);
}
- SetValue(&arguments_ref);
+ arguments_ref.SetValue(NOT_CONST_INIT);
}
- SetValue(&shadow_ref);
+ shadow_ref.SetValue(NOT_CONST_INIT);
}
- __ pop(eax); // Value is no longer needed.
+ frame_->Pop(); // Value is no longer needed.
}
// Generate code to 'execute' declarations and initialize
// functions (source elements). In case of an illegal
// redeclaration we need to handle that instead of processing the
// declarations.
- if (scope->HasIllegalRedeclaration()) {
+ if (scope_->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ illegal redeclarations");
- scope->VisitIllegalRedeclaration(this);
+ scope_->VisitIllegalRedeclaration(this);
} else {
Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope->declarations());
+ ProcessDeclarations(scope_->declarations());
// Bail out if a stack-overflow exception occurred when
// processing declarations.
if (HasStackOverflow()) return;
@@ -692,14 +357,14 @@
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 1);
- __ push(eax);
+ frame_->Push(eax);
}
CheckStack();
// Compile the body of the function in a vanilla state. Don't
// bother compiling all the code if the scope has an illegal
// redeclaration.
- if (!scope->HasIllegalRedeclaration()) {
+ if (!scope_->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ function body");
#ifdef DEBUG
bool is_builtin = Bootstrapper::IsActive();
@@ -707,7 +372,7 @@
is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
if (should_trace) {
__ CallRuntime(Runtime::kDebugTrace, 1);
- __ push(eax);
+ frame_->Push(eax);
}
#endif
VisitStatements(body);
@@ -724,26 +389,76 @@
// Code generation state must be reset.
scope_ = NULL;
+ frame_ = NULL;
ASSERT(!has_cc());
ASSERT(state_ == NULL);
}
+Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ return frame_->Parameter(index);
+
+ case Slot::LOCAL:
+ return frame_->Local(index);
+
+ case Slot::CONTEXT: {
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(esi)); // do not overwrite context register
+ Register context = esi;
+ int chain_length = scope()->ContextChainLength(slot->var()->scope());
+ for (int i = chain_length; i-- > 0;) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+ // (In fact this mov may never be the needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return Operand(eax);
+ }
+}
+
+
// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition code
// register. If force_cc is set, the value is forced to set the condition code
// register and no value is pushed. If the condition code register was set,
// has_cc() is true and cc_reg_ contains the condition to test for 'true'.
-void Ia32CodeGenerator::LoadCondition(Expression* x,
- CodeGenState::AccessType access,
- Label* true_target,
- Label* false_target,
- bool force_cc) {
- ASSERT(access == CodeGenState::LOAD ||
- access == CodeGenState::LOAD_TYPEOF_EXPR);
- ASSERT(!has_cc() && !is_referenced());
+void CodeGenerator::LoadCondition(Expression* x,
+ TypeofState typeof_state,
+ Label* true_target,
+ Label* false_target,
+ bool force_cc) {
+ ASSERT(!has_cc());
- { CodeGenState new_state(this, access, true_target, false_target);
+ { CodeGenState new_state(this, typeof_state, true_target, false_target);
Visit(x);
}
if (force_cc && !has_cc()) {
@@ -753,23 +468,20 @@
}
-void Ia32CodeGenerator::Load(Expression* x, CodeGenState::AccessType access) {
- ASSERT(access == CodeGenState::LOAD ||
- access == CodeGenState::LOAD_TYPEOF_EXPR);
-
+void CodeGenerator::Load(Expression* x, TypeofState typeof_state) {
Label true_target;
Label false_target;
- LoadCondition(x, access, &true_target, &false_target, false);
+ LoadCondition(x, typeof_state, &true_target, &false_target, false);
if (has_cc()) {
// convert cc_reg_ into a bool
Label loaded, materialize_true;
__ j(cc_reg_, &materialize_true);
- __ push(Immediate(Factory::false_value()));
+ frame_->Push(Immediate(Factory::false_value()));
__ jmp(&loaded);
__ bind(&materialize_true);
- __ push(Immediate(Factory::true_value()));
+ frame_->Push(Immediate(Factory::true_value()));
__ bind(&loaded);
cc_reg_ = no_condition;
}
@@ -784,7 +496,7 @@
// reincarnate "true", if necessary
if (true_target.is_linked()) {
__ bind(&true_target);
- __ push(Immediate(Factory::true_value()));
+ frame_->Push(Immediate(Factory::true_value()));
}
// if both "true" and "false" need to be reincarnated,
// jump across code for "false"
@@ -793,7 +505,7 @@
// reincarnate "false", if necessary
if (false_target.is_linked()) {
__ bind(&false_target);
- __ push(Immediate(Factory::false_value()));
+ frame_->Push(Immediate(Factory::false_value()));
}
// everything is loaded at this point
__ bind(&loaded);
@@ -802,15 +514,15 @@
}
-void Ia32CodeGenerator::LoadGlobal() {
- __ push(GlobalObject());
+void CodeGenerator::LoadGlobal() {
+ frame_->Push(GlobalObject());
}
// TODO(1241834): Get rid of this function in favor of just using Load, now
-// that we have the LOAD_TYPEOF_EXPR access type. => Need to handle
-// global variables w/o reference errors elsewhere.
-void Ia32CodeGenerator::LoadTypeofExpression(Expression* x) {
+// that we have the INSIDE_TYPEOF typeof state. => Need to handle global
+// variables w/o reference errors elsewhere.
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
Variable* variable = x->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// NOTE: This is somewhat nasty. We force the compiler to load
@@ -823,12 +535,12 @@
Property property(&global, &key, RelocInfo::kNoPosition);
Load(&property);
} else {
- Load(x, CodeGenState::LOAD_TYPEOF_EXPR);
+ Load(x, INSIDE_TYPEOF);
}
}
-Reference::Reference(Ia32CodeGenerator* cgen, Expression* expression)
+Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this);
}
@@ -839,56 +551,61 @@
}
-void Ia32CodeGenerator::LoadReference(Reference* ref) {
+void CodeGenerator::LoadReference(Reference* ref) {
+ Comment cmnt(masm_, "[ LoadReference");
Expression* e = ref->expression();
Property* property = e->AsProperty();
Variable* var = e->AsVariableProxy()->AsVariable();
if (property != NULL) {
+ // The expression is either a property or a variable proxy that rewrites
+ // to a property.
Load(property->obj());
- // Used a named reference if the key is a literal symbol.
- // We don't use a named reference if they key is a string that can be
- // legally parsed as an integer. This is because, otherwise we don't
- // get into the slow case code that handles [] on String objects.
+ // We use a named reference if the key is a literal symbol, unless it is
+ // a string that can be legally parsed as an integer. This is because
+ // otherwise we will not get into the slow case code that handles [] on
+ // String objects.
Literal* literal = property->key()->AsLiteral();
uint32_t dummy;
- if (literal != NULL && literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (literal != NULL &&
+ literal->handle()->IsSymbol() &&
+ !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
ref->set_type(Reference::NAMED);
} else {
Load(property->key());
ref->set_type(Reference::KEYED);
}
} else if (var != NULL) {
+ // The expression is a variable proxy that does not rewrite to a
+ // property. Global variables are treated as named property references.
if (var->is_global()) {
- // global variable
LoadGlobal();
ref->set_type(Reference::NAMED);
} else {
- // local variable
- ref->set_type(Reference::EMPTY);
+ ASSERT(var->slot() != NULL);
+ ref->set_type(Reference::SLOT);
}
} else {
+ // Anything else is a runtime error.
Load(e);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ push(eax);
}
}
-void Ia32CodeGenerator::UnloadReference(Reference* ref) {
- // Pop n references on the stack while preserving TOS
+void CodeGenerator::UnloadReference(Reference* ref) {
+ // Pop a reference from the stack while preserving TOS.
Comment cmnt(masm_, "[ UnloadReference");
int size = ref->size();
if (size <= 0) {
// Do nothing. No popping is necessary.
} else if (size == 1) {
- __ pop(eax);
- __ mov(TOS, eax);
+ frame_->Pop(eax);
+ __ mov(frame_->Top(), eax);
} else {
- __ pop(eax);
- __ add(Operand(esp), Immediate(size * kPointerSize));
- __ push(eax);
+ frame_->Pop(eax);
+ frame_->Drop(size);
+ frame_->Push(eax);
}
}
@@ -900,29 +617,19 @@
void Generate(MacroAssembler* masm);
private:
-
Major MajorKey() { return ToBoolean; }
-
int MinorKey() { return 0; }
-
- const char* GetName() { return "ToBooleanStub"; }
-
-#ifdef DEBUG
- void Print() {
- PrintF("ToBooleanStub\n");
- }
-#endif
};
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
-void Ia32CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
+void CodeGenerator::ToBoolean(Label* true_target, Label* false_target) {
Comment cmnt(masm_, "[ ToBoolean");
// The value to convert should be popped from the stack.
- __ pop(eax);
+ frame_->Pop(eax);
// Fast case checks.
@@ -946,7 +653,7 @@
__ j(zero, true_target);
// Call the stub for all other cases.
- __ push(eax); // Undo the pop(eax) from above.
+ frame_->Push(eax); // Undo the pop(eax) from above.
ToBooleanStub stub;
__ CallStub(&stub);
// Convert result (eax) to condition code.
@@ -957,48 +664,6 @@
}
-void Ia32CodeGenerator::GetReferenceProperty(Expression* key) {
- ASSERT(!ref()->is_illegal());
- Reference::Type type = ref()->type();
-
- // TODO(1241834): Make sure that this it is safe to ignore the distinction
- // between access types LOAD and LOAD_TYPEOF_EXPR. If there is a chance
- // that reference errors can be thrown below, we must distinguish between
- // the two kinds of loads (typeof expression loads must not throw a
- // reference error).
- if (type == Reference::NAMED) {
- // Compute the name of the property.
- Literal* literal = key->AsLiteral();
- Handle<String> name(String::cast(*literal->handle()));
-
- Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
- Variable* var = ref()->expression()->AsVariableProxy()->AsVariable();
- // Setup the name register.
- __ Set(ecx, Immediate(name));
- if (var != NULL) {
- ASSERT(var->is_global());
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- } else {
- // Access keyed property.
- ASSERT(type == Reference::KEYED);
-
- // Call IC code.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
- Variable* var = ref()->expression()->AsVariableProxy()->AsVariable();
- if (var != NULL) {
- ASSERT(var->is_global());
- __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- } else {
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- }
- __ push(eax); // IC call leaves result in eax, push it out
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading floating point values. Input values must
@@ -1071,7 +736,7 @@
}
-void Ia32CodeGenerator::GenericBinaryOperation(Token::Value op,
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
Comment cmnt_token(masm_, Token::String(op));
@@ -1083,15 +748,15 @@
case Token::MOD: {
GenericBinaryOpStub stub(op, overwrite_mode);
__ CallStub(&stub);
- __ push(eax);
+ frame_->Push(eax);
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR: {
Label slow, exit;
- __ pop(eax); // get y
- __ pop(edx); // get x
+ frame_->Pop(eax); // get y
+ frame_->Pop(edx); // get x
__ mov(ecx, Operand(edx)); // Prepare smi check.
// tag check
__ or_(ecx, Operand(eax)); // ecx = x | y;
@@ -1106,20 +771,20 @@
}
__ jmp(&exit);
__ bind(&slow);
- __ push(edx); // restore stack slots
- __ push(eax);
+ frame_->Push(edx); // restore stack slots
+ frame_->Push(eax);
GenericBinaryOpStub stub(op, overwrite_mode);
__ CallStub(&stub);
__ bind(&exit);
- __ push(eax); // push the result to the stack
+ frame_->Push(eax); // push the result to the stack
break;
}
case Token::SHL:
case Token::SHR:
case Token::SAR: {
Label slow, exit;
- __ pop(edx); // get y
- __ pop(eax); // get x
+ frame_->Pop(edx); // get y
+ frame_->Pop(eax); // get x
// tag check
__ mov(ecx, Operand(edx));
__ or_(ecx, Operand(eax)); // ecx = x | y;
@@ -1164,19 +829,19 @@
__ jmp(&exit);
// slow case
__ bind(&slow);
- __ push(eax); // restore stack
- __ push(edx);
+ frame_->Push(eax); // restore stack
+ frame_->Push(edx);
GenericBinaryOpStub stub(op, overwrite_mode);
__ CallStub(&stub);
__ bind(&exit);
- __ push(eax);
+ frame_->Push(eax);
break;
}
case Token::COMMA: {
// simply discard left value
- __ pop(eax);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ push(eax);
+ frame_->Pop(eax);
+ frame_->Pop();
+ frame_->Push(eax);
break;
}
default: UNREACHABLE();
@@ -1317,7 +982,7 @@
// Undo the optimistic sub operation and call the shared stub.
__ add(eax, Operand(tos_reg_));
__ push(eax);
- __ push(Operand(tos_reg_));
+ __ push(tos_reg_);
GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_);
__ CallStub(&igostub);
}
@@ -1328,10 +993,10 @@
};
-void Ia32CodeGenerator::SmiOperation(Token::Value op,
- Handle<Object> value,
- bool reversed,
- OverwriteMode overwrite_mode) {
+void CodeGenerator::SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode overwrite_mode) {
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
// of the operands is a literal smi. With this optimization, the
@@ -1355,19 +1020,19 @@
deferred = new DeferredInlinedSmiAddReversed(this, int_value,
overwrite_mode);
}
- __ pop(eax);
+ frame_->Pop(eax);
__ add(Operand(eax), Immediate(value));
__ j(overflow, deferred->enter(), not_taken);
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, deferred->enter(), not_taken);
__ bind(deferred->exit());
- __ push(eax);
+ frame_->Push(eax);
break;
}
case Token::SUB: {
DeferredCode* deferred = NULL;
- __ pop(eax);
+ frame_->Pop(eax);
if (!reversed) {
deferred = new DeferredInlinedSmiSub(this, int_value, overwrite_mode);
__ sub(Operand(eax), Immediate(value));
@@ -1381,44 +1046,44 @@
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, deferred->enter(), not_taken);
__ bind(deferred->exit());
- __ push(eax);
+ frame_->Push(eax);
break;
}
case Token::SAR: {
if (reversed) {
- __ pop(eax);
- __ push(Immediate(value));
- __ push(eax);
+ frame_->Pop(eax);
+ frame_->Push(Immediate(value));
+ frame_->Push(eax);
GenericBinaryOperation(op, overwrite_mode);
} else {
int shift_value = int_value & 0x1f; // only least significant 5 bits
DeferredCode* deferred =
new DeferredInlinedSmiOperation(this, Token::SAR, shift_value,
overwrite_mode);
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, deferred->enter(), not_taken);
__ sar(eax, shift_value);
__ and_(eax, ~kSmiTagMask);
__ bind(deferred->exit());
- __ push(eax);
+ frame_->Push(eax);
}
break;
}
case Token::SHR: {
if (reversed) {
- __ pop(eax);
- __ push(Immediate(value));
- __ push(eax);
+ frame_->Pop(eax);
+ frame_->Push(Immediate(value));
+ frame_->Push(eax);
GenericBinaryOperation(op, overwrite_mode);
} else {
int shift_value = int_value & 0x1f; // only least significant 5 bits
DeferredCode* deferred =
new DeferredInlinedSmiOperation(this, Token::SHR, shift_value,
overwrite_mode);
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
__ mov(ebx, Operand(eax));
__ j(not_zero, deferred->enter(), not_taken);
@@ -1430,23 +1095,23 @@
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(ebx, times_2, kSmiTag));
__ bind(deferred->exit());
- __ push(eax);
+ frame_->Push(eax);
}
break;
}
case Token::SHL: {
if (reversed) {
- __ pop(eax);
- __ push(Immediate(value));
- __ push(eax);
+ frame_->Pop(eax);
+ frame_->Push(Immediate(value));
+ frame_->Push(eax);
GenericBinaryOperation(op, overwrite_mode);
} else {
int shift_value = int_value & 0x1f; // only least significant 5 bits
DeferredCode* deferred =
new DeferredInlinedSmiOperation(this, Token::SHL, shift_value,
overwrite_mode);
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
__ mov(ebx, Operand(eax));
__ j(not_zero, deferred->enter(), not_taken);
@@ -1459,7 +1124,7 @@
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(ebx, times_2, kSmiTag));
__ bind(deferred->exit());
- __ push(eax);
+ frame_->Push(eax);
}
break;
}
@@ -1475,7 +1140,7 @@
deferred = new DeferredInlinedSmiOperationReversed(this, op, int_value,
overwrite_mode);
}
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, deferred->enter(), not_taken);
if (op == Token::BIT_AND) {
@@ -1487,17 +1152,17 @@
__ or_(Operand(eax), Immediate(value));
}
__ bind(deferred->exit());
- __ push(eax);
+ frame_->Push(eax);
break;
}
default: {
if (!reversed) {
- __ push(Immediate(value));
+ frame_->Push(Immediate(value));
} else {
- __ pop(eax);
- __ push(Immediate(value));
- __ push(eax);
+ frame_->Pop(eax);
+ frame_->Push(Immediate(value));
+ frame_->Push(eax);
}
GenericBinaryOperation(op, overwrite_mode);
break;
@@ -1524,8 +1189,6 @@
return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
}
- const char* GetName() { return "CompareStub"; }
-
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n",
@@ -1536,23 +1199,22 @@
};
-void Ia32CodeGenerator::Comparison(Condition cc, bool strict) {
+void CodeGenerator::Comparison(Condition cc, bool strict) {
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == equal);
// Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
if (cc == greater || cc == less_equal) {
cc = ReverseCondition(cc);
- __ pop(edx);
- __ pop(eax);
+ frame_->Pop(edx);
+ frame_->Pop(eax);
} else {
- __ pop(eax);
- __ pop(edx);
+ frame_->Pop(eax);
+ frame_->Pop(edx);
}
+ // Check for the smi case.
Label is_smi, done;
- CompareStub stub(cc, strict);
-
__ mov(ecx, Operand(eax));
__ or_(ecx, Operand(edx));
__ test(ecx, Immediate(kSmiTagMask));
@@ -1560,8 +1222,13 @@
// When non-smi, call out to the compare stub. "parameters" setup by
// calling code in edx and eax and "result" is returned in the flags.
+ CompareStub stub(cc, strict);
__ CallStub(&stub);
- __ cmp(eax, 0);
+ if (cc == equal) {
+ __ test(eax, Operand(eax));
+ } else {
+ __ cmp(eax, 0);
+ }
__ jmp(&done);
// Test smi equality by pointer comparison.
@@ -1603,7 +1270,7 @@
}
-void Ia32CodeGenerator::SmiComparison(Condition cc,
+void CodeGenerator::SmiComparison(Condition cc,
Handle<Object> value,
bool strict) {
// Strict only makes sense for equality comparisons.
@@ -1614,7 +1281,7 @@
SmiComparisonDeferred* deferred =
new SmiComparisonDeferred(this, cc, strict, int_value);
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, deferred->enter(), not_taken);
// Test smi equality by pointer comparison.
@@ -1633,8 +1300,6 @@
private:
int argc_;
- const char* GetName() { return "CallFunctionStub"; }
-
#ifdef DEBUG
void Print() { PrintF("CallFunctionStub (args %d)\n", argc_); }
#endif
@@ -1646,10 +1311,12 @@
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
-void Ia32CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
int position) {
// Push the arguments ("left-to-right") on the stack.
- for (int i = 0; i < args->length(); i++) Load(args->at(i));
+ for (int i = 0; i < args->length(); i++) {
+ Load(args->at(i));
+ }
// Record the position for debugging purposes.
__ RecordPosition(position);
@@ -1659,12 +1326,12 @@
__ CallStub(&call_function);
// Restore context and pop function from the stack.
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(TOS, eax);
+ __ mov(esi, frame_->Context());
+ __ mov(frame_->Top(), eax);
}
-void Ia32CodeGenerator::Branch(bool if_true, Label* L) {
+void CodeGenerator::Branch(bool if_true, Label* L) {
ASSERT(has_cc());
Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_);
__ j(cc, L);
@@ -1672,7 +1339,7 @@
}
-void Ia32CodeGenerator::CheckStack() {
+void CodeGenerator::CheckStack() {
if (FLAG_check_stack) {
Label stack_is_ok;
StackCheckStub stub;
@@ -1686,7 +1353,7 @@
}
-void Ia32CodeGenerator::VisitBlock(Block* node) {
+void CodeGenerator::VisitBlock(Block* node) {
Comment cmnt(masm_, "[ Block");
RecordStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -1695,16 +1362,16 @@
}
-void Ia32CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- __ push(Immediate(pairs));
- __ push(Operand(esi));
- __ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
+void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ frame_->Push(Immediate(pairs));
+ frame_->Push(esi);
+ frame_->Push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
-void Ia32CodeGenerator::VisitDeclaration(Declaration* node) {
+void CodeGenerator::VisitDeclaration(Declaration* node) {
Comment cmnt(masm_, "[ Declaration");
Variable* var = node->proxy()->var();
ASSERT(var != NULL); // must have been resolved
@@ -1718,28 +1385,25 @@
// during variable resolution and must have mode DYNAMIC.
ASSERT(var->mode() == Variable::DYNAMIC);
// For now, just do a runtime call.
- __ push(Operand(esi));
- __ push(Immediate(var->name()));
+ frame_->Push(esi);
+ frame_->Push(Immediate(var->name()));
// Declaration nodes are always introduced in one of two modes.
ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST);
PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY;
- __ push(Immediate(Smi::FromInt(attr)));
+ frame_->Push(Immediate(Smi::FromInt(attr)));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (node->mode() == Variable::CONST) {
- __ push(Immediate(Factory::the_hole_value()));
+ frame_->Push(Immediate(Factory::the_hole_value()));
} else if (node->fun() != NULL) {
Load(node->fun());
} else {
- __ push(Immediate(0)); // no initial value!
+ frame_->Push(Immediate(0)); // no initial value!
}
- __ CallRuntime(Runtime::kDeclareContextSlot, 5);
- // DeclareContextSlot pops the assigned value by accepting an
- // extra argument and returning the TOS; no need to explicitly
- // pop here.
- __ push(eax);
+ __ CallRuntime(Runtime::kDeclareContextSlot, 4);
+ // Ignore the return value (declarations are statements).
return;
}
@@ -1756,31 +1420,36 @@
if (val != NULL) {
// Set initial value.
Reference target(this, node->proxy());
+ ASSERT(target.is_slot());
Load(val);
- SetValue(&target);
- // Get rid of the assigned value (declarations are statements).
- __ pop(eax); // Pop(no_reg);
+ target.SetValue(NOT_CONST_INIT);
+ // Get rid of the assigned value (declarations are statements). It's
+ // safe to pop the value lying on top of the reference before unloading
+ // the reference itself (which preserves the top of stack) because we
+ // know that it is a zero-sized reference.
+ frame_->Pop();
}
}
-void Ia32CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
+void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
Comment cmnt(masm_, "[ ExpressionStatement");
RecordStatementPosition(node);
Expression* expression = node->expression();
expression->MarkAsStatement();
Load(expression);
- __ pop(eax); // remove the lingering expression result from the top of stack
+ // Remove the lingering expression result from the top of stack.
+ frame_->Pop();
}
-void Ia32CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
+void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
Comment cmnt(masm_, "// EmptyStatement");
// nothing to do
}
-void Ia32CodeGenerator::VisitIfStatement(IfStatement* node) {
+void CodeGenerator::VisitIfStatement(IfStatement* node) {
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which
// parts of the if statement are present or not.
@@ -1793,7 +1462,7 @@
Label then;
Label else_;
// if (cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
Branch(false, &else_);
// then
__ bind(&then);
@@ -1807,7 +1476,7 @@
ASSERT(!has_else_stm);
Label then;
// if (cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &then, &exit, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &exit, true);
Branch(false, &exit);
// then
__ bind(&then);
@@ -1817,7 +1486,7 @@
ASSERT(!has_then_stm);
Label else_;
// if (!cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &else_, true);
Branch(true, &exit);
// else
__ bind(&else_);
@@ -1826,13 +1495,13 @@
} else {
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadCondition(node->condition(), CodeGenState::LOAD, &exit, &exit, false);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &exit, &exit, false);
if (has_cc()) {
cc_reg_ = no_condition;
} else {
// No cc value set up, that means the boolean was pushed.
// Pop it again, since it is not going to be used.
- __ pop(eax);
+ frame_->Pop();
}
}
@@ -1841,15 +1510,13 @@
}
-void Ia32CodeGenerator::CleanStack(int num_bytes) {
- ASSERT(num_bytes >= 0);
- if (num_bytes > 0) {
- __ add(Operand(esp), Immediate(num_bytes));
- }
+void CodeGenerator::CleanStack(int num_bytes) {
+ ASSERT(num_bytes % kPointerSize == 0);
+ frame_->Drop(num_bytes / kPointerSize);
}
-void Ia32CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
+void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
Comment cmnt(masm_, "[ ContinueStatement");
RecordStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
@@ -1857,7 +1524,7 @@
}
-void Ia32CodeGenerator::VisitBreakStatement(BreakStatement* node) {
+void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
Comment cmnt(masm_, "[ BreakStatement");
RecordStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
@@ -1865,13 +1532,13 @@
}
-void Ia32CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
+void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
Comment cmnt(masm_, "[ ReturnStatement");
RecordStatementPosition(node);
Load(node->expression());
// Move the function result into eax
- __ pop(eax);
+ frame_->Pop(eax);
// If we're inside a try statement or the return instruction
// sequence has been generated, we just jump to that
@@ -1882,7 +1549,7 @@
} else {
__ bind(&function_return_);
if (FLAG_trace) {
- __ push(eax); // undo the pop(eax) from above
+ frame_->Push(eax); // undo the pop(eax) from above
__ CallRuntime(Runtime::kTraceExit, 1);
}
@@ -1892,7 +1559,7 @@
// Leave the frame and return popping the arguments and the
// receiver.
- ExitJSFrame();
+ frame_->Exit();
__ ret((scope_->num_parameters() + 1) * kPointerSize);
// Check that the size of the code used for returning matches what is
@@ -1903,7 +1570,7 @@
}
-void Ia32CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
+void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
Comment cmnt(masm_, "[ WithEnterStatement");
RecordStatementPosition(node);
Load(node->expression());
@@ -1919,28 +1586,28 @@
}
// Update context local.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ __ mov(frame_->Context(), esi);
}
-void Ia32CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
+void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
Comment cmnt(masm_, "[ WithExitStatement");
// Pop context.
__ mov(esi, ContextOperand(esi, Context::PREVIOUS_INDEX));
// Update context local.
- __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
+ __ mov(frame_->Context(), esi);
}
-int Ia32CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
+int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
return kFastSwitchMaxOverheadFactor;
}
-int Ia32CodeGenerator::FastCaseSwitchMinCaseCount() {
+int CodeGenerator::FastCaseSwitchMinCaseCount() {
return kFastSwitchMinCaseCount;
}
// Generate a computed jump to a switch case.
-void Ia32CodeGenerator::GenerateFastCaseSwitchJumpTable(
+void CodeGenerator::GenerateFastCaseSwitchJumpTable(
SwitchStatement* node, int min_index, int range, Label *fail_label,
SmartPointer<Label*> &case_targets, SmartPointer<Label> &case_labels) {
// Notice: Internal references, used by both the jmp instruction and
@@ -1951,7 +1618,7 @@
// placeholders, and fill in the addresses after the labels have been
// bound.
- __ pop(eax); // supposed smi
+ frame_->Pop(eax); // supposed smi
// check range of value, if outside [0..length-1] jump to default/end label.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
if (min_index != 0) {
@@ -1986,7 +1653,7 @@
}
-void Ia32CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
+void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
Comment cmnt(masm_, "[ SwitchStatement");
RecordStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -2017,8 +1684,8 @@
} else {
__ bind(&next);
next.Unuse();
- __ mov(eax, TOS);
- __ push(eax); // duplicate TOS
+ __ mov(eax, frame_->Top());
+ frame_->Push(eax); // duplicate TOS
Load(clause->label());
Comparison(equal, true);
Branch(false, &next);
@@ -2026,7 +1693,7 @@
// Entering the case statement for the first time. Remove the switch value
// from the stack.
- __ pop(eax);
+ frame_->Pop(eax);
// Generate code for the body.
// This is also the target for the fall through from the previous case's
@@ -2045,7 +1712,7 @@
__ jmp(&default_case);
} else {
// Remove the switch value from the stack.
- __ pop(eax);
+ frame_->Pop();
}
__ bind(&fall_through);
@@ -2053,7 +1720,7 @@
}
-void Ia32CodeGenerator::VisitLoopStatement(LoopStatement* node) {
+void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
Comment cmnt(masm_, "[ LoopStatement");
RecordStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -2111,7 +1778,7 @@
case ALWAYS_FALSE:
break;
case DONT_KNOW:
- LoadCondition(node->cond(), CodeGenState::LOAD, &loop,
+ LoadCondition(node->cond(), NOT_INSIDE_TYPEOF, &loop,
node->break_target(), true);
Branch(true, &loop);
break;
@@ -2122,7 +1789,7 @@
}
-void Ia32CodeGenerator::VisitForInStatement(ForInStatement* node) {
+void CodeGenerator::VisitForInStatement(ForInStatement* node) {
Comment cmnt(masm_, "[ ForInStatement");
RecordStatementPosition(node);
@@ -2141,7 +1808,7 @@
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
- __ pop(eax);
+ frame_->Pop(eax);
// eax: value to be iterated over
__ cmp(eax, Factory::undefined_value());
@@ -2166,7 +1833,7 @@
__ j(above_equal, &jsobject);
__ bind(&primitive);
- __ push(eax);
+ frame_->Push(eax);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
// function call returns the value in eax, which is where we want it below
@@ -2175,9 +1842,9 @@
// Get the set of properties (as a FixedArray or Map).
// eax: value to be iterated over
- __ push(eax); // push the object being iterated over (slot 4)
+ frame_->Push(eax); // push the object being iterated over (slot 4)
- __ push(eax); // push the Object (slot 4) for the runtime call
+ frame_->Push(eax); // push the Object (slot 4) for the runtime call
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
// If we got a Map, we can do a fast modification check.
@@ -2198,26 +1865,26 @@
// Get the cache from the bridge array.
__ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
- __ push(eax); // <- slot 3
- __ push(Operand(edx)); // <- slot 2
+ frame_->Push(eax); // <- slot 3
+ frame_->Push(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
__ shl(eax, kSmiTagSize);
- __ push(eax); // <- slot 1
- __ push(Immediate(Smi::FromInt(0))); // <- slot 0
+ frame_->Push(eax); // <- slot 1
+ frame_->Push(Immediate(Smi::FromInt(0))); // <- slot 0
__ jmp(&entry);
__ bind(&fixed_array);
// eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- __ push(Immediate(Smi::FromInt(0))); // <- slot 3
- __ push(eax); // <- slot 2
+ frame_->Push(Immediate(Smi::FromInt(0))); // <- slot 3
+ frame_->Push(eax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
__ shl(eax, kSmiTagSize);
- __ push(eax); // <- slot 1
- __ push(Immediate(Smi::FromInt(0))); // <- slot 0
+ frame_->Push(eax); // <- slot 1
+ frame_->Push(Immediate(Smi::FromInt(0))); // <- slot 0
__ jmp(&entry);
// Body.
@@ -2227,39 +1894,39 @@
// Next.
__ bind(node->continue_target());
__ bind(&next);
- __ pop(eax);
+ frame_->Pop(eax);
__ add(Operand(eax), Immediate(Smi::FromInt(1)));
- __ push(eax);
+ frame_->Push(eax);
// Condition.
__ bind(&entry);
- __ mov(eax, Operand(esp, 0 * kPointerSize)); // load the current count
- __ cmp(eax, Operand(esp, kPointerSize)); // compare to the array length
+ __ mov(eax, frame_->Element(0)); // load the current count
+ __ cmp(eax, frame_->Element(1)); // compare to the array length
__ j(above_equal, &cleanup);
// Get the i'th entry of the array.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ mov(edx, frame_->Element(2));
__ mov(ebx, Operand(edx, eax, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
// Get the expected map from the stack or a zero map in the
// permanent slow case eax: current iteration count ebx: i'th entry
// of the enum cache
- __ mov(edx, Operand(esp, 3 * kPointerSize));
+ __ mov(edx, frame_->Element(3));
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
// eax: current iteration count
// ebx: i'th entry of the enum cache
// edx: expected map value
- __ mov(ecx, Operand(esp, 4 * kPointerSize));
+ __ mov(ecx, frame_->Element(4));
__ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
__ cmp(ecx, Operand(edx));
__ j(equal, &end_del_check);
// Convert the entry to a string (or null if it isn't a property anymore).
- __ push(Operand(esp, 4 * kPointerSize)); // push enumerable
- __ push(Operand(ebx)); // push entry
+ frame_->Push(frame_->Element(4)); // push enumerable
+ frame_->Push(ebx); // push entry
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
__ mov(ebx, Operand(eax));
@@ -2272,26 +1939,36 @@
// Store the entry in the 'each' expression and take another spin in the loop.
// edx: i'th entry of the enum cache (or string there of)
- __ push(Operand(ebx));
+ frame_->Push(ebx);
{ Reference each(this, node->each());
if (!each.is_illegal()) {
if (each.size() > 0) {
- __ push(Operand(esp, kPointerSize * each.size()));
+ frame_->Push(frame_->Element(each.size()));
}
- SetValue(&each);
+ // If the reference was to a slot we rely on the convenient property
+ // that it doesn't matter whether a value (e.g., ebx pushed above) is
+ // right on top of or right underneath a zero-sized reference.
+ each.SetValue(NOT_CONST_INIT);
if (each.size() > 0) {
- __ pop(eax);
+ // It's safe to pop the value lying on top of the reference before
+ // unloading the reference itself (which preserves the top of stack,
+ // i.e., now the topmost value of the non-zero sized reference), since
+ // we will discard the top of stack after unloading the reference
+ // anyway.
+ frame_->Pop();
}
}
}
- __ pop(eax); // pop the i'th entry pushed above
+ // Discard the i'th entry pushed above or else the remainder of the
+ // reference, whichever is currently on top of the stack.
+ frame_->Pop();
CheckStack(); // TODO(1222600): ignore if body contains calls.
__ jmp(&loop);
// Cleanup.
__ bind(&cleanup);
__ bind(node->break_target());
- __ add(Operand(esp), Immediate(5 * kPointerSize));
+ frame_->Drop(5);
// Exit.
__ bind(&exit);
@@ -2300,25 +1977,26 @@
}
-void Ia32CodeGenerator::VisitTryCatch(TryCatch* node) {
+void CodeGenerator::VisitTryCatch(TryCatch* node) {
Comment cmnt(masm_, "[ TryCatch");
Label try_block, exit;
__ call(&try_block);
// --- Catch block ---
- __ push(eax);
+ frame_->Push(eax);
// Store the caught exception in the catch variable.
{ Reference ref(this, node->catch_var());
- // Load the exception to the top of the stack.
- __ push(Operand(esp, ref.size() * kPointerSize));
- SetValue(&ref);
- __ pop(eax); // pop the pushed exception
+ ASSERT(ref.is_slot());
+ // Load the exception to the top of the stack. Here we make use of the
+ // convenient property that it doesn't matter whether a value is
+ // immediately on top of or underneath a zero-sized reference.
+ ref.SetValue(NOT_CONST_INIT);
}
// Remove the exception from the stack.
- __ pop(edx);
+ frame_->Pop();
VisitStatements(node->catch_block()->statements());
__ jmp(&exit);
@@ -2329,7 +2007,7 @@
__ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
// TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
- __ push(eax); //
+ frame_->Push(eax); //
// Introduce shadow labels for all escapes from the try block,
// including returns. We should probably try to unify the escaping
@@ -2367,9 +2045,9 @@
}
// Unlink from try chain.
- __ pop(eax);
+ frame_->Pop(eax);
__ mov(Operand::StaticVariable(handler_address), eax); // TOS == next_sp
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
// next_sp popped.
if (nof_unlinks > 0) __ jmp(&exit);
@@ -2386,9 +2064,8 @@
StackHandlerConstants::kAddressDisplacement;
__ lea(esp, Operand(edx, kNextOffset));
- __ pop(Operand::StaticVariable(handler_address));
- __ add(Operand(esp),
- Immediate(StackHandlerConstants::kSize - kPointerSize));
+ frame_->Pop(Operand::StaticVariable(handler_address));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
// next_sp popped.
__ jmp(shadows[i]->shadowed());
}
@@ -2398,7 +2075,7 @@
}
-void Ia32CodeGenerator::VisitTryFinally(TryFinally* node) {
+void CodeGenerator::VisitTryFinally(TryFinally* node) {
Comment cmnt(masm_, "[ TryFinally");
// State: Used to keep track of reason for entering the finally
@@ -2410,7 +2087,7 @@
__ call(&try_block);
- __ push(eax);
+ frame_->Push(eax);
// In case of thrown exceptions, this is where we continue.
__ Set(ecx, Immediate(Smi::FromInt(THROWING)));
__ jmp(&finally_block);
@@ -2421,7 +2098,7 @@
__ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
// TODO(1222589): remove the reliance of PushTryHandler on a cached TOS
- __ push(eax);
+ frame_->Push(eax);
// Introduce shadow labels for all escapes from the try block,
// including returns. We should probably try to unify the escaping
@@ -2448,7 +2125,7 @@
}
// Set the state on the stack to FALLING.
- __ push(Immediate(Factory::undefined_value())); // fake TOS
+ frame_->Push(Immediate(Factory::undefined_value())); // fake TOS
__ Set(ecx, Immediate(Smi::FromInt(FALLING)));
if (nof_unlinks > 0) __ jmp(&unlink);
@@ -2458,10 +2135,10 @@
__ bind(shadows[i]);
if (shadows[i]->shadowed() == &function_return_) {
// Materialize the return value on the stack.
- __ push(eax);
+ frame_->Push(eax);
} else {
// Fake TOS for break and continue.
- __ push(Immediate(Factory::undefined_value()));
+ frame_->Push(Immediate(Factory::undefined_value()));
}
__ Set(ecx, Immediate(Smi::FromInt(JUMPING + i)));
__ jmp(&unlink);
@@ -2472,23 +2149,25 @@
__ bind(&unlink);
// Reload sp from the top handler, because some statements that we
// break from (eg, for...in) may have left stuff on the stack.
- __ pop(eax); // preserve the TOS in a register across stack manipulation
+ // Preserve the TOS in a register across stack manipulation.
+ frame_->Pop(eax);
ExternalReference handler_address(Top::k_handler_address);
__ mov(edx, Operand::StaticVariable(handler_address));
const int kNextOffset = StackHandlerConstants::kNextOffset +
StackHandlerConstants::kAddressDisplacement;
__ lea(esp, Operand(edx, kNextOffset));
- __ pop(Operand::StaticVariable(handler_address));
- __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
- // next_sp popped.
- __ push(eax); // preserve the TOS in a register across stack manipulation
+ frame_->Pop(Operand::StaticVariable(handler_address));
+ frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
+ // Next_sp popped.
+ // Preserve the TOS in a register across stack manipulation.
+ frame_->Push(eax);
// --- Finally block ---
__ bind(&finally_block);
// Push the state on the stack.
- __ push(ecx);
+ frame_->Push(ecx);
// We keep two elements on the stack - the (possibly faked) result
// and the state - while evaluating the finally block. Record it, so
@@ -2501,8 +2180,8 @@
VisitStatements(node->finally_block()->statements());
// Restore state and return value or faked TOS.
- __ pop(ecx);
- __ pop(eax);
+ frame_->Pop(ecx);
+ frame_->Pop(eax);
break_stack_height_ -= kFinallyStackSize;
// Generate code that jumps to the right destination for all used
@@ -2519,7 +2198,7 @@
__ j(not_equal, &exit);
// Rethrow exception.
- __ push(eax); // undo pop from above
+ frame_->Push(eax); // undo pop from above
__ CallRuntime(Runtime::kReThrow, 1);
// Done.
@@ -2527,28 +2206,28 @@
}
-void Ia32CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
+void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
Comment cmnt(masm_, "[ DebuggerStatement");
RecordStatementPosition(node);
__ CallRuntime(Runtime::kDebugBreak, 1);
- __ push(eax);
+ frame_->Push(eax);
}
-void Ia32CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
+void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
ASSERT(boilerplate->IsBoilerplate());
// Push the boilerplate on the stack.
- __ push(Immediate(boilerplate));
+ frame_->Push(Immediate(boilerplate));
// Create a new closure.
- __ push(esi);
+ frame_->Push(esi);
__ CallRuntime(Runtime::kNewClosure, 2);
- __ push(eax);
+ frame_->Push(eax);
}
-void Ia32CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -2559,93 +2238,86 @@
}
-void Ia32CodeGenerator::VisitFunctionBoilerplateLiteral(
+void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
InstantiateBoilerplate(node->boilerplate());
}
-void Ia32CodeGenerator::VisitConditional(Conditional* node) {
+void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
Label then, else_, exit;
- LoadCondition(node->condition(), CodeGenState::LOAD, &then, &else_, true);
+ LoadCondition(node->condition(), NOT_INSIDE_TYPEOF, &then, &else_, true);
Branch(false, &else_);
__ bind(&then);
- Load(node->then_expression(), access());
+ Load(node->then_expression(), typeof_state());
__ jmp(&exit);
__ bind(&else_);
- Load(node->else_expression(), access());
+ Load(node->else_expression(), typeof_state());
__ bind(&exit);
}
-void Ia32CodeGenerator::VisitSlot(Slot* node) {
- ASSERT(access() != CodeGenState::UNDEFINED);
- Comment cmnt(masm_, "[ Slot");
-
- if (node->type() == Slot::LOOKUP) {
- ASSERT(node->var()->mode() == Variable::DYNAMIC);
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->mode() == Variable::DYNAMIC);
// For now, just do a runtime call.
- __ push(Operand(esi));
- __ push(Immediate(node->var()->name()));
+ frame_->Push(esi);
+ frame_->Push(Immediate(slot->var()->name()));
- if (access() == CodeGenState::LOAD) {
- __ CallRuntime(Runtime::kLoadContextSlot, 2);
- } else {
- ASSERT(access() == CodeGenState::LOAD_TYPEOF_EXPR);
+ if (typeof_state == INSIDE_TYPEOF) {
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
+ } else {
+ __ CallRuntime(Runtime::kLoadContextSlot, 2);
}
- __ push(eax);
+ frame_->Push(eax);
} else {
// Note: We would like to keep the assert below, but it fires because of
// some nasty code in LoadTypeofExpression() which should be removed...
- // ASSERT(node->var()->mode() != Variable::DYNAMIC);
-
- if (node->var()->mode() == Variable::CONST) {
+ // ASSERT(slot->var()->mode() != Variable::DYNAMIC);
+ if (slot->var()->mode() == Variable::CONST) {
// Const slots may contain 'the hole' value (the constant hasn't been
// initialized yet) which needs to be converted into the 'undefined'
// value.
Comment cmnt(masm_, "[ Load const");
- Label L;
- __ mov(eax, SlotOperand(node, ecx));
+ Label exit;
+ __ mov(eax, SlotOperand(slot, ecx));
__ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &L);
+ __ j(not_equal, &exit);
__ mov(eax, Factory::undefined_value());
- __ bind(&L);
- __ push(eax);
+ __ bind(&exit);
+ frame_->Push(eax);
} else {
- __ push(SlotOperand(node, ecx));
+ frame_->Push(SlotOperand(slot, ecx));
}
}
}
-void Ia32CodeGenerator::VisitVariableProxy(VariableProxy* node) {
- Comment cmnt(masm_, "[ VariableProxy");
- Variable* var_node = node->var();
+void CodeGenerator::VisitSlot(Slot* node) {
+ Comment cmnt(masm_, "[ Slot");
+ LoadFromSlot(node, typeof_state());
+}
- Expression* expr = var_node->rewrite();
+
+void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+ Comment cmnt(masm_, "[ VariableProxy");
+ Variable* var = node->var();
+ Expression* expr = var->rewrite();
if (expr != NULL) {
Visit(expr);
} else {
- ASSERT(var_node->is_global());
- if (is_referenced()) {
- if (var_node->AsProperty() != NULL) {
- __ RecordPosition(var_node->AsProperty()->position());
- }
- GetReferenceProperty(new Literal(var_node->name()));
- } else {
- Reference property(this, node);
- GetValue(&property);
- }
+ ASSERT(var->is_global());
+ Reference ref(this, node);
+ ref.GetValue(typeof_state());
}
}
-void Ia32CodeGenerator::VisitLiteral(Literal* node) {
+void CodeGenerator::VisitLiteral(Literal* node) {
Comment cmnt(masm_, "[ Literal");
if (node->handle()->IsSmi() && !IsInlineSmi(node)) {
// To prevent long attacker-controlled byte sequences in code, larger
@@ -2653,9 +2325,9 @@
int bits = reinterpret_cast<int>(*node->handle());
__ mov(eax, bits & 0x0000FFFF);
__ xor_(eax, bits & 0xFFFF0000);
- __ push(eax);
+ frame_->Push(eax);
} else {
- __ push(Immediate(node->handle()));
+ frame_->Push(Immediate(node->handle()));
}
}
@@ -2689,14 +2361,14 @@
}
-void Ia32CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
Comment cmnt(masm_, "[ RegExp Literal");
RegExpDeferred* deferred = new RegExpDeferred(this, node);
// Retrieve the literal array and check the allocated entry.
// Load the function of this activation.
- __ mov(ecx, FunctionOperand());
+ __ mov(ecx, frame_->Function());
// Load the literals array of the function.
__ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
@@ -2713,7 +2385,7 @@
__ bind(deferred->exit());
// Push the literal.
- __ push(ebx);
+ frame_->Push(ebx);
}
@@ -2739,7 +2411,7 @@
// the literal.
// Literal array (0).
- __ push(Operand(ecx));
+ __ push(ecx);
// Literal index (1).
__ push(Immediate(Smi::FromInt(node_->literal_index())));
// Constant properties (2).
@@ -2749,14 +2421,14 @@
}
-void Ia32CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
ObjectLiteralDeferred* deferred = new ObjectLiteralDeferred(this, node);
// Retrieve the literal array and check the allocated entry.
// Load the function of this activation.
- __ mov(ecx, FunctionOperand());
+ __ mov(ecx, frame_->Function());
// Load the literals array of the function.
__ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
@@ -2773,11 +2445,11 @@
__ bind(deferred->exit());
// Push the literal.
- __ push(ebx);
+ frame_->Push(ebx);
// Clone the boilerplate object.
__ CallRuntime(Runtime::kCloneObjectLiteralBoilerplate, 1);
// Push the new cloned literal object as the result.
- __ push(eax);
+ frame_->Push(eax);
for (int i = 0; i < node->properties()->length(); i++) {
@@ -2788,21 +2460,21 @@
Handle<Object> key(property->key()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
if (key->IsSymbol()) {
- __ mov(eax, TOS);
- __ push(eax);
+ __ mov(eax, frame_->Top());
+ frame_->Push(eax);
Load(property->value());
- __ pop(eax);
+ frame_->Pop(eax);
__ Set(ecx, Immediate(key));
__ call(ic, RelocInfo::CODE_TARGET);
- __ add(Operand(esp), Immediate(kPointerSize));
+ frame_->Pop();
// Ignore result.
break;
}
// Fall through
}
case ObjectLiteral::Property::PROTOTYPE: {
- __ mov(eax, TOS);
- __ push(eax);
+ __ mov(eax, frame_->Top());
+ frame_->Push(eax);
Load(property->key());
Load(property->value());
__ CallRuntime(Runtime::kSetProperty, 3);
@@ -2812,10 +2484,10 @@
case ObjectLiteral::Property::SETTER: {
// Duplicate the resulting object on the stack. The runtime
// function will pop the three arguments passed in.
- __ mov(eax, TOS);
- __ push(eax);
+ __ mov(eax, frame_->Top());
+ frame_->Push(eax);
Load(property->key());
- __ push(Immediate(Smi::FromInt(1)));
+ frame_->Push(Immediate(Smi::FromInt(1)));
Load(property->value());
__ CallRuntime(Runtime::kDefineAccessor, 4);
// Ignore result.
@@ -2824,10 +2496,10 @@
case ObjectLiteral::Property::GETTER: {
// Duplicate the resulting object on the stack. The runtime
// function will pop the three arguments passed in.
- __ mov(eax, TOS);
- __ push(eax);
+ __ mov(eax, frame_->Top());
+ frame_->Push(eax);
Load(property->key());
- __ push(Immediate(Smi::FromInt(0)));
+ frame_->Push(Immediate(Smi::FromInt(0)));
Load(property->value());
__ CallRuntime(Runtime::kDefineAccessor, 4);
// Ignore result.
@@ -2839,20 +2511,20 @@
}
-void Ia32CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
// Call runtime to create the array literal.
- __ push(Immediate(node->literals()));
+ frame_->Push(Immediate(node->literals()));
// Load the function of this frame.
- __ mov(ecx, FunctionOperand());
+ __ mov(ecx, frame_->Function());
// Load the literals array of the function.
__ mov(ecx, FieldOperand(ecx, JSFunction::kLiteralsOffset));
- __ push(ecx);
+ frame_->Push(ecx);
__ CallRuntime(Runtime::kCreateArrayLiteral, 2);
// Push the resulting array literal on the stack.
- __ push(eax);
+ frame_->Push(eax);
// Generate code to set the elements in the array that are not
// literals.
@@ -2866,9 +2538,9 @@
Load(value);
// Get the value off the stack.
- __ pop(eax);
+ frame_->Pop(eax);
// Fetch the object literal while leaving on the stack.
- __ mov(ecx, TOS);
+ __ mov(ecx, frame_->Top());
// Get the elements array.
__ mov(ecx, FieldOperand(ecx, JSObject::kElementsOffset));
@@ -2883,14 +2555,14 @@
}
-bool Ia32CodeGenerator::IsInlineSmi(Literal* literal) {
+bool CodeGenerator::IsInlineSmi(Literal* literal) {
if (literal == NULL || !literal->handle()->IsSmi()) return false;
int int_value = Smi::cast(*literal->handle())->value();
return is_intn(int_value, kMaxSmiInlinedBits);
}
-void Ia32CodeGenerator::VisitAssignment(Assignment* node) {
+void CodeGenerator::VisitAssignment(Assignment* node) {
Comment cmnt(masm_, "[ Assignment");
RecordStatementPosition(node);
@@ -2903,7 +2575,7 @@
Load(node->value());
} else {
- GetValue(&target);
+ target.GetValue(NOT_INSIDE_TYPEOF);
Literal* literal = node->value()->AsLiteral();
if (IsInlineSmi(literal)) {
SmiOperation(node->binary_op(), literal->handle(), false, NO_OVERWRITE);
@@ -2924,39 +2596,33 @@
// Dynamic constant initializations must use the function context
// and initialize the actual constant declared. Dynamic variable
// initializations are simply assignments and use SetValue.
- InitConst(&target);
+ target.SetValue(CONST_INIT);
} else {
- SetValue(&target);
+ target.SetValue(NOT_CONST_INIT);
}
}
}
-void Ia32CodeGenerator::VisitThrow(Throw* node) {
+void CodeGenerator::VisitThrow(Throw* node) {
Comment cmnt(masm_, "[ Throw");
Load(node->exception());
__ RecordPosition(node->position());
__ CallRuntime(Runtime::kThrow, 1);
- __ push(eax);
+ frame_->Push(eax);
}
-void Ia32CodeGenerator::VisitProperty(Property* node) {
+void CodeGenerator::VisitProperty(Property* node) {
Comment cmnt(masm_, "[ Property");
- if (is_referenced()) {
- __ RecordPosition(node->position());
- GetReferenceProperty(node->key());
- } else {
- Reference property(this, node);
- __ RecordPosition(node->position());
- GetValue(&property);
- }
+ Reference property(this, node);
+ property.GetValue(typeof_state());
}
-void Ia32CodeGenerator::VisitCall(Call* node) {
+void CodeGenerator::VisitCall(Call* node) {
Comment cmnt(masm_, "[ Call");
ZoneList<Expression*>* args = node->arguments();
@@ -2983,7 +2649,7 @@
// ----------------------------------
// Push the name of the function and the receiver onto the stack.
- __ push(Immediate(var->name()));
+ frame_->Push(Immediate(var->name()));
LoadGlobal();
// Load the arguments.
@@ -2995,10 +2661,10 @@
Handle<Code> stub = ComputeCallInitialize(args->length());
__ RecordPosition(node->position());
__ call(stub, RelocInfo::CODE_TARGET_CONTEXT);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(esi, frame_->Context());
// Overwrite the function on the stack with the result.
- __ mov(TOS, eax);
+ __ mov(frame_->Top(), eax);
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
@@ -3007,14 +2673,14 @@
// ----------------------------------
// Load the function
- __ push(Operand(esi));
- __ push(Immediate(var->name()));
+ frame_->Push(esi);
+ frame_->Push(Immediate(var->name()));
__ CallRuntime(Runtime::kLoadContextSlot, 2);
// eax: slot value; edx: receiver
// Load the receiver.
- __ push(eax);
- __ push(edx);
+ frame_->Push(eax);
+ frame_->Push(edx);
// Call the function.
CallWithArguments(args, node->position());
@@ -3029,7 +2695,7 @@
// ------------------------------------------------------------------
// Push the name of the function and the receiver onto the stack.
- __ push(Immediate(literal->handle()));
+ frame_->Push(Immediate(literal->handle()));
Load(property->obj());
// Load the arguments.
@@ -3039,10 +2705,10 @@
Handle<Code> stub = ComputeCallInitialize(args->length());
__ RecordPosition(node->position());
__ call(stub, RelocInfo::CODE_TARGET);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+ __ mov(esi, frame_->Context());
// Overwrite the function on the stack with the result.
- __ mov(TOS, eax);
+ __ mov(frame_->Top(), eax);
} else {
// -------------------------------------------
@@ -3051,10 +2717,11 @@
// Load the function to call from the property through a reference.
Reference ref(this, property);
- GetValue(&ref);
+ ref.GetValue(NOT_INSIDE_TYPEOF);
// Pass receiver to called function.
- __ push(Operand(esp, ref.size() * kPointerSize));
+ // The reference's size is non-negative.
+ frame_->Push(frame_->Element(ref.size()));
// Call the function.
CallWithArguments(args, node->position());
@@ -3077,7 +2744,7 @@
}
-void Ia32CodeGenerator::VisitCallNew(CallNew* node) {
+void CodeGenerator::VisitCallNew(CallNew* node) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -3102,30 +2769,31 @@
// Load the function into temporary function slot as per calling
// convention.
- __ mov(edi, Operand(esp, (args->length() + 1) * kPointerSize));
+ __ mov(edi, frame_->Element(args->length() + 1));
// Call the construct call builtin that handles allocation and
// constructor invocation.
__ RecordPosition(node->position());
__ call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
RelocInfo::CONSTRUCT_CALL);
- __ mov(TOS, eax); // discard the function and "push" the newly created object
+ // Discard the function and "push" the newly created object.
+ __ mov(frame_->Top(), eax);
}
-void Ia32CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
cc_reg_ = zero;
}
-void Ia32CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask | 0x80000000));
cc_reg_ = zero;
}
@@ -3137,7 +2805,7 @@
// cons strings where the answer is found in the left hand branch of the
// cons. The slow case will flatten the string, which will ensure that
// the answer is in the left hand side the next time around.
-void Ia32CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Label slow_case;
@@ -3150,7 +2818,7 @@
// Load the string into eax.
Load(args->at(0));
- __ pop(eax);
+ frame_->Pop(eax);
// If the receiver is a smi return undefined.
ASSERT(kSmiTag == 0);
__ test(eax, Immediate(kSmiTagMask));
@@ -3158,7 +2826,7 @@
// Load the index into ebx.
Load(args->at(1));
- __ pop(ebx);
+ frame_->Pop(ebx);
// Check for negative or non-smi index.
ASSERT(kSmiTag == 0);
@@ -3217,18 +2885,19 @@
// 2-byte string.
// Load the 2-byte character code.
- __ movzx_w(eax, FieldOperand(eax, ebx, times_2, TwoByteString::kHeaderSize));
+ __ movzx_w(eax,
+ FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
// ASCII string.
__ bind(&ascii_string);
// Load the byte.
- __ movzx_b(eax, FieldOperand(eax, ebx, times_1, AsciiString::kHeaderSize));
+ __ movzx_b(eax, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
ASSERT(kSmiTag == 0);
__ shl(eax, kSmiTagSize);
- __ push(eax);
+ frame_->Push(eax);
__ jmp(&end);
@@ -3256,13 +2925,13 @@
__ jmp(&try_again_with_new_string);
__ bind(&slow_case);
- __ push(Immediate(Factory::undefined_value()));
+ frame_->Push(Immediate(Factory::undefined_value()));
__ bind(&end);
}
-void Ia32CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Load(args->at(0));
Label answer;
@@ -3270,7 +2939,7 @@
// object is a smi. This can't be done with the usual test opcode so
// we copy the object to ecx and do some destructive ops on it that
// result in the right CC bits.
- __ pop(eax);
+ frame_->Pop(eax);
__ mov(ecx, Operand(eax));
__ and_(ecx, kSmiTagMask);
__ xor_(ecx, kSmiTagMask);
@@ -3285,7 +2954,7 @@
}
-void Ia32CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
// Seed the result with the formal parameters count, which will be
@@ -3296,15 +2965,15 @@
// Call the shared stub to get to the arguments.length.
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH);
__ CallStub(&stub);
- __ push(eax);
+ frame_->Push(eax);
}
-void Ia32CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
Label leave;
Load(args->at(0)); // Load the object.
- __ mov(eax, TOS);
+ __ mov(eax, frame_->Top());
// if (object->IsSmi()) return object.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &leave, taken);
@@ -3315,18 +2984,18 @@
__ cmp(ecx, JS_VALUE_TYPE);
__ j(not_equal, &leave, not_taken);
__ mov(eax, FieldOperand(eax, JSValue::kValueOffset));
- __ mov(TOS, eax);
+ __ mov(frame_->Top(), eax);
__ bind(&leave);
}
-void Ia32CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
Label leave;
Load(args->at(0)); // Load the object.
Load(args->at(1)); // Load the value.
- __ mov(eax, (Operand(esp, kPointerSize)));
- __ mov(ecx, TOS);
+ __ mov(eax, frame_->Element(1));
+ __ mov(ecx, frame_->Top());
// if (object->IsSmi()) return object.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &leave, taken);
@@ -3342,13 +3011,13 @@
__ RecordWrite(eax, JSValue::kValueOffset, ecx, ebx);
// Leave.
__ bind(&leave);
- __ mov(ecx, TOS);
- __ pop(eax);
- __ mov(TOS, ecx);
+ __ mov(ecx, frame_->Top());
+ frame_->Pop();
+ __ mov(frame_->Top(), ecx);
}
-void Ia32CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
// Load the key onto the stack and set register eax to the formal
@@ -3359,24 +3028,24 @@
// Call the shared stub to get to arguments[key].
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
- __ mov(TOS, eax);
+ __ mov(frame_->Top(), eax);
}
-void Ia32CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
// Load the two objects into registers and perform the comparison.
Load(args->at(0));
Load(args->at(1));
- __ pop(eax);
- __ pop(ecx);
+ frame_->Pop(eax);
+ frame_->Pop(ecx);
__ cmp(eax, Operand(ecx));
cc_reg_ = equal;
}
-void Ia32CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) return;
ZoneList<Expression*>* args = node->arguments();
@@ -3385,10 +3054,10 @@
if (function == NULL) {
// Prepare stack for calling JS runtime function.
- __ push(Immediate(node->name()));
+ frame_->Push(Immediate(node->name()));
// Push the builtins object found in the current global object.
__ mov(edx, GlobalObject());
- __ push(FieldOperand(edx, GlobalObject::kBuiltinsOffset));
+ frame_->Push(FieldOperand(edx, GlobalObject::kBuiltinsOffset));
}
// Push the arguments ("left-to-right").
@@ -3398,25 +3067,25 @@
if (function != NULL) {
// Call the C runtime function.
__ CallRuntime(function, args->length());
- __ push(eax);
+ frame_->Push(eax);
} else {
// Call the JS runtime function.
Handle<Code> stub = ComputeCallInitialize(args->length());
__ Set(eax, Immediate(args->length()));
__ call(stub, RelocInfo::CODE_TARGET);
- __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- __ mov(TOS, eax);
+ __ mov(esi, frame_->Context());
+ __ mov(frame_->Top(), eax);
}
}
-void Ia32CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Comment cmnt(masm_, "[ UnaryOperation");
Token::Value op = node->op();
if (op == Token::NOT) {
- LoadCondition(node->expression(), CodeGenState::LOAD,
+ LoadCondition(node->expression(), NOT_INSIDE_TYPEOF,
false_target(), true_target(), true);
cc_reg_ = NegateCondition(cc_reg_);
@@ -3426,7 +3095,7 @@
Load(property->obj());
Load(property->key());
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- __ push(eax);
+ frame_->Push(eax);
return;
}
@@ -3435,32 +3104,32 @@
Slot* slot = variable->slot();
if (variable->is_global()) {
LoadGlobal();
- __ push(Immediate(variable->name()));
+ frame_->Push(Immediate(variable->name()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- __ push(eax);
+ frame_->Push(eax);
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// lookup the context holding the named variable
- __ push(Operand(esi));
- __ push(Immediate(variable->name()));
+ frame_->Push(esi);
+ frame_->Push(Immediate(variable->name()));
__ CallRuntime(Runtime::kLookupContext, 2);
// eax: context
- __ push(eax);
- __ push(Immediate(variable->name()));
+ frame_->Push(eax);
+ frame_->Push(Immediate(variable->name()));
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- __ push(eax);
+ frame_->Push(eax);
return;
}
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- __ push(Immediate(Factory::false_value()));
+ frame_->Push(Immediate(Factory::false_value()));
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects
- __ Set(TOS, Immediate(Factory::true_value()));
+ __ Set(frame_->Top(), Immediate(Factory::true_value()));
}
} else if (op == Token::TYPEOF) {
@@ -3468,7 +3137,7 @@
// LoadTypeofExpression().
LoadTypeofExpression(node->expression());
__ CallRuntime(Runtime::kTypeof, 1);
- __ push(eax);
+ frame_->Push(eax);
} else {
Load(node->expression());
@@ -3482,9 +3151,9 @@
case Token::SUB: {
UnarySubStub stub;
// TODO(1222589): remove dependency of TOS being cached inside stub
- __ pop(eax);
+ frame_->Pop(eax);
__ CallStub(&stub);
- __ push(eax);
+ frame_->Push(eax);
break;
}
@@ -3492,11 +3161,11 @@
// Smi check.
Label smi_label;
Label continue_label;
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &smi_label, taken);
- __ push(eax); // undo popping of TOS
+ frame_->Push(eax); // undo popping of TOS
__ InvokeBuiltin(Builtins::BIT_NOT, CALL_FUNCTION);
__ jmp(&continue_label);
@@ -3504,26 +3173,26 @@
__ not_(eax);
__ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
__ bind(&continue_label);
- __ push(eax);
+ frame_->Push(eax);
break;
}
case Token::VOID:
- __ mov(TOS, Factory::undefined_value());
+ __ mov(frame_->Top(), Factory::undefined_value());
break;
case Token::ADD: {
// Smi check.
Label continue_label;
- __ pop(eax);
+ frame_->Pop(eax);
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &continue_label);
- __ push(eax);
+ frame_->Push(eax);
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
__ bind(&continue_label);
- __ push(eax);
+ frame_->Push(eax);
break;
}
@@ -3568,8 +3237,6 @@
int MinorKey() { return is_increment_ ? 1 : 0; }
void Generate(MacroAssembler* masm);
- const char* GetName() { return "RevertToNumberStub"; }
-
#ifdef DEBUG
void Print() {
PrintF("RevertToNumberStub (is_increment %s)\n",
@@ -3599,8 +3266,6 @@
}
void Generate(MacroAssembler* masm);
- const char* GetName() { return "CounterOpStub"; }
-
#ifdef DEBUG
void Print() {
PrintF("CounterOpStub (result_offset %d), (is_postfix %s),"
@@ -3623,7 +3288,7 @@
}
-void Ia32CodeGenerator::VisitCountOperation(CountOperation* node) {
+void CodeGenerator::VisitCountOperation(CountOperation* node) {
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@@ -3633,21 +3298,24 @@
bool is_const = (var != NULL && var->mode() == Variable::CONST);
// Postfix: Make room for the result.
- if (is_postfix) __ push(Immediate(0));
+ if (is_postfix) {
+ frame_->Push(Immediate(0));
+ }
{ Reference target(this, node->expression());
if (target.is_illegal()) return;
- GetValue(&target);
+ target.GetValue(NOT_INSIDE_TYPEOF);
- int result_offset = target.size() * kPointerSize;
CountOperationDeferred* deferred =
- new CountOperationDeferred(this, is_postfix,
- is_increment, result_offset);
+ new CountOperationDeferred(this, is_postfix, is_increment,
+ target.size() * kPointerSize);
- __ pop(eax); // Load TOS into eax for calculations below
+ frame_->Pop(eax); // Load TOS into eax for calculations below
// Postfix: Store the old value as the result.
- if (is_postfix) __ mov(Operand(esp, result_offset), eax);
+ if (is_postfix) {
+ __ mov(frame_->Element(target.size()), eax);
+ }
// Perform optimistic increment/decrement.
if (is_increment) {
@@ -3665,16 +3333,18 @@
// Store the new value in the target if not const.
__ bind(deferred->exit());
- __ push(eax); // Push the new value to TOS
- if (!is_const) SetValue(&target);
+ frame_->Push(eax); // Push the new value to TOS
+ if (!is_const) target.SetValue(NOT_CONST_INIT);
}
// Postfix: Discard the new value and use the old.
- if (is_postfix) __ pop(eax);
+ if (is_postfix) {
+ frame_->Pop();
+ }
}
-void Ia32CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
Comment cmnt(masm_, "[ BinaryOperation");
Token::Value op = node->op();
@@ -3692,14 +3362,14 @@
if (op == Token::AND) {
Label is_true;
- LoadCondition(node->left(), CodeGenState::LOAD, &is_true,
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, &is_true,
false_target(), false);
if (has_cc()) {
Branch(false, false_target());
// Evaluate right side expression.
__ bind(&is_true);
- LoadCondition(node->right(), CodeGenState::LOAD, true_target(),
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, true_target(),
false_target(), false);
} else {
@@ -3709,14 +3379,14 @@
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
// Duplicate the TOS value. The duplicate will be popped by ToBoolean.
- __ mov(eax, TOS);
- __ push(eax);
+ __ mov(eax, frame_->Top());
+ frame_->Push(eax);
ToBoolean(&pop_and_continue, &exit);
Branch(false, &exit);
// Pop the result of evaluating the first part.
__ bind(&pop_and_continue);
- __ pop(eax);
+ frame_->Pop();
// Evaluate right side expression.
__ bind(&is_true);
@@ -3728,14 +3398,14 @@
} else if (op == Token::OR) {
Label is_false;
- LoadCondition(node->left(), CodeGenState::LOAD, true_target(),
+ LoadCondition(node->left(), NOT_INSIDE_TYPEOF, true_target(),
&is_false, false);
if (has_cc()) {
Branch(true, true_target());
// Evaluate right side expression.
__ bind(&is_false);
- LoadCondition(node->right(), CodeGenState::LOAD, true_target(),
+ LoadCondition(node->right(), NOT_INSIDE_TYPEOF, true_target(),
false_target(), false);
} else {
@@ -3745,14 +3415,14 @@
// standard ToBoolean() conversion as described in ECMA-262,
// section 9.2, page 30.
// Duplicate the TOS value. The duplicate will be popped by ToBoolean.
- __ mov(eax, TOS);
- __ push(eax);
+ __ mov(eax, frame_->Top());
+ frame_->Push(eax);
ToBoolean(&exit, &pop_and_continue);
Branch(true, &exit);
// Pop the result of evaluating the first part.
__ bind(&pop_and_continue);
- __ pop(eax);
+ frame_->Pop();
// Evaluate right side expression.
__ bind(&is_false);
@@ -3796,12 +3466,24 @@
}
-void Ia32CodeGenerator::VisitThisFunction(ThisFunction* node) {
- __ push(FunctionOperand());
+void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ frame_->Push(frame_->Function());
}
-void Ia32CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+class InstanceofStub: public CodeStub {
+ public:
+ InstanceofStub() { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return Instanceof; }
+ int MinorKey() { return 0; }
+};
+
+
+void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Comment cmnt(masm_, "[ CompareOperation");
// Get the expressions from the node.
@@ -3824,7 +3506,7 @@
if (left_is_null || right_is_null) {
Load(left_is_null ? right : left);
Label exit, undetectable;
- __ pop(eax);
+ frame_->Pop(eax);
__ cmp(eax, Factory::null_value());
// The 'null' value is only equal to 'undefined' if using
@@ -3868,7 +3550,7 @@
// Load the operand, move it to register edx, and restore TOS.
LoadTypeofExpression(operation->expression());
- __ pop(edx);
+ frame_->Pop(edx);
if (check->Equals(Heap::number_symbol())) {
__ test(edx, Immediate(kSmiTagMask));
@@ -3975,14 +3657,16 @@
Load(left);
Load(right);
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
- __ push(eax); // push the result
+ frame_->Push(eax); // push the result
return;
}
case Token::INSTANCEOF: {
Load(left);
Load(right);
- __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
- __ push(eax); // push the result
+ InstanceofStub stub;
+ __ CallStub(&stub);
+ __ test(eax, Operand(eax));
+ cc_reg_ = zero;
return;
}
default:
@@ -4008,7 +3692,7 @@
}
-void Ia32CodeGenerator::RecordStatementPosition(Node* node) {
+void CodeGenerator::RecordStatementPosition(Node* node) {
if (FLAG_debug_info) {
int pos = node->statement_pos();
if (pos != RelocInfo::kNoPosition) {
@@ -4018,197 +3702,202 @@
}
-void Ia32CodeGenerator::EnterJSFrame() {
- __ push(ebp);
- __ mov(ebp, Operand(esp));
+#undef __
+#define __ masm->
- // Store the context and the function in the frame.
- __ push(esi);
- __ push(edi);
-
- // Clear the function slot when generating debug code.
- if (FLAG_debug_code) {
- __ Set(edi, Immediate(reinterpret_cast<int>(kZapValue)));
+Handle<String> Reference::GetName() {
+ ASSERT(type_ == NAMED);
+ Property* property = expression_->AsProperty();
+ if (property == NULL) {
+ // Global variable reference treated as a named property reference.
+ VariableProxy* proxy = expression_->AsVariableProxy();
+ ASSERT(proxy->AsVariable() != NULL);
+ ASSERT(proxy->AsVariable()->is_global());
+ return proxy->name();
+ } else {
+ MacroAssembler* masm = cgen_->masm();
+ __ RecordPosition(property->position());
+ Literal* raw_name = property->key()->AsLiteral();
+ ASSERT(raw_name != NULL);
+ return Handle<String>(String::cast(*raw_name->handle()));
}
}
-void Ia32CodeGenerator::ExitJSFrame() {
- // Record the location of the JS exit code for patching when setting
- // break point.
- __ RecordJSReturn();
-
- // Avoid using the leave instruction here, because it is too
- // short. We need the return sequence to be a least the size of a
- // call instruction to support patching the exit code in the
- // debugger. See VisitReturnStatement for the full return sequence.
- __ mov(esp, Operand(ebp));
- __ pop(ebp);
-}
-
-
-#undef __
-#define __ masm->
-
-Operand Ia32CodeGenerator::SlotOperand(CodeGenerator* cgen,
- Slot* slot,
- Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER: return ParameterOperand(cgen, index);
-
- case Slot::LOCAL: {
- ASSERT(0 <= index && index < cgen->scope()->num_stack_slots());
- const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(ebp, kLocal0Offset - index * kPointerSize);
+void Reference::GetValue(TypeofState typeof_state) {
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ MacroAssembler* masm = cgen_->masm();
+ VirtualFrame* frame = cgen_->frame();
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Load from Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ cgen_->LoadFromSlot(slot, typeof_state);
+ break;
}
- case Slot::CONTEXT: {
- MacroAssembler* masm = cgen->masm();
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(esi)); // do not overwrite context register
- Register context = esi;
- int chain_length =
- cgen->scope()->ContextChainLength(slot->var()->scope());
- for (int i = chain_length; i-- > 0;) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
+ case NAMED: {
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof. If
+ // there is a chance that reference errors can be thrown below, we
+ // must distinguish between the two kinds of loads (typeof expression
+ // loads must not throw a reference error).
+ Comment cmnt(masm, "[ Load from named Property");
+ Handle<String> name(GetName());
+ Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+ // Setup the name register.
+ __ mov(ecx, name);
+
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ if (var != NULL) {
+ ASSERT(var->is_global());
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ } else {
+ __ call(ic, RelocInfo::CODE_TARGET);
}
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be the needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
+ frame->Push(eax); // IC call leaves result in eax, push it out
+ break;
+ }
+
+ case KEYED: {
+ // TODO(1241834): Make sure that it is safe to ignore the
+ // distinction between expressions in a typeof and not in a typeof.
+ Comment cmnt(masm, "[ Load from keyed Property");
+ Property* property = expression_->AsProperty();
+ ASSERT(property != NULL);
+ __ RecordPosition(property->position());
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+
+ Variable* var = expression_->AsVariableProxy()->AsVariable();
+ if (var != NULL) {
+ ASSERT(var->is_global());
+ __ call(ic, RelocInfo::CODE_TARGET_CONTEXT);
+ } else {
+ __ call(ic, RelocInfo::CODE_TARGET);
+ }
+ frame->Push(eax); // IC call leaves result in eax, push it out
+ break;
}
default:
UNREACHABLE();
- return Operand(eax);
}
}
-void Property::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to Property");
- __ RecordPosition(position());
- Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key());
-}
+void Reference::SetValue(InitState init_state) {
+ ASSERT(!is_illegal());
+ ASSERT(!cgen_->has_cc());
+ MacroAssembler* masm = cgen_->masm();
+ VirtualFrame* frame = cgen_->frame();
+ switch (type_) {
+ case SLOT: {
+ Comment cmnt(masm, "[ Store to Slot");
+ Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->mode() == Variable::DYNAMIC);
+ // For now, just do a runtime call.
+ frame->Push(esi);
+ frame->Push(Immediate(slot->var()->name()));
-void VariableProxy::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to VariableProxy");
- Variable* node = var();
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize
+ // const properties (introduced via eval("const foo = (some
+ // expr);")). Also, uses the current function context instead of
+ // the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the
+ // same time, because the const declaration may be at the end of
+ // the eval code (sigh...) and the const variable may have been
+ // used before (where its value is 'undefined'). Thus, we can only
+ // do the initialization when we actually encounter the expression
+ // and when the expression operands are defined and valid, and
+ // thus we need the split into 2 operations: declaration of the
+ // context slot followed by initialization.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling chained assignment
+ // expressions.
+ frame->Push(eax);
- Expression* expr = node->rewrite();
- if (expr != NULL) {
- expr->GenerateStoreCode(cgen, ref, init_state);
- } else {
- ASSERT(node->is_global());
- if (node->AsProperty() != NULL) {
- __ RecordPosition(node->AsProperty()->position());
- }
- Expression* key = new Literal(node->name());
- Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key);
- }
-}
+ } else {
+ ASSERT(slot->var()->mode() != Variable::DYNAMIC);
+ Label exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is
+ // executed, the code is identical to a normal store (see below).
+ Comment cmnt(masm, "[ Init const");
+ __ mov(eax, cgen_->SlotOperand(slot, ecx));
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &exit);
+ }
-void Slot::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to Slot");
-
- if (type() == Slot::LOOKUP) {
- ASSERT(var()->mode() == Variable::DYNAMIC);
-
- // For now, just do a runtime call.
- __ push(esi);
- __ push(Immediate(var()->name()));
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- __ push(eax);
-
- } else {
- ASSERT(var()->mode() != Variable::DYNAMIC);
-
- Label exit;
- if (init_state == CONST_INIT) {
- ASSERT(var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- Comment cmnt(masm, "[ Init const");
- __ mov(eax, Ia32CodeGenerator::SlotOperand(cgen, this, ecx));
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &exit);
+ // We must execute the store. Storing a variable must keep the
+ // (new) value on the stack. This is necessary for compiling
+ // assignment expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end up
+ // calling this code.
+ frame->Pop(eax);
+ __ mov(cgen_->SlotOperand(slot, ecx), eax);
+ frame->Push(eax); // RecordWrite may destroy the value in eax.
+ if (slot->type() == Slot::CONTEXT) {
+ // ecx is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ RecordWrite(ecx, offset, eax, ebx);
+ }
+ // If we definitely did not jump over the assignment, we do not need
+ // to bind the exit label. Doing so can defeat peephole
+ // optimization.
+ if (init_state == CONST_INIT) __ bind(&exit);
+ }
+ break;
}
- // We must execute the store.
- // Storing a variable must keep the (new) value on the stack. This is
- // necessary for compiling assignment expressions. ecx may be loaded
- // with context; used below in RecordWrite.
- //
- // Note: We will reach here even with node->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this
- // code.
- __ pop(eax);
- __ mov(Ia32CodeGenerator::SlotOperand(cgen, this, ecx), eax);
- __ push(eax); // RecordWrite may destroy the value in eax.
- if (type() == Slot::CONTEXT) {
- // ecx is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + index() * kPointerSize;
- __ RecordWrite(ecx, offset, eax, ebx);
+ case NAMED: {
+ Comment cmnt(masm, "[ Store to named Property");
+ // Call the appropriate IC code.
+ Handle<String> name(GetName());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ // TODO(1222589): Make the IC grab the values from the stack.
+ frame->Pop(eax);
+ // Setup the name register.
+ __ mov(ecx, name);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ frame->Push(eax); // IC call leaves result in eax, push it out
+ break;
}
- // If we definitely did not jump over the assignment, we do not need to
- // bind the exit label. Doing so can defeat peephole optimization.
- if (init_state == CONST_INIT) __ bind(&exit);
+
+ case KEYED: {
+ Comment cmnt(masm, "[ Store to keyed Property");
+ Property* property = expression_->AsProperty();
+ ASSERT(property != NULL);
+ __ RecordPosition(property->position());
+ // Call IC code.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ // TODO(1222589): Make the IC grab the values from the stack.
+ frame->Pop(eax);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ frame->Push(eax); // IC call leaves result in eax, push it out
+ break;
+ }
+
+ default:
+ UNREACHABLE();
}
}
@@ -4270,38 +3959,6 @@
}
-void Ia32CodeGenerator::SetReferenceProperty(CodeGenerator* cgen,
- Reference* ref,
- Expression* key) {
- ASSERT(!ref->is_illegal());
- MacroAssembler* masm = cgen->masm();
-
- if (ref->type() == Reference::NAMED) {
- // Compute the name of the property.
- Literal* literal = key->AsLiteral();
- Handle<String> name(String::cast(*literal->handle()));
-
- // Call the appropriate IC code.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
- __ pop(eax);
- // Setup the name register.
- __ Set(ecx, Immediate(name));
- __ call(ic, RelocInfo::CODE_TARGET);
- } else {
- // Access keyed property.
- ASSERT(ref->type() == Reference::KEYED);
-
- // Call IC code.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
- __ pop(eax);
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- __ push(eax); // IC call leaves result in eax, push it out
-}
-
-
void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
Label call_runtime;
__ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y.
@@ -4795,95 +4452,174 @@
}
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
- // If we're reading an element we need to check that the key is a smi.
+void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ j(equal, &adaptor);
+
+ // Nothing to do: The formal number of parameters has already been
+ // passed in register eax by the calling function. Just return it.
+ __ ret(0);
+
+ // Arguments adaptor case: Read the arguments length from the
+ // adaptor frame and return it.
+ __ bind(&adaptor);
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ ret(0);
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ // The displacement is used for skipping the frame pointer on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+
+ // Check that the key is a smi.
Label slow;
- if (type_ == READ_ELEMENT) {
- __ mov(ebx, Operand(esp, 1 * kPointerSize)); // skip return address
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
- }
+ __ mov(ebx, Operand(esp, 1 * kPointerSize)); // skip return address
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
// Check if the calling frame is an arguments adaptor frame.
Label adaptor;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
- if (type_ == NEW_OBJECT) {
- __ j(not_equal, &slow);
- } else {
- __ j(equal, &adaptor);
- }
+ __ j(equal, &adaptor);
- // The displacement is used for skipping the return address on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
+ // Check index against formal parameters count limit passed in
+ // through register eax. Use unsigned comparison to get negative
+ // check for free.
+ __ cmp(ebx, Operand(eax));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack and return it.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
+ __ lea(edx, Operand(ebp, eax, times_2, 0));
+ __ neg(ebx);
+ __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
+ __ ret(0);
- if (type_ == READ_LENGTH) {
- // Nothing to do: The formal number of parameters has already been
- // passed in register eax by calling function. Just return it.
- __ ret(0);
- } else if (type_ == READ_ELEMENT) {
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(ebx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack and return it.
- __ lea(edx, Operand(ebp, eax, times_2, 0));
- __ neg(ebx);
- __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
- __ ret(0);
- } else {
- ASSERT(type_ == NEW_OBJECT);
- // Do nothing here.
- }
-
- // Arguments adaptor case: Find the length or the actual argument in
- // the calling frame.
+ // Arguments adaptor case: Check index against actual arguments
+ // limit found in the arguments adaptor frame. Use unsigned
+ // comparison to get negative check for free.
__ bind(&adaptor);
- if (type_ == READ_LENGTH) {
- // Read the arguments length from the adaptor frame and return it.
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ ret(0);
- } else if (type_ == READ_ELEMENT) {
- // Check index against actual arguments limit found in the
- // arguments adaptor frame. Use unsigned comparison to get
- // negative check for free.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(ebx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(ebx, Operand(ecx));
+ __ j(above_equal, &slow, not_taken);
- // Read the argument from the stack and return it.
- __ lea(edx, Operand(edx, ecx, times_2, 0));
- __ neg(ebx);
- __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
- __ ret(0);
- } else {
- ASSERT(type_ == NEW_OBJECT);
- // Patch the arguments.length and the parameters pointer.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ mov(Operand(esp, 1 * kPointerSize), ecx);
- __ lea(edx, Operand(edx, ecx, times_2, kDisplacement + 1 * kPointerSize));
- __ mov(Operand(esp, 2 * kPointerSize), edx);
- __ bind(&slow);
- __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
- }
+ // Read the argument from the stack and return it.
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
+ __ lea(edx, Operand(edx, ecx, times_2, 0));
+ __ neg(ebx);
+ __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
+ __ ret(0);
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.
- if (type_ == READ_ELEMENT) {
- __ bind(&slow);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
- }
+ __ bind(&slow);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ // The displacement is used for skipping the return address and the
+ // frame pointer on the stack. It is the offset of the last
+ // parameter (if any) relative to the frame pointer.
+ static const int kDisplacement = 2 * kPointerSize;
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ j(not_equal, &runtime);
+
+ // Patch the arguments.length and the parameters pointer.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ mov(Operand(esp, 1 * kPointerSize), ecx);
+ __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
+ __ mov(Operand(esp, 2 * kPointerSize), edx);
+
+ // Do the runtime call to allocate the arguments object.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3);
}
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
+
+ // If we're doing a strict equality comparison, we generate code
+ // to do fast comparison for objects and oddballs. Numbers and
+ // strings still go through the usual slow-case code.
+ if (strict_) {
+ Label slow;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &slow);
+
+ // Get the type of the first operand.
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+
+ // If the first object is an object, we do pointer comparison.
+ ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ Label non_object;
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, &non_object);
+ __ sub(eax, Operand(edx));
+ __ ret(0);
+
+ // Check for oddballs: true, false, null, undefined.
+ __ bind(&non_object);
+ __ cmp(ecx, ODDBALL_TYPE);
+ __ j(not_equal, &slow);
+
+ // If the oddball isn't undefined, we do pointer comparison. For
+ // the undefined value, we have to be careful and check for
+ // 'undetectable' objects too.
+ Label undefined;
+ __ cmp(Operand(eax), Immediate(Factory::undefined_value()));
+ __ j(equal, &undefined);
+ __ sub(eax, Operand(edx));
+ __ ret(0);
+
+ // Undefined case: If the other operand isn't undefined too, we
+ // have to check if it's 'undetectable'.
+ Label check_undetectable;
+ __ bind(&undefined);
+ __ cmp(Operand(edx), Immediate(Factory::undefined_value()));
+ __ j(not_equal, &check_undetectable);
+ __ Set(eax, Immediate(0));
+ __ ret(0);
+
+ // Check for undetectability of the other operand.
+ Label not_strictly_equal;
+ __ bind(&check_undetectable);
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, ¬_strictly_equal);
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kBitFieldOffset));
+ __ and_(ecx, 1 << Map::kIsUndetectable);
+ __ cmp(ecx, 1 << Map::kIsUndetectable);
+ __ j(not_equal, ¬_strictly_equal);
+ __ Set(eax, Immediate(0));
+ __ ret(0);
+
+ // No cigar: Objects aren't strictly equal. Register eax contains
+ // a non-smi value so it can't be 0. Just return.
+ ASSERT(kHeapObjectTag != 0);
+ __ bind(¬_strictly_equal);
+ __ ret(0);
+
+ // Fall through to the general case.
+ __ bind(&slow);
+ }
+
// Save the return address (and get it off the stack).
__ pop(ecx);
@@ -5324,23 +5060,61 @@
}
-#undef __
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ // Get the object - go slow case if it's a smi.
+ Label slow;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // 2 ~ return address, function
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
-// -----------------------------------------------------------------------------
-// CodeGenerator interfaces
+ // Check that the left hand is a JS object.
+ __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset)); // eax - object map
+ __ movzx_b(ecx, FieldOperand(eax, Map::kInstanceTypeOffset)); // ecx - type
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, &slow, not_taken);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ j(greater, &slow, not_taken);
-// MakeCode() is just a wrapper for CodeGenerator::MakeCode()
-// so we don't have to expose the entire CodeGenerator class in
-// the .h file.
-Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval) {
- Handle<Code> code = Ia32CodeGenerator::MakeCode(fun, script, is_eval);
- if (!code.is_null()) {
- Counters::total_compiled_code_size.Increment(code->instruction_size());
- }
- return code;
+ // Get the prototype of the function.
+ __ mov(edx, Operand(esp, 1 * kPointerSize)); // 1 ~ return address
+ __ TryGetFunctionPrototype(edx, ebx, ecx, &slow);
+
+ // Check that the function prototype is a JS object.
+ __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(less, &slow, not_taken);
+ __ cmp(ecx, LAST_JS_OBJECT_TYPE);
+ __ j(greater, &slow, not_taken);
+
+ // Register mapping: eax is object map and ebx is function prototype.
+ __ mov(ecx, FieldOperand(eax, Map::kPrototypeOffset));
+
+ // Loop through the prototype chain looking for the function prototype.
+ Label loop, is_instance, is_not_instance;
+ __ bind(&loop);
+ __ cmp(ecx, Operand(ebx));
+ __ j(equal, &is_instance);
+ __ cmp(Operand(ecx), Immediate(Factory::null_value()));
+ __ j(equal, &is_not_instance);
+ __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
+ __ mov(ecx, FieldOperand(ecx, Map::kPrototypeOffset));
+ __ jmp(&loop);
+
+ __ bind(&is_instance);
+ __ Set(eax, Immediate(0));
+ __ ret(2 * kPointerSize);
+
+ __ bind(&is_not_instance);
+ __ Set(eax, Immediate(Smi::FromInt(1)));
+ __ ret(2 * kPointerSize);
+
+ // Slow-case: Go through the JavaScript implementation.
+ __ bind(&slow);
+ __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
}
+#undef __
+
} } // namespace v8::internal
diff --git a/src/codegen-ia32.h b/src/codegen-ia32.h
new file mode 100644
index 0000000..21d5167
--- /dev/null
+++ b/src/codegen-ia32.h
@@ -0,0 +1,422 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_CODEGEN_IA32_H_
+#define V8_CODEGEN_IA32_H_
+
+#include "scopes.h"
+
+namespace v8 { namespace internal {
+
+// Forward declarations
+class DeferredCode;
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+// -------------------------------------------------------------------------
+// Virtual frame
+
+class VirtualFrame BASE_EMBEDDED {
+ public:
+ explicit VirtualFrame(CodeGenerator* cgen);
+
+ void Enter();
+ void Exit();
+
+ void AllocateLocals();
+
+ Operand Top() const { return Operand(esp, 0); }
+
+ Operand Element(int index) const {
+ return Operand(esp, index * kPointerSize);
+ }
+
+ Operand Local(int index) const {
+ ASSERT(0 <= index && index < frame_local_count_);
+ return Operand(ebp, kLocal0Offset - index * kPointerSize);
+ }
+
+ Operand Function() const { return Operand(ebp, kFunctionOffset); }
+
+ Operand Context() const { return Operand(ebp, kContextOffset); }
+
+ Operand Parameter(int index) const {
+ ASSERT(-1 <= index && index < parameter_count_);
+ return Operand(ebp, (1 + parameter_count_ - index) * kPointerSize);
+ }
+
+ Operand Receiver() const { return Parameter(-1); }
+
+ inline void Drop(int count);
+
+ inline void Pop();
+ inline void Pop(Register reg);
+ inline void Pop(Operand operand);
+
+ inline void Push(Register reg);
+ inline void Push(Operand operand);
+ inline void Push(Immediate immediate);
+
+ private:
+ static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+ static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
+ static const int kContextOffset = StandardFrameConstants::kContextOffset;
+
+ MacroAssembler* masm_;
+ int frame_local_count_;
+ int parameter_count_;
+};
+
+
+// -------------------------------------------------------------------------
+// Reference support
+
+// A reference is a C++ stack-allocated object that keeps an ECMA
+// reference on the execution stack while in scope. For variables
+// the reference is empty, indicating that it isn't necessary to
+// store state on the stack for keeping track of references to those.
+// For properties, we keep either one (named) or two (indexed) values
+// on the execution stack to represent the reference.
+
+class Reference BASE_EMBEDDED {
+ public:
+ // The values of the types is important, see size().
+ enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
+ Reference(CodeGenerator* cgen, Expression* expression);
+ ~Reference();
+
+ Expression* expression() const { return expression_; }
+ Type type() const { return type_; }
+ void set_type(Type value) {
+ ASSERT(type_ == ILLEGAL);
+ type_ = value;
+ }
+
+ // The size of the reference or -1 if the reference is illegal.
+ int size() const { return type_; }
+
+ bool is_illegal() const { return type_ == ILLEGAL; }
+ bool is_slot() const { return type_ == SLOT; }
+ bool is_property() const { return type_ == NAMED || type_ == KEYED; }
+
+ // Return the name. Only valid for named property references.
+ Handle<String> GetName();
+
+ // Generate code to push the value of the reference on top of the
+ // expression stack. The reference is expected to be already on top of
+ // the expression stack, and it is left in place with its value above it.
+ void GetValue(TypeofState typeof_state);
+
+ // Generate code to store the value on top of the expression stack in the
+ // reference. The reference is expected to be immediately below the value
+ // on the expression stack. The stored value is left in place (with the
+ // reference intact below it) to support chained assignments.
+ void SetValue(InitState init_state);
+
+ private:
+ CodeGenerator* cgen_;
+ Expression* expression_;
+ Type type_;
+};
+
+
+// -------------------------------------------------------------------------
+// Code generation state
+
+// The state is passed down the AST by the code generator (and back up, in
+// the form of the state of the label pair). It is threaded through the
+// call stack. Constructing a state implicitly pushes it on the owning code
+// generator's stack of states, and destroying one implicitly pops it.
+
+class CodeGenState BASE_EMBEDDED {
+ public:
+ // Create an initial code generator state. Destroying the initial state
+ // leaves the code generator with a NULL state.
+ explicit CodeGenState(CodeGenerator* owner);
+
+ // Create a code generator state based on a code generator's current
+ // state. The new state has its own access type and pair of branch
+ // labels, and no reference.
+ CodeGenState(CodeGenerator* owner,
+ TypeofState typeof_state,
+ Label* true_target,
+ Label* false_target);
+
+ // Destroy a code generator state and restore the owning code generator's
+ // previous state.
+ ~CodeGenState();
+
+ TypeofState typeof_state() const { return typeof_state_; }
+ Label* true_target() const { return true_target_; }
+ Label* false_target() const { return false_target_; }
+
+ private:
+ CodeGenerator* owner_;
+ TypeofState typeof_state_;
+ Label* true_target_;
+ Label* false_target_;
+ CodeGenState* previous_;
+};
+
+
+
+
+// -------------------------------------------------------------------------
+// CodeGenerator
+
+class CodeGenerator: public Visitor {
+ public:
+ // Takes a function literal, generates code for it. This function should only
+ // be called by compiler.cc.
+ static Handle<Code> MakeCode(FunctionLiteral* fun,
+ Handle<Script> script,
+ bool is_eval);
+
+ static void SetFunctionInfo(Handle<JSFunction> fun,
+ int length,
+ int function_token_position,
+ int start_position,
+ int end_position,
+ bool is_expression,
+ bool is_toplevel,
+ Handle<Script> script);
+
+ // Accessors
+ MacroAssembler* masm() { return masm_; }
+
+ VirtualFrame* frame() const { return frame_; }
+
+ CodeGenState* state() { return state_; }
+ void set_state(CodeGenState* state) { state_ = state; }
+
+ void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
+
+ private:
+ // Construction/Destruction
+ CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
+ virtual ~CodeGenerator() { delete masm_; }
+
+ // Accessors
+ Scope* scope() const { return scope_; }
+
+ void ProcessDeferred();
+
+ bool is_eval() { return is_eval_; }
+
+ // State
+ bool has_cc() const { return cc_reg_ >= 0; }
+ TypeofState typeof_state() const { return state_->typeof_state(); }
+ Label* true_target() const { return state_->true_target(); }
+ Label* false_target() const { return state_->false_target(); }
+
+
+ // Node visitors.
+#define DEF_VISIT(type) \
+ void Visit##type(type* node);
+ NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ // Main code generation function
+ void GenCode(FunctionLiteral* fun);
+
+ // The following are used by class Reference.
+ void LoadReference(Reference* ref);
+ void UnloadReference(Reference* ref);
+
+ Operand ContextOperand(Register context, int index) const {
+ return Operand(context, Context::SlotOffset(index));
+ }
+
+ Operand SlotOperand(Slot* slot, Register tmp);
+
+
+ // Expressions
+ Operand GlobalObject() const {
+ return ContextOperand(esi, Context::GLOBAL_INDEX);
+ }
+
+ void LoadCondition(Expression* x,
+ TypeofState typeof_state,
+ Label* true_target,
+ Label* false_target,
+ bool force_cc);
+ void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
+ void LoadGlobal();
+
+ // Read a value from a slot and leave it on top of the expression stack.
+ void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+
+ // Special code for typeof expressions: Unfortunately, we must
+ // be careful when loading the expression in 'typeof'
+ // expressions. We are not allowed to throw reference errors for
+ // non-existing properties of the global object, so we must make it
+ // look like an explicit property access, instead of an access
+ // through the context chain.
+ void LoadTypeofExpression(Expression* x);
+
+ void ToBoolean(Label* true_target, Label* false_target);
+
+ void GenericBinaryOperation(Token::Value op,
+ const OverwriteMode overwrite_mode = NO_OVERWRITE);
+ void Comparison(Condition cc, bool strict = false);
+
+ // Inline small integer literals. To prevent long attacker-controlled byte
+ // sequences, we only inline small Smi:s.
+ static const int kMaxSmiInlinedBits = 16;
+ bool IsInlineSmi(Literal* literal);
+ void SmiComparison(Condition cc, Handle<Object> value, bool strict = false);
+ void SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode overwrite_mode);
+
+ void CallWithArguments(ZoneList<Expression*>* arguments, int position);
+
+ // Control flow
+ void Branch(bool if_true, Label* L);
+ void CheckStack();
+ void CleanStack(int num_bytes);
+
+ bool CheckForInlineRuntimeCall(CallRuntime* node);
+ Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
+ void ProcessDeclarations(ZoneList<Declaration*>* declarations);
+
+ Handle<Code> ComputeCallInitialize(int argc);
+
+ // Declare global variables and functions in the given array of
+ // name/value pairs.
+ void DeclareGlobals(Handle<FixedArray> pairs);
+
+ // Instantiate the function boilerplate.
+ void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
+
+ // Support for type checks.
+ void GenerateIsSmi(ZoneList<Expression*>* args);
+ void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
+ void GenerateIsArray(ZoneList<Expression*>* args);
+
+ // Support for arguments.length and arguments[?].
+ void GenerateArgumentsLength(ZoneList<Expression*>* args);
+ void GenerateArgumentsAccess(ZoneList<Expression*>* args);
+
+ // Support for accessing the value field of an object (used by Date).
+ void GenerateValueOf(ZoneList<Expression*>* args);
+ void GenerateSetValueOf(ZoneList<Expression*>* args);
+
+ // Fast support for charCodeAt(n).
+ void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+
+ // Fast support for object equality testing.
+ void GenerateObjectEquals(ZoneList<Expression*>* args);
+
+
+ // Methods and constants for fast case switch statement support.
+ //
+ // Only allow fast-case switch if the range of labels is at most
+ // this factor times the number of case labels.
+ // Value is derived from comparing the size of code generated by the normal
+ // switch code for Smi-labels to the size of a single pointer. If code
+ // quality increases this number should be decreased to match.
+ static const int kFastSwitchMaxOverheadFactor = 5;
+
+ // Minimal number of switch cases required before we allow jump-table
+ // optimization.
+ static const int kFastSwitchMinCaseCount = 5;
+
+ // The limit of the range of a fast-case switch, as a factor of the number
+ // of cases of the switch. Each platform should return a value that
+ // is optimal compared to the default code generated for a switch statement
+ // on that platform.
+ int FastCaseSwitchMaxOverheadFactor();
+
+ // The minimal number of cases in a switch before the fast-case switch
+ // optimization is enabled. Each platform should return a value that
+ // is optimal compared to the default code generated for a switch statement
+ // on that platform.
+ int FastCaseSwitchMinCaseCount();
+
+ // Allocate a jump table and create code to jump through it.
+ // Should call GenerateFastCaseSwitchCases to generate the code for
+ // all the cases at the appropriate point.
+ void GenerateFastCaseSwitchJumpTable(SwitchStatement* node, int min_index,
+ int range, Label *fail_label,
+ SmartPointer<Label*> &case_targets,
+ SmartPointer<Label>& case_labels);
+
+ // Generate the code for cases for the fast case switch.
+ // Called by GenerateFastCaseSwitchJumpTable.
+ void GenerateFastCaseSwitchCases(SwitchStatement* node,
+ SmartPointer<Label> &case_labels);
+
+ // Fast support for constant-Smi switches.
+ void GenerateFastCaseSwitchStatement(SwitchStatement *node, int min_index,
+ int range, int default_index);
+
+ // Fast support for constant-Smi switches. Tests whether switch statement
+ // permits optimization and calls GenerateFastCaseSwitch if it does.
+ // Returns true if the fast-case switch was generated, and false if not.
+ bool TryGenerateFastCaseSwitchStatement(SwitchStatement *node);
+
+
+ // Bottle-neck interface to call the Assembler to generate the statement
+ // position. This allows us to easily control whether statement positions
+ // should be generated or not.
+ void RecordStatementPosition(Node* node);
+
+ bool is_eval_; // Tells whether code is generated for eval.
+ Handle<Script> script_;
+ List<DeferredCode*> deferred_;
+
+ // Assembler
+ MacroAssembler* masm_; // to generate code
+
+ // Code generation state
+ Scope* scope_;
+ VirtualFrame* frame_;
+ Condition cc_reg_;
+ CodeGenState* state_;
+ bool is_inside_try_;
+ int break_stack_height_;
+
+ // Labels
+ Label function_return_;
+
+ friend class VirtualFrame;
+ friend class Reference;
+
+ DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_CODEGEN_IA32_H_
diff --git a/src/codegen.cc b/src/codegen.cc
index 844d934..6c3a113 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -27,8 +27,11 @@
#include "v8.h"
+#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
+#include "prettyprinter.h"
+#include "scopeinfo.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -68,6 +71,97 @@
}
+// Generate the code. Takes a function literal, generates code for it, assemble
+// all the pieces into a Code object. This function is only to be called by
+// the compiler.cc code.
+Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
+ Handle<Script> script,
+ bool is_eval) {
+#ifdef ENABLE_DISASSEMBLER
+ bool print_code = FLAG_print_code && !Bootstrapper::IsActive();
+#endif
+
+#ifdef DEBUG
+ bool print_source = false;
+ bool print_ast = false;
+ const char* ftype;
+
+ if (Bootstrapper::IsActive()) {
+ print_source = FLAG_print_builtin_source;
+ print_ast = FLAG_print_builtin_ast;
+ print_code = FLAG_print_builtin_code;
+ ftype = "builtin";
+ } else {
+ print_source = FLAG_print_source;
+ print_ast = FLAG_print_ast;
+ ftype = "user-defined";
+ }
+
+ if (FLAG_trace_codegen || print_source || print_ast) {
+ PrintF("*** Generate code for %s function: ", ftype);
+ flit->name()->ShortPrint();
+ PrintF(" ***\n");
+ }
+
+ if (print_source) {
+ PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(flit));
+ }
+
+ if (print_ast) {
+ PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(flit));
+ }
+#endif // DEBUG
+
+ // Generate code.
+ const int initial_buffer_size = 4 * KB;
+ CodeGenerator cgen(initial_buffer_size, script, is_eval);
+ cgen.GenCode(flit);
+ if (cgen.HasStackOverflow()) {
+ ASSERT(!Top::has_pending_exception());
+ return Handle<Code>::null();
+ }
+
+ // Process any deferred code.
+ cgen.ProcessDeferred();
+
+ // Allocate and install the code.
+ CodeDesc desc;
+ cgen.masm()->GetCode(&desc);
+ ScopeInfo<> sinfo(flit->scope());
+ Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
+ Handle<Code> code = Factory::NewCode(desc, &sinfo, flags);
+
+ // Add unresolved entries in the code to the fixup list.
+ Bootstrapper::AddFixup(*code, cgen.masm());
+
+#ifdef ENABLE_DISASSEMBLER
+ if (print_code) {
+ // Print the source code if available.
+ if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+ PrintF("--- Raw source ---\n");
+ StringInputBuffer stream(String::cast(script->source()));
+ stream.Seek(flit->start_position());
+ // flit->end_position() points to the last character in the stream. We
+ // need to compensate by adding one to calculate the length.
+ int source_len = flit->end_position() - flit->start_position() + 1;
+ for (int i = 0; i < source_len; i++) {
+ if (stream.has_more()) PrintF("%c", stream.GetNext());
+ }
+ PrintF("\n\n");
+ }
+ PrintF("--- Code ---\n");
+ code->Disassemble();
+ }
+#endif // ENABLE_DISASSEMBLER
+
+ if (!code.is_null()) {
+ Counters::total_compiled_code_size.Increment(code->instruction_size());
+ }
+
+ return code;
+}
+
+
// Sets the function info on a function.
// The start_position points to the first '(' character after the function name
// in the full script source. When counting characters in the script source the
@@ -362,4 +456,13 @@
}
+void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
+ switch (type_) {
+ case READ_LENGTH: GenerateReadLength(masm); break;
+ case READ_ELEMENT: GenerateReadElement(masm); break;
+ case NEW_OBJECT: GenerateNewObject(masm); break;
+ }
+}
+
+
} } // namespace v8::internal
diff --git a/src/codegen.h b/src/codegen.h
index 2367aef..f2ac05c 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -26,12 +26,43 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CODEGEN_H_
+#define V8_CODEGEN_H_
#include "ast.h"
#include "code-stubs.h"
#include "runtime.h"
-#define V8_CODEGEN_H_
+// Include the declaration of the architecture defined class CodeGenerator.
+// The contract to the shared code is that the the CodeGenerator is a subclass
+// of Visitor and that the following methods are available publicly:
+// CodeGenerator::MakeCode
+// CodeGenerator::SetFunctionInfo
+// CodeGenerator::AddDeferred
+// CodeGenerator::masm
+//
+// These methods are either used privately by the shared code or implemented as
+// shared code:
+// CodeGenerator::CodeGenerator
+// CodeGenerator::~CodeGenerator
+// CodeGenerator::ProcessDeferred
+// CodeGenerator::GenCode
+// CodeGenerator::BuildBoilerplate
+// CodeGenerator::ComputeCallInitialize
+// CodeGenerator::ProcessDeclarations
+// CodeGenerator::DeclareGlobals
+// CodeGenerator::CheckForInlineRuntimeCall
+// CodeGenerator::GenerateFastCaseSwitchStatement
+// CodeGenerator::GenerateFastCaseSwitchCases
+// CodeGenerator::TryGenerateFastCaseSwitchStatement
+// CodeGenerator::GenerateFastCaseSwitchJumpTable
+// CodeGenerator::FastCaseSwitchMinCaseCount
+// CodeGenerator::FastCaseSwitchMaxOverheadFactor
+
+#if defined(ARM)
+#include "codegen-arm.h"
+#else
+#include "codegen-ia32.h"
+#endif
namespace v8 { namespace internal {
@@ -43,10 +74,6 @@
// and we can only run the tests with --nolazy.
-// Forward declaration.
-class CodeGenerator;
-
-
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
@@ -93,124 +120,6 @@
};
-// A superclass for gode generators. The implementations of methods
-// declared in this class are partially in codegen.c and partially in
-// codegen_<arch>.c.
-class CodeGenerator: public Visitor {
- public:
- CodeGenerator(bool is_eval,
- Handle<Script> script)
- : is_eval_(is_eval),
- script_(script),
- deferred_(8) { }
-
-
- // The code generator: Takes a function literal, generates code for it,
- // and assembles it all into a Code* object. This function should only
- // be called by compiler.cc.
- static Handle<Code> MakeCode(FunctionLiteral* fun,
- Handle<Script> script,
- bool is_eval);
-
- static void SetFunctionInfo(Handle<JSFunction> fun,
- int length,
- int function_token_position,
- int start_position,
- int end_position,
- bool is_expression,
- bool is_toplevel,
- Handle<Script> script);
-
- virtual MacroAssembler* masm() = 0;
-
- virtual Scope* scope() const = 0;
-
- void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
- void ProcessDeferred();
-
- // Accessors for is_eval.
- bool is_eval() { return is_eval_; }
-
- // Abstract node visitors.
-#define DEF_VISIT(type) \
- virtual void Visit##type(type* node) = 0;
- NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
- protected:
- bool CheckForInlineRuntimeCall(CallRuntime* node);
- Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
- void ProcessDeclarations(ZoneList<Declaration*>* declarations);
-
- Handle<Code> ComputeCallInitialize(int argc);
-
- // Declare global variables and functions in the given array of
- // name/value pairs.
- virtual void DeclareGlobals(Handle<FixedArray> pairs) = 0;
-
- // Support for type checks.
- virtual void GenerateIsSmi(ZoneList<Expression*>* args) = 0;
- virtual void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) = 0;
- virtual void GenerateIsArray(ZoneList<Expression*>* args) = 0;
-
- // Support for arguments.length and arguments[?].
- virtual void GenerateArgumentsLength(ZoneList<Expression*>* args) = 0;
- virtual void GenerateArgumentsAccess(ZoneList<Expression*>* args) = 0;
-
- // Support for accessing the value field of an object (used by Date).
- virtual void GenerateValueOf(ZoneList<Expression*>* args) = 0;
- virtual void GenerateSetValueOf(ZoneList<Expression*>* args) = 0;
-
- // Fast support for charCodeAt(n).
- virtual void GenerateFastCharCodeAt(ZoneList<Expression*>* args) = 0;
-
- // Fast support for object equality testing.
- virtual void GenerateObjectEquals(ZoneList<Expression*>* args) = 0;
-
-
- // Multiple methods for fast case switch statement support.
-
- // The limit of the range of a fast-case switch, as a factor of the number
- // of cases of the switch. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- virtual int FastCaseSwitchMaxOverheadFactor() = 0;
-
- // The minimal number of cases in a switch before the fast-case switch
- // optimization is enabled. Each platform should return a value that
- // is optimal compared to the default code generated for a switch statement
- // on that platform.
- virtual int FastCaseSwitchMinCaseCount() = 0;
-
- // Allocate a jump table and create code to jump through it.
- // Should call GenerateFastCaseSwitchCases to generate the code for
- // all the cases at the appropriate point.
- virtual void GenerateFastCaseSwitchJumpTable(
- SwitchStatement* node, int min_index, int range, Label *fail_label,
- SmartPointer<Label*> &case_targets, SmartPointer<Label>& case_labels) = 0;
-
- // Generate the code for cases for the fast case switch.
- // Called by GenerateFastCaseSwitchJumpTable.
- virtual void GenerateFastCaseSwitchCases(
- SwitchStatement* node, SmartPointer<Label> &case_labels);
-
- // Fast support for constant-Smi switches.
- virtual void GenerateFastCaseSwitchStatement(
- SwitchStatement *node, int min_index, int range, int default_index);
-
- // Fast support for constant-Smi switches. Tests whether switch statement
- // permits optimization and calls GenerateFastCaseSwitch if it does.
- // Returns true if the fast-case switch was generated, and false if not.
- virtual bool TryGenerateFastCaseSwitchStatement(SwitchStatement *node);
-
-
- private:
- bool is_eval_; // Tells whether code is generated for eval.
- Handle<Script> script_;
- List<DeferredCode*> deferred_;
-};
-
-
// RuntimeStub models code stubs calling entry points in the Runtime class.
class RuntimeStub : public CodeStub {
public:
@@ -357,7 +266,11 @@
Major MajorKey() { return ArgumentsAccess; }
int MinorKey() { return type_; }
+
void Generate(MacroAssembler* masm);
+ void GenerateReadLength(MacroAssembler* masm);
+ void GenerateReadElement(MacroAssembler* masm);
+ void GenerateNewObject(MacroAssembler* masm);
const char* GetName() { return "ArgumentsAccessStub"; }
diff --git a/src/date-delay.js b/src/date-delay.js
index 9a52ec4..da7aebd 100644
--- a/src/date-delay.js
+++ b/src/date-delay.js
@@ -43,7 +43,7 @@
// ECMA 262 - 15.9.1.2
function Day(time) {
- return $floor(time/msPerDay);
+ return FLOOR(time/msPerDay);
}
@@ -69,9 +69,9 @@
function DayFromYear(year) {
return 365 * (year-1970)
- + $floor((year-1969)/4)
- - $floor((year-1901)/100)
- + $floor((year-1601)/400);
+ + FLOOR((year-1969)/4)
+ - FLOOR((year-1901)/100)
+ + FLOOR((year-1601)/400);
}
@@ -170,6 +170,10 @@
return time + local_time_offset + DaylightSavingsOffset(time);
}
+function LocalTimeNoCheck(time) {
+ return time + local_time_offset + DaylightSavingsOffset(time);
+}
+
function UTC(time) {
if ($isNaN(time)) return time;
@@ -180,17 +184,17 @@
// ECMA 262 - 15.9.1.10
function HourFromTime(time) {
- return Modulo($floor(time / msPerHour), HoursPerDay);
+ return Modulo(FLOOR(time / msPerHour), HoursPerDay);
}
function MinFromTime(time) {
- return Modulo($floor(time / msPerMinute), MinutesPerHour);
+ return Modulo(FLOOR(time / msPerMinute), MinutesPerHour);
}
function SecFromTime(time) {
- return Modulo($floor(time / msPerSecond), SecondsPerMinute);
+ return Modulo(FLOOR(time / msPerSecond), SecondsPerMinute);
}
@@ -223,12 +227,11 @@
function ToJulianDay(year, month, date) {
var jy = (month > 1) ? year : year - 1;
var jm = (month > 1) ? month + 2 : month + 14;
- var ja = $floor(0.01*jy);
- return $floor($floor(365.25*jy) + $floor(30.6001*jm) + date + 1720995) + 2 - ja + $floor(0.25*ja);
+ var ja = FLOOR(0.01*jy);
+ return FLOOR(FLOOR(365.25*jy) + FLOOR(30.6001*jm) + date + 1720995) + 2 - ja + FLOOR(0.25*ja);
}
-
-var four_year_cycle_table;
+var four_year_cycle_table = CalculateDateTable();
function CalculateDateTable() {
@@ -261,7 +264,6 @@
}
-
// Constructor for creating objects holding year, month, and date.
// Introduced to ensure the two return points in FromJulianDay match same map.
function DayTriplet(year, month, date) {
@@ -279,8 +281,6 @@
// when doing the multiply-to-divide trick.
if (julian > kDayZeroInJulianDay &&
(julian - kDayZeroInJulianDay) < 40177) { // 1970 - 2080
- if (!four_year_cycle_table)
- four_year_cycle_table = CalculateDateTable();
var jsimple = (julian - kDayZeroInJulianDay) + 731; // Day 0 is 1st January 1968
var y = 1968;
// Divide by 1461 by multiplying with 22967 and shifting down by 25!
@@ -292,19 +292,20 @@
(four_year_cycle & kMonthMask) >> kMonthShift,
four_year_cycle & kDayMask);
}
- var jalpha = $floor((julian - 1867216.25) / 36524.25);
- var jb = julian + 1 + jalpha - $floor(0.25 * jalpha) + 1524;
- var jc = $floor(6680.0 + ((jb-2439870) - 122.1)/365.25);
- var jd = $floor(365 * jc + (0.25 * jc));
- var je = $floor((jb - jd)/30.6001);
+ var jalpha = FLOOR((julian - 1867216.25) / 36524.25);
+ var jb = julian + 1 + jalpha - FLOOR(0.25 * jalpha) + 1524;
+ var jc = FLOOR(6680.0 + ((jb-2439870) - 122.1)/365.25);
+ var jd = FLOOR(365 * jc + (0.25 * jc));
+ var je = FLOOR((jb - jd)/30.6001);
var m = je - 1;
if (m > 12) m -= 13;
var y = jc - 4715;
if (m > 2) { --y; --m; }
- var d = jb - jd - $floor(30.6001 * je);
+ var d = jb - jd - FLOOR(30.6001 * je);
return new DayTriplet(y, m, d);
}
+
// Compute number of days given a year, month, date.
// Note that month and date can lie outside the normal range.
// For example:
@@ -320,7 +321,7 @@
date = TO_INTEGER(date);
// Overflow months into year.
- year = year + $floor(month/12);
+ year = year + FLOOR(month/12);
month = month % 12;
if (month < 0) {
month += 12;
@@ -400,7 +401,7 @@
function GetMillisecondsFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return msFromTime(LocalTime(t));
+ return msFromTime(LocalTimeNoCheck(t));
}
@@ -414,7 +415,7 @@
function GetSecondsFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return SecFromTime(LocalTime(t));
+ return SecFromTime(LocalTimeNoCheck(t));
}
@@ -428,7 +429,7 @@
function GetMinutesFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return MinFromTime(LocalTime(t));
+ return MinFromTime(LocalTimeNoCheck(t));
}
@@ -442,7 +443,7 @@
function GetHoursFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return HourFromTime(LocalTime(t));
+ return HourFromTime(LocalTimeNoCheck(t));
}
@@ -456,7 +457,7 @@
function GetFullYearFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return YearFromTime(LocalTime(t));
+ return YearFromTime(LocalTimeNoCheck(t));
}
@@ -470,7 +471,7 @@
function GetMonthFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return MonthFromTime(LocalTime(t));
+ return MonthFromTime(LocalTimeNoCheck(t));
}
@@ -484,7 +485,7 @@
function GetDateFrom(aDate) {
var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t;
- return DateFromTime(LocalTime(t));
+ return DateFromTime(LocalTimeNoCheck(t));
}
@@ -526,8 +527,8 @@
function LocalTimezoneString(time) {
var timezoneOffset = (local_time_offset + DaylightSavingsOffset(time)) / msPerMinute;
var sign = (timezoneOffset >= 0) ? 1 : -1;
- var hours = $floor((sign * timezoneOffset)/60);
- var min = $floor((sign * timezoneOffset)%60);
+ var hours = FLOOR((sign * timezoneOffset)/60);
+ var min = FLOOR((sign * timezoneOffset)%60);
var gmt = ' GMT' + ((sign == 1) ? '+' : '-') + TwoDigitString(hours) + TwoDigitString(min);
return gmt + ' (' + LocalTimezone(time) + ')';
}
@@ -586,7 +587,7 @@
function DateToString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
- return DatePrintString(LocalTime(t)) + LocalTimezoneString(t);
+ return DatePrintString(LocalTimeNoCheck(t)) + LocalTimezoneString(t);
}
@@ -594,7 +595,7 @@
function DateToDateString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
- return DateString(LocalTime(t));
+ return DateString(LocalTimeNoCheck(t));
}
@@ -602,7 +603,7 @@
function DateToTimeString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
- var lt = LocalTime(t);
+ var lt = LocalTimeNoCheck(t);
return TimeString(lt) + LocalTimezoneString(lt);
}
@@ -623,7 +624,7 @@
function DateToLocaleTimeString() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return kInvalidDate;
- var lt = LocalTime(t);
+ var lt = LocalTimeNoCheck(t);
return TimeString(lt);
}
@@ -680,7 +681,7 @@
function DateGetDay() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return t;
- return WeekDay(LocalTime(t));
+ return WeekDay(LocalTimeNoCheck(t));
}
@@ -744,7 +745,7 @@
function DateGetTimezoneOffset() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return t;
- return (t - LocalTime(t)) / msPerMinute;
+ return (t - LocalTimeNoCheck(t)) / msPerMinute;
}
@@ -884,7 +885,7 @@
// ECMA 262 - 15.9.5.40
function DateSetFullYear(year, month, date) {
var t = GetTimeFrom(this);
- t = $isNaN(t) ? 0 : LocalTime(t);
+ t = $isNaN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
@@ -924,7 +925,7 @@
function DateGetYear() {
var t = GetTimeFrom(this);
if ($isNaN(t)) return $NaN;
- return YearFromTime(LocalTime(t)) - 1900;
+ return YearFromTime(LocalTimeNoCheck(t)) - 1900;
}
diff --git a/src/factory.cc b/src/factory.cc
index 3167a71..2eeb3b9 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -283,10 +283,12 @@
Handle<Object> Factory::NewError(const char* maker, const char* type,
Vector< Handle<Object> > args) {
HandleScope scope;
- Handle<JSArray> array = NewJSArray(args.length());
- for (int i = 0; i < args.length(); i++)
- SetElement(array, i, args[i]);
- Handle<Object> result = NewError(maker, type, array);
+ Handle<FixedArray> array = Factory::NewFixedArray(args.length());
+ for (int i = 0; i < args.length(); i++) {
+ array->set(i, *args[i]);
+ }
+ Handle<JSArray> object = Factory::NewJSArrayWithElements(array);
+ Handle<Object> result = NewError(maker, type, object);
return result.EscapeFrom(&scope);
}
@@ -467,10 +469,12 @@
GC_GREEDY_CHECK();
CallbacksDescriptor desc(*key, *value, attributes);
Object* obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
- if (obj->IsRetryAfterGC()) {
- CALL_GC(obj);
- CallbacksDescriptor desc(*key, *value, attributes);
- obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
+ if (obj->IsFailure()) {
+ if (obj->IsRetryAfterGC()) {
+ CALL_GC(obj);
+ CallbacksDescriptor desc(*key, *value, attributes);
+ obj = array->CopyInsert(&desc, REMOVE_TRANSITIONS);
+ }
if (obj->IsFailure()) {
// TODO(1181417): Fix this.
V8::FatalProcessOutOfMemory("CopyAppendProxyDescriptor");
diff --git a/src/frames-inl.h b/src/frames-inl.h
index fac1418..2b50d55 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -145,8 +145,10 @@
}
-inline bool StandardFrame::IsConstructTrampolineFrame(Address pc) {
- return Builtins::builtin(Builtins::JSConstructCall)->contains(pc);
+inline bool StandardFrame::IsConstructFrame(Address fp) {
+ Object* marker =
+ Memory::Object_at(fp + StandardFrameConstants::kMarkerOffset);
+ return marker == Smi::FromInt(CONSTRUCT);
}
@@ -167,15 +169,6 @@
}
-inline bool InternalFrame::is_construct_trampoline() const {
- // TODO(1233795): This doesn't work when the stack frames have been
- // cooked. We need to find another way of identifying construct
- // trampoline frames possibly by manipulating the context field like
- // we do for argument adaptor frames.
- return IsConstructTrampolineFrame(pc());
-}
-
-
inline JavaScriptFrame* JavaScriptFrameIterator::frame() const {
// TODO(1233797): The frame hierarchy needs to change. It's
// problematic that we can't use the safe-cast operator to cast to
diff --git a/src/frames.cc b/src/frames.cc
index f5a72ec..09c6a02 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -311,10 +311,12 @@
bool JavaScriptFrame::IsConstructor() const {
- Address pc = has_adapted_arguments()
- ? Memory::Address_at(ComputePCAddress(caller_fp()))
- : caller_pc();
- return IsConstructTrampolineFrame(pc);
+ Address fp = caller_fp();
+ if (has_adapted_arguments()) {
+ // Skip the arguments adaptor frame and look at the real caller.
+ fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+ }
+ return IsConstructFrame(fp);
}
diff --git a/src/frames.h b/src/frames.h
index 44e9a91..662d237 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -98,6 +98,7 @@
V(EXIT_DEBUG, ExitDebugFrame) \
V(JAVA_SCRIPT, JavaScriptFrame) \
V(INTERNAL, InternalFrame) \
+ V(CONSTRUCT, ConstructFrame) \
V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame)
@@ -124,6 +125,7 @@
bool is_java_script() const { return type() == JAVA_SCRIPT; }
bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; }
bool is_internal() const { return type() == INTERNAL; }
+ bool is_construct() const { return type() == CONSTRUCT; }
virtual bool is_standard() const { return false; }
// Accessors.
@@ -352,9 +354,9 @@
// an arguments adaptor frame.
static inline bool IsArgumentsAdaptorFrame(Address fp);
- // Determines if the standard frame for the given program counter is
- // a construct trampoline.
- static inline bool IsConstructTrampolineFrame(Address pc);
+ // Determines if the standard frame for the given frame pointer is a
+ // construct frame.
+ static inline bool IsConstructFrame(Address fp);
private:
friend class StackFrame;
@@ -380,9 +382,7 @@
// computed parameters count.
int GetProvidedParametersCount() const;
- // Check if this frame is a constructor frame invoked through
- // 'new'. The operation may involve digging through a few stack
- // frames to account for arguments adaptors.
+ // Check if this frame is a constructor frame invoked through 'new'.
bool IsConstructor() const;
// Check if this frame has "adapted" arguments in the sense that the
@@ -459,11 +459,6 @@
public:
virtual Type type() const { return INTERNAL; }
- // Returns if this frame is a special trampoline frame introduced by
- // the construct trampoline. NOTE: We should consider introducing a
- // special stack frame type for this.
- inline bool is_construct_trampoline() const;
-
// Garbage colletion support.
virtual void Iterate(ObjectVisitor* v) const;
@@ -486,6 +481,26 @@
};
+// Construct frames are special trampoline frames introduced to handle
+// function invocations through 'new'.
+class ConstructFrame: public InternalFrame {
+ public:
+ virtual Type type() const { return CONSTRUCT; }
+
+ static ConstructFrame* cast(StackFrame* frame) {
+ ASSERT(frame->is_construct());
+ return static_cast<ConstructFrame*>(frame);
+ }
+
+ protected:
+ explicit ConstructFrame(StackFrameIterator* iterator)
+ : InternalFrame(iterator) { }
+
+ private:
+ friend class StackFrameIterator;
+};
+
+
class StackFrameIterator BASE_EMBEDDED {
public:
// An iterator that iterates over the current thread's stack.
diff --git a/src/handles.cc b/src/handles.cc
index 5b6bc12..5f86792 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -128,7 +128,7 @@
void FlattenString(Handle<String> string) {
if (string->IsFlat()) return;
- CALL_HEAP_FUNCTION_VOID(String::cast(*string)->Flatten());
+ CALL_HEAP_FUNCTION_VOID(string->Flatten());
ASSERT(string->IsFlat());
}
@@ -391,13 +391,8 @@
Handle<JSArray> GetKeysFor(Handle<JSObject> object) {
Counters::for_in.Increment();
-
- Handle<FixedArray> content = GetKeysInFixedArrayFor(object);
-
- // Allocate the JSArray with the result.
- Handle<JSArray> obj = Factory::NewJSArray(content->length());
- Handle<JSArray>::cast(obj)->SetContent(*content);
- return Handle<JSArray>::cast(obj);
+ Handle<FixedArray> elements = GetKeysInFixedArrayFor(object);
+ return Factory::NewJSArrayWithElements(elements);
}
diff --git a/src/heap-inl.h b/src/heap-inl.h
index ea6c480..c841464 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -183,6 +183,9 @@
return Handle<TYPE>(); \
} \
} else { \
+ if (__object__->IsOutOfMemoryFailure()) { \
+ v8::internal::V8::FatalProcessOutOfMemory("CALL_HEAP_FUNCTION"); \
+ } \
return Handle<TYPE>(); \
} \
} \
@@ -211,7 +214,10 @@
return; \
} \
} else { \
- return; \
+ if (__object__->IsOutOfMemoryFailure()) { \
+ V8::FatalProcessOutOfMemory("Handles"); \
+ } \
+ UNREACHABLE(); \
} \
}
diff --git a/src/heap.cc b/src/heap.cc
index 526f0fe..7ebc96c 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -733,10 +733,20 @@
int size) {
void** src = reinterpret_cast<void**>((*source_p)->address());
void** dst = reinterpret_cast<void**>(target->address());
- int counter = size/kPointerSize - 1;
- do {
- *dst++ = *src++;
- } while (counter-- > 0);
+
+ // Use block copying memcpy if the object we're migrating is big
+ // enough to justify the extra call/setup overhead.
+ static const int kBlockCopyLimit = 16 * kPointerSize;
+
+ if (size >= kBlockCopyLimit) {
+ memcpy(dst, src, size);
+ } else {
+ int remaining = size / kPointerSize;
+ do {
+ remaining--;
+ *dst++ = *src++;
+ } while (remaining > 0);
+ }
// Set the forwarding address.
(*source_p)->set_map_word(MapWord::FromForwardingAddress(target));
@@ -833,6 +843,7 @@
reinterpret_cast<Map*>(result)->set_map(meta_map());
reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+ reinterpret_cast<Map*>(result)->set_inobject_properties(0);
reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
return result;
}
@@ -848,6 +859,7 @@
map->set_prototype(null_value());
map->set_constructor(null_value());
map->set_instance_size(instance_size);
+ map->set_inobject_properties(0);
map->set_instance_descriptors(empty_descriptor_array());
map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0);
@@ -921,32 +933,32 @@
STRING_TYPE_LIST(ALLOCATE_STRING_MAP);
#undef ALLOCATE_STRING_MAP
- obj = AllocateMap(SHORT_STRING_TYPE, TwoByteString::kHeaderSize);
+ obj = AllocateMap(SHORT_STRING_TYPE, SeqTwoByteString::kHeaderSize);
if (obj->IsFailure()) return false;
undetectable_short_string_map_ = Map::cast(obj);
undetectable_short_string_map_->set_is_undetectable();
- obj = AllocateMap(MEDIUM_STRING_TYPE, TwoByteString::kHeaderSize);
+ obj = AllocateMap(MEDIUM_STRING_TYPE, SeqTwoByteString::kHeaderSize);
if (obj->IsFailure()) return false;
undetectable_medium_string_map_ = Map::cast(obj);
undetectable_medium_string_map_->set_is_undetectable();
- obj = AllocateMap(LONG_STRING_TYPE, TwoByteString::kHeaderSize);
+ obj = AllocateMap(LONG_STRING_TYPE, SeqTwoByteString::kHeaderSize);
if (obj->IsFailure()) return false;
undetectable_long_string_map_ = Map::cast(obj);
undetectable_long_string_map_->set_is_undetectable();
- obj = AllocateMap(SHORT_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
+ obj = AllocateMap(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
if (obj->IsFailure()) return false;
undetectable_short_ascii_string_map_ = Map::cast(obj);
undetectable_short_ascii_string_map_->set_is_undetectable();
- obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
+ obj = AllocateMap(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
if (obj->IsFailure()) return false;
undetectable_medium_ascii_string_map_ = Map::cast(obj);
undetectable_medium_ascii_string_map_->set_is_undetectable();
- obj = AllocateMap(LONG_ASCII_STRING_TYPE, AsciiString::kHeaderSize);
+ obj = AllocateMap(LONG_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize);
if (obj->IsFailure()) return false;
undetectable_long_ascii_string_map_ = Map::cast(obj);
undetectable_long_ascii_string_map_->set_is_undetectable();
@@ -1318,7 +1330,8 @@
Object* Heap::AllocateConsString(String* first, String* second) {
int length = first->length() + second->length();
- bool is_ascii = first->is_ascii() && second->is_ascii();
+ bool is_ascii = first->is_ascii_representation()
+ && second->is_ascii_representation();
// If the resulting string is small make a flat string.
if (length < ConsString::kMinLength) {
@@ -1375,13 +1388,13 @@
Map* map;
if (length <= String::kMaxShortStringSize) {
- map = buffer->is_ascii() ? short_sliced_ascii_string_map()
+ map = buffer->is_ascii_representation() ? short_sliced_ascii_string_map()
: short_sliced_string_map();
} else if (length <= String::kMaxMediumStringSize) {
- map = buffer->is_ascii() ? medium_sliced_ascii_string_map()
+ map = buffer->is_ascii_representation() ? medium_sliced_ascii_string_map()
: medium_sliced_string_map();
} else {
- map = buffer->is_ascii() ? long_sliced_ascii_string_map()
+ map = buffer->is_ascii_representation() ? long_sliced_ascii_string_map()
: long_sliced_string_map();
}
@@ -1400,19 +1413,33 @@
Object* Heap::AllocateSubString(String* buffer, int start, int end) {
int length = end - start;
+ if (length == 1) {
+ return Heap::LookupSingleCharacterStringFromCode(buffer->Get(start));
+ }
+
// Make an attempt to flatten the buffer to reduce access time.
buffer->TryFlatten();
- Object* result = buffer->is_ascii()
+ Object* result = buffer->is_ascii_representation()
? AllocateRawAsciiString(length)
: AllocateRawTwoByteString(length);
if (result->IsFailure()) return result;
// Copy the characters into the new object.
String* string_result = String::cast(result);
- for (int i = 0; i < length; i++) {
- string_result->Set(i, buffer->Get(start + i));
+ StringHasher hasher(length);
+ int i = 0;
+ for (; i < length && hasher.is_array_index(); i++) {
+ uc32 c = buffer->Get(start + i);
+ hasher.AddCharacter(c);
+ string_result->Set(i, c);
}
+ for (; i < length; i++) {
+ uc32 c = buffer->Get(start + i);
+ hasher.AddCharacterNoIndex(c);
+ string_result->Set(i, c);
+ }
+ string_result->set_length_field(hasher.GetHashField());
return result;
}
@@ -1636,8 +1663,17 @@
Object* Heap::AllocateInitialMap(JSFunction* fun) {
ASSERT(!fun->has_initial_map());
- // First create a new map.
- Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ // First create a new map with the expected number of properties being
+ // allocated in-object.
+ int expected_nof_properties = fun->shared()->expected_nof_properties();
+ int instance_size = JSObject::kHeaderSize +
+ expected_nof_properties * kPointerSize;
+ if (instance_size > JSObject::kMaxInstanceSize) {
+ instance_size = JSObject::kMaxInstanceSize;
+ expected_nof_properties = (instance_size - JSObject::kHeaderSize) /
+ kPointerSize;
+ }
+ Object* map_obj = Heap::AllocateMap(JS_OBJECT_TYPE, instance_size);
if (map_obj->IsFailure()) return map_obj;
// Fetch or allocate prototype.
@@ -1649,7 +1685,8 @@
if (prototype->IsFailure()) return prototype;
}
Map* map = Map::cast(map_obj);
- map->set_unused_property_fields(fun->shared()->expected_nof_properties());
+ map->set_inobject_properties(expected_nof_properties);
+ map->set_unused_property_fields(expected_nof_properties);
map->set_prototype(prototype);
return map;
}
@@ -1677,7 +1714,8 @@
ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
// Allocate the backing storage for the properties.
- Object* properties = AllocateFixedArray(map->unused_property_fields());
+ int prop_size = map->unused_property_fields() - map->inobject_properties();
+ Object* properties = AllocateFixedArray(prop_size);
if (properties->IsFailure()) return properties;
// Allocate the JSObject.
@@ -1726,7 +1764,8 @@
ASSERT(map->instance_size() == object->map()->instance_size());
// Allocate the backing storage for the properties.
- Object* properties = AllocateFixedArray(map->unused_property_fields());
+ int prop_size = map->unused_property_fields() - map->inobject_properties();
+ Object* properties = AllocateFixedArray(prop_size);
if (properties->IsFailure()) return properties;
// Reset the map for the object.
@@ -1744,9 +1783,9 @@
if (result->IsFailure()) return result;
// Copy the characters into the new object.
- AsciiString* string_result = AsciiString::cast(result);
+ SeqAsciiString* string_result = SeqAsciiString::cast(result);
for (int i = 0; i < string.length(); i++) {
- string_result->AsciiStringSet(i, string[i]);
+ string_result->SeqAsciiStringSet(i, string[i]);
}
return result;
}
@@ -1872,7 +1911,7 @@
Object* Heap::AllocateSymbol(unibrow::CharacterStream* buffer,
int chars,
- int hash) {
+ uint32_t length_field) {
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
// Determine whether the string is ascii.
@@ -1894,7 +1933,7 @@
} else {
map = long_ascii_symbol_map();
}
- size = AsciiString::SizeFor(chars);
+ size = SeqAsciiString::SizeFor(chars);
} else {
if (chars <= String::kMaxShortStringSize) {
map = short_symbol_map();
@@ -1903,7 +1942,7 @@
} else {
map = long_symbol_map();
}
- size = TwoByteString::SizeFor(chars);
+ size = SeqTwoByteString::SizeFor(chars);
}
// Allocate string.
@@ -1914,7 +1953,7 @@
reinterpret_cast<HeapObject*>(result)->set_map(map);
// The hash value contains the length of the string.
- String::cast(result)->set_length_field(hash);
+ String::cast(result)->set_length_field(length_field);
ASSERT_EQ(size, String::cast(result)->Size());
@@ -1928,7 +1967,7 @@
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- int size = AsciiString::SizeFor(length);
+ int size = SeqAsciiString::SizeFor(length);
if (size > MaxHeapObjectSize()) {
space = LO_SPACE;
}
@@ -1958,7 +1997,7 @@
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- int size = TwoByteString::SizeFor(length);
+ int size = SeqTwoByteString::SizeFor(length);
if (size > MaxHeapObjectSize()) {
space = LO_SPACE;
}
@@ -2251,6 +2290,16 @@
}
+bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
+ if (string->IsSymbol()) {
+ *symbol = string;
+ return true;
+ }
+ SymbolTable* table = SymbolTable::cast(symbol_table_);
+ return table->LookupSymbolIfExists(string, symbol);
+}
+
+
#ifdef DEBUG
void Heap::ZapFromSpace() {
ASSERT(HAS_HEAP_OBJECT_TAG(kFromSpaceZapValue));
diff --git a/src/heap.h b/src/heap.h
index ba101bb..4da9a98 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -154,6 +154,8 @@
V(object_symbol, "object") \
V(prototype_symbol, "prototype") \
V(string_symbol, "string") \
+ V(String_symbol, "String") \
+ V(Date_symbol, "Date") \
V(this_symbol, "this") \
V(to_string_symbol, "toString") \
V(char_at_symbol, "CharAt") \
@@ -341,7 +343,7 @@
// Please note this function does not perform a garbage collection.
static Object* AllocateSymbol(unibrow::CharacterStream* buffer,
int chars,
- int hash);
+ uint32_t length_field);
// Allocates and partially initializes a String. There are two String
// encodings: ASCII and two byte. These functions allocate a string of the
@@ -528,6 +530,7 @@
return LookupSymbol(CStrVector(str));
}
static Object* LookupSymbol(String* str);
+ static bool LookupSymbolIfExists(String* str, String** symbol);
// Compute the matching symbol map for a string if possible.
// NULL is returned if string is in new space or not flattened.
diff --git a/src/ic-arm.cc b/src/ic-arm.cc
index 8c165a6..5a60322 100644
--- a/src/ic-arm.cc
+++ b/src/ic-arm.cc
@@ -94,6 +94,7 @@
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
+ __ mov(t1, Operand(t1, LSR, String::kHashShift));
if (i > 0) __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
__ and_(t1, t1, Operand(r3));
diff --git a/src/ic-ia32.cc b/src/ic-ia32.cc
index 496af24..3b12f29 100644
--- a/src/ic-ia32.cc
+++ b/src/ic-ia32.cc
@@ -90,6 +90,7 @@
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(r1, FieldOperand(name, String::kLengthOffset));
+ __ shr(r1, String::kHashShift);
if (i > 0) __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
__ and_(r1, Operand(r2));
@@ -244,7 +245,7 @@
// Slow case: Load name and receiver from stack and jump to runtime.
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1);
- KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kGetProperty));
+ KeyedLoadIC::Generate(masm, ExternalReference(Runtime::kKeyedGetProperty));
// Check if the key is a symbol that is not an array index.
__ bind(&check_string);
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 0e4dee1..768c719 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -128,7 +128,7 @@
ASSERT(!flat_string->IsConsString());
ASSERT(flat_string->IsSeqString() || flat_string->IsSlicedString() ||
flat_string->IsExternalString());
- if (!flat_string->IsAscii()) {
+ if (!flat_string->IsAsciiRepresentation()) {
return flat_string;
}
@@ -218,12 +218,13 @@
}
LOG(RegExpExecEvent(re, start_index, subject));
- int value = Runtime::StringMatchKmp(*subject, *needle, start_index);
+ int value = Runtime::StringMatch(subject, needle, start_index);
if (value == -1) return Factory::null_value();
- Handle<JSArray> result = Factory::NewJSArray(2);
- SetElement(result, 0, Handle<Smi>(Smi::FromInt(value)));
- SetElement(result, 1, Handle<Smi>(Smi::FromInt(value + needle->length())));
- return result;
+
+ Handle<FixedArray> array = Factory::NewFixedArray(2);
+ array->set(0, Smi::FromInt(value));
+ array->set(1, Smi::FromInt(value + needle->length()));
+ return Factory::NewJSArrayWithElements(array);
}
@@ -231,24 +232,28 @@
Handle<String> subject) {
Handle<String> needle(String::cast(re->data()));
Handle<JSArray> result = Factory::NewJSArray(1);
- bool keep_going = true;
int index = 0;
int match_count = 0;
+ int subject_length = subject->length();
int needle_length = needle->length();
- while (keep_going) {
+ while (true) {
LOG(RegExpExecEvent(re, index, subject));
- int value = Runtime::StringMatchKmp(*subject, *needle, index);
+ int value = -1;
+ if (index + needle_length <= subject_length) {
+ value = Runtime::StringMatch(subject, needle, index);
+ }
if (value == -1) break;
HandleScope scope;
int end = value + needle_length;
- Handle<JSArray> pair = Factory::NewJSArray(2);
- SetElement(pair, 0, Handle<Smi>(Smi::FromInt(value)));
- SetElement(pair, 1, Handle<Smi>(Smi::FromInt(end)));
+
+ Handle<FixedArray> array = Factory::NewFixedArray(2);
+ array->set(0, Smi::FromInt(value));
+ array->set(1, Smi::FromInt(end));
+ Handle<JSArray> pair = Factory::NewJSArrayWithElements(array);
SetElement(result, match_count, pair);
match_count++;
index = end;
- if (needle_length == 0)
- index++;
+ if (needle_length == 0) index++;
}
return result;
}
@@ -270,55 +275,56 @@
unsigned number_of_captures;
const char* error_message = NULL;
- malloc_failure = Failure::Exception();
- JscreRegExp* code = jsRegExpCompile(two_byte_pattern->GetTwoByteData(),
- pattern->length(), case_option,
- multiline_option, &number_of_captures,
- &error_message, &JSREMalloc, &JSREFree);
+ JscreRegExp* code = NULL;
+ FlattenString(pattern);
- if (code == NULL && malloc_failure->IsRetryAfterGC()) {
- // Performs a GC, then retries.
- if (!Heap::CollectGarbage(malloc_failure->requested(),
- malloc_failure->allocation_space())) {
- // TODO(1181417): Fix this.
- V8::FatalProcessOutOfMemory("RegExpImpl::JsreCompile");
- }
+ bool first_time = true;
+
+ while (true) {
malloc_failure = Failure::Exception();
code = jsRegExpCompile(two_byte_pattern->GetTwoByteData(),
pattern->length(), case_option,
multiline_option, &number_of_captures,
&error_message, &JSREMalloc, &JSREFree);
- if (code == NULL && malloc_failure->IsRetryAfterGC()) {
- // TODO(1181417): Fix this.
- V8::FatalProcessOutOfMemory("RegExpImpl::JsreCompile");
+ if (code == NULL) {
+ if (first_time && malloc_failure->IsRetryAfterGC()) {
+ first_time = false;
+ if (!Heap::CollectGarbage(malloc_failure->requested(),
+ malloc_failure->allocation_space())) {
+ // TODO(1181417): Fix this.
+ V8::FatalProcessOutOfMemory("RegExpImpl::JsreCompile");
+ }
+ continue;
+ }
+ if (malloc_failure->IsRetryAfterGC() ||
+ malloc_failure->IsOutOfMemoryFailure()) {
+ // TODO(1181417): Fix this.
+ V8::FatalProcessOutOfMemory("RegExpImpl::JsreCompile");
+ } else {
+ // Throw an exception.
+ Handle<JSArray> array = Factory::NewJSArray(2);
+ SetElement(array, 0, pattern);
+ SetElement(array, 1, Factory::NewStringFromUtf8(CStrVector(
+ (error_message == NULL) ? "Unknown regexp error" : error_message)));
+ Handle<Object> regexp_err =
+ Factory::NewSyntaxError("malformed_regexp", array);
+ return Handle<Object>(Top::Throw(*regexp_err));
+ }
}
+
+ ASSERT(code != NULL);
+ // Convert the return address to a ByteArray pointer.
+ Handle<ByteArray> internal(
+ ByteArray::FromDataStartAddress(reinterpret_cast<Address>(code)));
+
+ Handle<FixedArray> value = Factory::NewFixedArray(2);
+ value->set(CAPTURE_INDEX, Smi::FromInt(number_of_captures));
+ value->set(INTERNAL_INDEX, *internal);
+ re->set_type_tag(JSRegExp::JSCRE);
+ re->set_data(*value);
+
+ return re;
}
-
- if (error_message != NULL) {
- // Throw an exception.
- SmartPointer<char> char_pattern =
- two_byte_pattern->ToCString(DISALLOW_NULLS);
- Handle<JSArray> array = Factory::NewJSArray(2);
- SetElement(array, 0, Factory::NewStringFromUtf8(CStrVector(*char_pattern)));
- SetElement(array, 1, Factory::NewStringFromUtf8(CStrVector(error_message)));
- Handle<Object> regexp_err =
- Factory::NewSyntaxError("malformed_regexp", array);
- return Handle<Object>(Top::Throw(*regexp_err));
- }
-
- ASSERT(code != NULL);
-
- // Convert the return address to a ByteArray pointer.
- Handle<ByteArray> internal(
- ByteArray::FromDataStartAddress(reinterpret_cast<Address>(code)));
-
- Handle<FixedArray> value = Factory::NewFixedArray(2);
- value->set(CAPTURE_INDEX, Smi::FromInt(number_of_captures));
- value->set(INTERNAL_INDEX, *internal);
- re->set_type_tag(JSRegExp::JSCRE);
- re->set_data(*value);
-
- return re;
}
@@ -363,14 +369,13 @@
return Handle<Object>(Top::Throw(*regexp_err));
}
- Handle<JSArray> result = Factory::NewJSArray(2 * (num_captures+1));
-
+ Handle<FixedArray> array = Factory::NewFixedArray(2 * (num_captures+1));
// The captures come in (start, end+1) pairs.
for (int i = 0; i < 2 * (num_captures+1); i += 2) {
- SetElement(result, i, Handle<Object>(Smi::FromInt(offsets_vector[i])));
- SetElement(result, i+1, Handle<Object>(Smi::FromInt(offsets_vector[i+1])));
+ array->set(i, Smi::FromInt(offsets_vector[i]));
+ array->set(i+1, Smi::FromInt(offsets_vector[i+1]));
}
- return result;
+ return Factory::NewJSArrayWithElements(array);
}
@@ -445,7 +450,7 @@
int previous_index = 0;
- Handle<JSArray> result = Factory::NewJSArray(0);
+ Handle<JSArray> result = Factory::NewJSArray(0);
int i = 0;
Handle<Object> matches;
diff --git a/src/log.cc b/src/log.cc
index 72eb455..01ca6dd 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -31,6 +31,7 @@
#include "log.h"
#include "platform.h"
+#include "string-stream.h"
namespace v8 { namespace internal {
@@ -686,6 +687,42 @@
if (open_log_file) {
if (strcmp(FLAG_logfile, "-") == 0) {
logfile_ = stdout;
+ } else if (strchr(FLAG_logfile, '%') != NULL) {
+ // If there's a '%' in the log file name we have to expand
+ // placeholders.
+ HeapStringAllocator allocator;
+ StringStream stream(&allocator);
+ for (const char* p = FLAG_logfile; *p; p++) {
+ if (*p == '%') {
+ p++;
+ switch (*p) {
+ case '\0':
+ // If there's a % at the end of the string we back up
+ // one character so we can escape the loop properly.
+ p--;
+ break;
+ case 't': {
+ // %t expands to the current time in milliseconds.
+ uint32_t time = static_cast<uint32_t>(OS::TimeCurrentMillis());
+ stream.Add("%u", time);
+ break;
+ }
+ case '%':
+ // %% expands (contracts really) to %.
+ stream.Put('%');
+ break;
+ default:
+ // All other %'s expand to themselves.
+ stream.Put('%');
+ stream.Put(*p);
+ break;
+ }
+ } else {
+ stream.Put(*p);
+ }
+ }
+ SmartPointer<char> expanded = stream.ToCString();
+ logfile_ = OS::FOpen(*expanded, "w");
} else {
logfile_ = OS::FOpen(FLAG_logfile, "w");
}
diff --git a/src/macro-assembler-arm.cc b/src/macro-assembler-arm.cc
index a6c336f..c7139f4 100644
--- a/src/macro-assembler-arm.cc
+++ b/src/macro-assembler-arm.cc
@@ -251,10 +251,8 @@
}
-void MacroAssembler::EnterInternalFrame() {
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
- int type = StackFrame::INTERNAL;
-
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
mov(ip, Operand(Smi::FromInt(type)));
push(ip);
@@ -264,13 +262,13 @@
}
-void MacroAssembler::LeaveInternalFrame() {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
// r0: preserved
// r1: preserved
// r2: preserved
- // Drop the execution stack down to the frame pointer and restore the caller
- // frame pointer and return address.
+ // Drop the execution stack down to the frame pointer and restore
+ // the caller frame pointer and return address.
mov(sp, fp);
ldm(ia_w, sp, fp.bit() | lr.bit());
}
diff --git a/src/macro-assembler-arm.h b/src/macro-assembler-arm.h
index 7930e4c..d1a4a44 100644
--- a/src/macro-assembler-arm.h
+++ b/src/macro-assembler-arm.h
@@ -99,8 +99,11 @@
// ---------------------------------------------------------------------------
// Activation frames
- void EnterInternalFrame();
- void LeaveInternalFrame();
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame; either EXIT or
// EXIT_DEBUG. Expects the number of arguments in register r0 and
@@ -260,6 +263,10 @@
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
};
diff --git a/src/macro-assembler-ia32.cc b/src/macro-assembler-ia32.cc
index 11ffa01..e72eeb2 100644
--- a/src/macro-assembler-ia32.cc
+++ b/src/macro-assembler-ia32.cc
@@ -99,8 +99,6 @@
Register addr_;
Register scratch_;
- const char* GetName() { return "RecordWriteStub"; }
-
#ifdef DEBUG
void Print() {
PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
@@ -314,9 +312,7 @@
}
-void MacroAssembler::EnterInternalFrame() {
- int type = StackFrame::INTERNAL;
-
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
push(ebp);
mov(ebp, Operand(esp));
push(esi);
@@ -325,9 +321,8 @@
}
-void MacroAssembler::LeaveInternalFrame() {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (FLAG_debug_code) {
- StackFrame::Type type = StackFrame::INTERNAL;
cmp(Operand(ebp, StandardFrameConstants::kMarkerOffset),
Immediate(Smi::FromInt(type)));
Check(equal, "stack frame types must match");
@@ -589,6 +584,57 @@
}
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ test(function, Immediate(kSmiTagMask));
+ j(zero, miss, not_taken);
+
+ // Check that the function really is a function.
+ mov(result, FieldOperand(function, HeapObject::kMapOffset));
+ movzx_b(scratch, FieldOperand(result, Map::kInstanceTypeOffset));
+ cmp(scratch, JS_FUNCTION_TYPE);
+ j(not_equal, miss, not_taken);
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+ test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance, not_taken);
+
+ // Get the prototype or initial map from the function.
+ mov(result,
+ FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ cmp(Operand(result), Immediate(Factory::the_hole_value()));
+ j(equal, miss, not_taken);
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ mov(scratch, FieldOperand(result, HeapObject::kMapOffset));
+ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ cmp(scratch, MAP_TYPE);
+ j(not_equal, &done);
+
+ // Get the prototype from the initial map.
+ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ mov(result, FieldOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
+
+
void MacroAssembler::CallStub(CodeStub* stub) {
ASSERT(allow_stub_calls()); // calls are not allowed in some stubs
call(stub->GetCode(), RelocInfo::CODE_TARGET);
diff --git a/src/macro-assembler-ia32.h b/src/macro-assembler-ia32.h
index d644c0f..e126c3c 100644
--- a/src/macro-assembler-ia32.h
+++ b/src/macro-assembler-ia32.h
@@ -86,8 +86,11 @@
// ---------------------------------------------------------------------------
// Activation frames
- void EnterInternalFrame();
- void LeaveInternalFrame();
+ void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
+ void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
+
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame; either EXIT or
// EXIT_DEBUG. Expects the number of arguments in register eax and
@@ -179,6 +182,16 @@
void NegativeZeroTest(Register result, Register op1, Register op2,
Register scratch, Label* then_label);
+ // Try to get function prototype of a function and puts the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss);
+
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
@@ -264,6 +277,10 @@
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void LeaveFrame(StackFrame::Type type);
};
diff --git a/src/macros.py b/src/macros.py
index dbdaa34..48ca367 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -79,10 +79,11 @@
macro IS_OBJECT(arg) = (typeof(arg) === 'object');
macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean');
macro IS_REGEXP(arg) = (%ClassOf(arg) === 'RegExp');
-macro IS_ARRAY(arg) = (%ClassOf(arg) === 'Array');
-macro IS_DATE(arg) = (%ClassOf(arg) === 'Date');
+macro IS_ARRAY(arg) = %IsArrayClass(arg);
+macro IS_DATE(arg) = %IsDateClass(arg);
macro IS_ERROR(arg) = (%ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%ClassOf(arg) === 'Script');
+macro FLOOR(arg) = %Math_floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
diff --git a/src/messages.js b/src/messages.js
index a6d9651..531c710 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -639,8 +639,8 @@
// prototype of 'Error' must be as default: new Object().
if (name != 'Error') %FunctionSetPrototype(f, new $Error());
%FunctionSetInstanceClassName(f, 'Error');
+ %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
f.prototype.name = name;
- f.prototype.constructor = f;
%SetCode(f, function(m) {
if (%IsConstructCall()) {
if (!IS_UNDEFINED(m)) this.message = ToString(m);
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 951575c..3c56590 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -263,7 +263,7 @@
r.GetKey()->StringPrint();
PrintF(": ");
if (r.type() == FIELD) {
- properties()->get(r.GetFieldIndex())->ShortPrint();
+ FastPropertyAt(r.GetFieldIndex())->ShortPrint();
PrintF(" (field at offset %d)\n", r.GetFieldIndex());
} else if (r.type() == CONSTANT_FUNCTION) {
r.GetConstantFunction()->ShortPrint();
@@ -313,7 +313,8 @@
VerifyHeapPointer(elements());
if (HasFastProperties()) {
CHECK(map()->unused_property_fields() ==
- (properties()->length() - map()->NextFreePropertyIndex()));
+ (map()->inobject_properties() + properties()->length() -
+ map()->NextFreePropertyIndex()));
}
}
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 2645363..ce2da12 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -119,13 +119,25 @@
}
-bool Object::IsAsciiString() {
- return IsString() && (String::cast(this)->is_ascii());
+bool Object::IsSeqAsciiString() {
+ return IsSeqString()
+ && String::cast(this)->IsAsciiRepresentation();
}
-bool Object::IsTwoByteString() {
- return IsString() && (!String::cast(this)->is_ascii());
+bool Object::IsSeqTwoByteString() {
+ return IsSeqString()
+ && !String::cast(this)->IsAsciiRepresentation();
+}
+
+
+bool Object::IsAsciiStringRepresentation() {
+ return IsString() && (String::cast(this)->is_ascii_representation());
+}
+
+
+bool Object::IsTwoByteStringRepresentation() {
+ return IsString() && (!String::cast(this)->is_ascii_representation());
}
@@ -148,12 +160,12 @@
bool Object::IsExternalAsciiString() {
- return IsExternalString() && (String::cast(this)->is_ascii());
+ return IsExternalString() && (String::cast(this)->is_ascii_representation());
}
bool Object::IsExternalTwoByteString() {
- return IsExternalString() && (!String::cast(this)->is_ascii());
+ return IsExternalString() && (!String::cast(this)->is_ascii_representation());
}
@@ -199,6 +211,12 @@
}
+bool Object::IsOutOfMemoryFailure() {
+ return HAS_FAILURE_TAG(this)
+ && Failure::cast(this)->IsOutOfMemoryException();
+}
+
+
bool Object::IsException() {
return this == Failure::Exception();
}
@@ -484,6 +502,12 @@
#define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
+#define READ_UINT32_FIELD(p, offset) \
+ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
+
+#define WRITE_UINT32_FIELD(p, offset, value) \
+ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
+
#define READ_SHORT_FIELD(p, offset) \
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
@@ -603,23 +627,19 @@
bool MapWord::IsForwardingAddress() {
- // This function only works for map words that are heap object pointers.
- // Since it is a heap object, it has a map. We use that map's instance
- // type to detect if this map word is not actually a map (ie, it is a
- // forwarding address during a scavenge collection).
- return reinterpret_cast<HeapObject*>(value_)->map()->instance_type() !=
- MAP_TYPE;
+ return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
}
MapWord MapWord::FromForwardingAddress(HeapObject* object) {
- return MapWord(reinterpret_cast<uintptr_t>(object));
+ Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
+ return MapWord(reinterpret_cast<uintptr_t>(raw));
}
HeapObject* MapWord::ToForwardingAddress() {
ASSERT(IsForwardingAddress());
- return reinterpret_cast<HeapObject*>(value_);
+ return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
}
@@ -874,24 +894,64 @@
int JSObject::GetInternalFieldCount() {
ASSERT(1 << kPointerSizeLog2 == kPointerSize);
- return (Size() - GetHeaderSize()) >> kPointerSizeLog2;
+ // Make sure to adjust for the number of in-object properties. These
+ // properties do contribute to the size, but are not internal fields.
+ return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
+ map()->inobject_properties();
}
Object* JSObject::GetInternalField(int index) {
ASSERT(index < GetInternalFieldCount() && index >= 0);
+ // Internal objects do follow immediately after the header, whereas in-object
+ // properties are at the end of the object. Therefore there is no need
+ // to adjust the index here.
return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
}
void JSObject::SetInternalField(int index, Object* value) {
ASSERT(index < GetInternalFieldCount() && index >= 0);
+ // Internal objects do follow immediately after the header, whereas in-object
+ // properties are at the end of the object. Therefore there is no need
+ // to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(this, offset);
}
+// Access fast-case object properties at index. The use of these routines
+// is needed to correctly distinguish between properties stored in-object and
+// properties stored in the properties array.
+inline Object* JSObject::FastPropertyAt(int index) {
+ // Adjust for the number of properties stored in the object.
+ index -= map()->inobject_properties();
+ if (index < 0) {
+ int offset = map()->instance_size() + (index * kPointerSize);
+ return READ_FIELD(this, offset);
+ } else {
+ ASSERT(index < properties()->length());
+ return properties()->get(index);
+ }
+}
+
+
+inline Object* JSObject::FastPropertyAtPut(int index, Object* value) {
+ // Adjust for the number of properties stored in the object.
+ index -= map()->inobject_properties();
+ if (index < 0) {
+ int offset = map()->instance_size() + (index * kPointerSize);
+ WRITE_FIELD(this, offset, value);
+ WRITE_BARRIER(this, offset);
+ } else {
+ ASSERT(index < properties()->length());
+ properties()->set(index, value);
+ }
+ return value;
+}
+
+
void JSObject::InitializeBody(int object_size) {
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, Heap::undefined_value());
@@ -1120,8 +1180,8 @@
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(AsciiString)
-CAST_ACCESSOR(TwoByteString)
+CAST_ACCESSOR(SeqAsciiString)
+CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ExternalString)
@@ -1204,13 +1264,13 @@
}
-int String::length_field() {
- return READ_INT_FIELD(this, kLengthOffset);
+uint32_t String::length_field() {
+ return READ_UINT32_FIELD(this, kLengthOffset);
}
-void String::set_length_field(int value) {
- WRITE_INT_FIELD(this, kLengthOffset, value);
+void String::set_length_field(uint32_t value) {
+ WRITE_UINT32_FIELD(this, kLengthOffset, value);
}
@@ -1228,15 +1288,15 @@
ASSERT(index >= 0 && index < length());
switch (representation_tag()) {
case kSeqStringTag:
- return is_ascii()
- ? AsciiString::cast(this)->AsciiStringGet(index)
- : TwoByteString::cast(this)->TwoByteStringGet(index);
+ return is_ascii_representation()
+ ? SeqAsciiString::cast(this)->SeqAsciiStringGet(index)
+ : SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
case kConsStringTag:
return ConsString::cast(this)->ConsStringGet(index);
case kSlicedStringTag:
return SlicedString::cast(this)->SlicedStringGet(index);
case kExternalStringTag:
- return is_ascii()
+ return is_ascii_representation()
? ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index)
: ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
default:
@@ -1252,14 +1312,14 @@
ASSERT(index >= 0 && index < length());
ASSERT(IsSeqString());
- return is_ascii()
- ? AsciiString::cast(this)->AsciiStringSet(index, value)
- : TwoByteString::cast(this)->TwoByteStringSet(index, value);
+ return is_ascii_representation()
+ ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value)
+ : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
}
-bool String::IsAscii() {
- return is_ascii();
+bool String::IsAsciiRepresentation() {
+ return is_ascii_representation();
}
@@ -1293,12 +1353,12 @@
}
-bool String::is_ascii() {
- return is_ascii_map(map());
+bool String::is_ascii_representation() {
+ return is_ascii_representation_map(map());
}
-bool String::is_ascii_map(Map* map) {
+bool String::is_ascii_representation_map(Map* map) {
return (map->instance_type() & kStringEncodingMask) != 0;
}
@@ -1315,52 +1375,57 @@
bool String::IsFlat() {
- String* current = this;
- while (true) {
- switch (current->representation_tag()) {
- case kConsStringTag:
- return String::cast(ConsString::cast(current)->second())->length() == 0;
- case kSlicedStringTag:
- current = String::cast(SlicedString::cast(this)->buffer());
- break;
- default:
- return true;
+ switch (this->representation_tag()) {
+ case kConsStringTag:
+ // Only flattened strings have second part empty.
+ return String::cast(ConsString::cast(this)->second())->length() == 0;
+ case kSlicedStringTag: {
+ String* slice = String::cast(SlicedString::cast(this)->buffer());
+ StringRepresentationTag tag = slice->representation_tag();
+ return tag == kSeqStringTag || tag == kExternalStringTag;
}
+ default:
+ return true;
}
}
-uint16_t AsciiString::AsciiStringGet(int index) {
+uint16_t SeqAsciiString::SeqAsciiStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
-void AsciiString::AsciiStringSet(int index, uint16_t value) {
+void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) {
ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
static_cast<byte>(value));
}
-Address AsciiString::GetCharsAddress() {
+Address SeqAsciiString::GetCharsAddress() {
return FIELD_ADDR(this, kHeaderSize);
}
-uint16_t TwoByteString::TwoByteStringGet(int index) {
+Address SeqTwoByteString::GetCharsAddress() {
+ return FIELD_ADDR(this, kHeaderSize);
+}
+
+
+uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
}
-void TwoByteString::TwoByteStringSet(int index, uint16_t value) {
+void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
ASSERT(index >= 0 && index < length());
WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
}
-int TwoByteString::TwoByteStringSize(Map* map) {
+int SeqTwoByteString::SeqTwoByteStringSize(Map* map) {
uint32_t length = READ_INT_FIELD(this, kLengthOffset);
// Use the map (and not 'this') to compute the size tag, since
@@ -1382,7 +1447,7 @@
}
-int AsciiString::AsciiStringSize(Map* map) {
+int SeqAsciiString::SeqAsciiStringSize(Map* map) {
uint32_t length = READ_INT_FIELD(this, kLengthOffset);
// Use the map (and not 'this') to compute the size tag, since
@@ -1500,7 +1565,12 @@
int Map::instance_size() {
- return READ_BYTE_FIELD(this, kInstanceSizeOffset);
+ return READ_BYTE_FIELD(this, kInstanceSizeOffset) << kPointerSizeLog2;
+}
+
+
+int Map::inobject_properties() {
+ return READ_BYTE_FIELD(this, kInObjectPropertiesOffset);
}
@@ -1517,11 +1587,19 @@
void Map::set_instance_size(int value) {
+ ASSERT((value & ~(kPointerSize - 1)) == value);
+ value >>= kPointerSizeLog2;
ASSERT(0 <= value && value < 256);
WRITE_BYTE_FIELD(this, kInstanceSizeOffset, static_cast<byte>(value));
}
+void Map::set_inobject_properties(int value) {
+ ASSERT(0 <= value && value < 256);
+ WRITE_BYTE_FIELD(this, kInObjectPropertiesOffset, static_cast<byte>(value));
+}
+
+
InstanceType Map::instance_type() {
return static_cast<InstanceType>(READ_BYTE_FIELD(this, kInstanceTypeOffset));
}
@@ -2070,16 +2148,75 @@
uint32_t String::Hash() {
// Fast case: has hash code already been computed?
- int hash = length_field();
- if (hash & kHashComputedMask) return hash;
+ uint32_t field = length_field();
+ if (field & kHashComputedMask) return field >> kHashShift;
// Slow case: compute hash code and set it..
return ComputeAndSetHash();
}
+StringHasher::StringHasher(int length)
+ : length_(length),
+ raw_running_hash_(0),
+ array_index_(0),
+ is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
+ is_first_char_(true),
+ is_valid_(true) { }
+
+
+bool StringHasher::has_trivial_hash() {
+ return length_ > String::kMaxMediumStringSize;
+}
+
+
+void StringHasher::AddCharacter(uc32 c) {
+ // Note: the Jenkins one-at-a-time hash function
+ raw_running_hash_ += c;
+ raw_running_hash_ += (raw_running_hash_ << 10);
+ raw_running_hash_ ^= (raw_running_hash_ >> 6);
+ // Incremental array index computation
+ if (is_array_index_) {
+ if (c < '0' || c > '9') {
+ is_array_index_ = false;
+ } else {
+ int d = c - '0';
+ if (is_first_char_) {
+ is_first_char_ = false;
+ if (c == '0' && length_ > 1) {
+ is_array_index_ = false;
+ return;
+ }
+ }
+ if (array_index_ > 429496729U - ((d + 2) >> 3)) {
+ is_array_index_ = false;
+ } else {
+ array_index_ = array_index_ * 10 + d;
+ }
+ }
+ }
+}
+
+
+void StringHasher::AddCharacterNoIndex(uc32 c) {
+ ASSERT(!is_array_index());
+ raw_running_hash_ += c;
+ raw_running_hash_ += (raw_running_hash_ << 10);
+ raw_running_hash_ ^= (raw_running_hash_ >> 6);
+}
+
+
+uint32_t StringHasher::GetHash() {
+ uint32_t result = raw_running_hash_;
+ result += (result << 3);
+ result ^= (result >> 11);
+ result += (result << 15);
+ return result;
+}
+
+
bool String::AsArrayIndex(uint32_t* index) {
- int hash = length_field();
- if ((hash & kHashComputedMask) && !(hash & kIsArrayIndexMask)) return false;
+ uint32_t field = length_field();
+ if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
return SlowAsArrayIndex(index);
}
@@ -2152,6 +2289,12 @@
}
+void JSArray::SetContent(FixedArray* storage) {
+ set_length(Smi::FromInt(storage->length()));
+ set_elements(storage);
+}
+
+
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef SMI_ACCESSORS
diff --git a/src/objects.cc b/src/objects.cc
index dc64337..da81f52 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -137,19 +137,6 @@
} else if (IsBoolean()) {
holder = global_context->boolean_function()->instance_prototype();
}
-#ifdef DEBUG
- // Used to track outstanding bug #1308895.
- // TODO(1308895) Remove when bug is fixed.
- if (holder == NULL) {
- PrintF("\nName being looked up: ");
- name->Print();
- PrintF("\nThis (object name is looked up in: ");
- this->Print();
- if (IsScript()) {
- PrintF("IsScript() returns true.\n");
- }
- }
-#endif
ASSERT(holder != NULL); // cannot handle null or undefined.
JSObject::cast(holder)->Lookup(name, result);
}
@@ -379,7 +366,7 @@
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? Heap::undefined_value() : value;
case FIELD:
- value = holder->properties()->get(result->GetFieldIndex());
+ value = holder->FastPropertyAt(result->GetFieldIndex());
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? Heap::undefined_value() : value;
case CONSTANT_FUNCTION:
@@ -534,12 +521,13 @@
// cons string is in old space. It can never get GCed until there is
// an old space GC.
PretenureFlag tenure = Heap::InNewSpace(this) ? NOT_TENURED : TENURED;
- Object* object = IsAscii() ?
- Heap::AllocateRawAsciiString(length(), tenure) :
- Heap::AllocateRawTwoByteString(length(), tenure);
+ int len = length();
+ Object* object = IsAsciiRepresentation() ?
+ Heap::AllocateRawAsciiString(len, tenure) :
+ Heap::AllocateRawTwoByteString(len, tenure);
if (object->IsFailure()) return object;
String* result = String::cast(object);
- Flatten(this, result, 0, length(), 0);
+ Flatten(this, result, 0, len, 0);
cs->set_first(result);
cs->set_second(Heap::empty_string());
return this;
@@ -767,10 +755,11 @@
if (instance_type < FIRST_NONSTRING_TYPE
&& (reinterpret_cast<String*>(this)->map_representation_tag(map)
== kSeqStringTag)) {
- if (reinterpret_cast<String*>(this)->is_ascii_map(map)) {
- return reinterpret_cast<AsciiString*>(this)->AsciiStringSize(map);
+ if (reinterpret_cast<String*>(this)->is_ascii_representation_map(map)) {
+ return reinterpret_cast<SeqAsciiString*>(this)->SeqAsciiStringSize(map);
} else {
- return reinterpret_cast<TwoByteString*>(this)->TwoByteStringSize(map);
+ SeqTwoByteString* self = reinterpret_cast<SeqTwoByteString*>(this);
+ return self->SeqTwoByteStringSize(map);
}
}
@@ -947,20 +936,16 @@
String* name,
Object* value) {
int index = new_map->PropertyIndexFor(name);
- if (map()->unused_property_fields() > 0) {
- ASSERT(index < properties()->length());
- properties()->set(index, value);
- } else {
+ if (map()->unused_property_fields() == 0) {
ASSERT(map()->unused_property_fields() == 0);
int new_unused = new_map->unused_property_fields();
Object* values =
properties()->CopySize(properties()->length() + new_unused + 1);
if (values->IsFailure()) return values;
- FixedArray::cast(values)->set(index, value);
set_properties(FixedArray::cast(values));
}
set_map(new_map);
- return value;
+ return FastPropertyAtPut(index, value);
}
@@ -975,21 +960,7 @@
return AddSlowProperty(name, value, attributes);
}
- // Replace a CONSTANT_TRANSITION flag with a transition.
- // Do this by removing it, and the standard code for adding a map transition
- // will then run.
DescriptorArray* old_descriptors = map()->instance_descriptors();
- int old_name_index = old_descriptors->Search(name);
- bool constant_transition = false; // Only used in assertions.
- if (old_name_index != DescriptorArray::kNotFound && CONSTANT_TRANSITION ==
- PropertyDetails(old_descriptors->GetDetails(old_name_index)).type()) {
- constant_transition = true;
- Object* r = old_descriptors->CopyRemove(name);
- if (r->IsFailure()) return r;
- old_descriptors = DescriptorArray::cast(r);
- old_name_index = DescriptorArray::kNotFound;
- }
-
// Compute the new index for new field.
int index = map()->NextFreePropertyIndex();
@@ -1004,66 +975,43 @@
bool allow_map_transition =
!old_descriptors->Contains(name) &&
(Top::context()->global_context()->object_function()->map() != map());
- ASSERT(allow_map_transition || !constant_transition);
- if (map()->unused_property_fields() > 0) {
- ASSERT(index < properties()->length());
- // Allocate a new map for the object.
- Object* r = map()->Copy();
+ ASSERT(index < map()->inobject_properties() ||
+ (index - map()->inobject_properties()) < properties()->length() ||
+ map()->unused_property_fields() == 0);
+ // Allocate a new map for the object.
+ Object* r = map()->Copy();
+ if (r->IsFailure()) return r;
+ Map* new_map = Map::cast(r);
+ if (allow_map_transition) {
+ // Allocate new instance descriptors for the old map with map transition.
+ MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
+ Object* r = old_descriptors->CopyInsert(&d, KEEP_TRANSITIONS);
if (r->IsFailure()) return r;
- Map* new_map = Map::cast(r);
- if (allow_map_transition) {
- // Allocate new instance descriptors for the old map with map transition.
- MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
- Object* r = old_descriptors->CopyInsert(&d, KEEP_TRANSITIONS);
- if (r->IsFailure()) return r;
- old_descriptors = DescriptorArray::cast(r);
- }
- // We have now allocated all the necessary objects.
- // All the changes can be applied at once, so they are atomic.
- map()->set_instance_descriptors(old_descriptors);
- new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
- set_map(new_map);
- properties()->set(index, value);
- } else {
- ASSERT(map()->unused_property_fields() == 0);
+ old_descriptors = DescriptorArray::cast(r);
+ }
+ if (map()->unused_property_fields() == 0) {
if (properties()->length() > kMaxFastProperties) {
Object* obj = NormalizeProperties();
if (obj->IsFailure()) return obj;
return AddSlowProperty(name, value, attributes);
}
-
- static const int kExtraFields = 3;
// Make room for the new value
Object* values =
- properties()->CopySize(properties()->length() + kExtraFields);
+ properties()->CopySize(properties()->length() + kFieldsAdded);
if (values->IsFailure()) return values;
- FixedArray::cast(values)->set(index, value);
-
- // Allocate a new map for the object.
- Object* r = map()->Copy();
- if (r->IsFailure()) return r;
- Map* new_map = Map::cast(r);
-
- if (allow_map_transition) {
- MapTransitionDescriptor d(name, Map::cast(new_map), attributes);
- // Allocate new instance descriptors for the old map with map transition.
- Object* r = old_descriptors->CopyInsert(&d, KEEP_TRANSITIONS);
- if (r->IsFailure()) return r;
- old_descriptors = DescriptorArray::cast(r);
- }
- // We have now allocated all the necessary objects.
- // All changes can be done at once, atomically.
- map()->set_instance_descriptors(old_descriptors);
- new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- new_map->set_unused_property_fields(kExtraFields - 1);
- set_map(new_map);
set_properties(FixedArray::cast(values));
+ new_map->set_unused_property_fields(kFieldsAdded - 1);
+ } else {
+ new_map->set_unused_property_fields(map()->unused_property_fields() - 1);
}
-
- return value;
+ // We have now allocated all the necessary objects.
+ // All the changes can be applied at once, so they are atomic.
+ map()->set_instance_descriptors(old_descriptors);
+ new_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+ set_map(new_map);
+ return FastPropertyAtPut(index, value);
}
@@ -1115,74 +1063,6 @@
}
-Object* JSObject::ReplaceConstantFunctionProperty(String* name,
- Object* value) {
- // There are two situations to handle here:
- // 1: Replace a constant function with another function.
- // 2: Replace a constant function with an object.
- if (value->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(value);
-
- Object* new_map = map()->CopyDropTransitions();
- if (new_map->IsFailure()) return new_map;
- set_map(Map::cast(new_map));
-
- // Replace the function entry
- int index = map()->instance_descriptors()->Search(name);
- ASSERT(index != DescriptorArray::kNotFound);
- map()->instance_descriptors()->ReplaceConstantFunction(index, function);
- } else {
- // Allocate new instance descriptors with updated property index.
- int index = map()->NextFreePropertyIndex();
- Object* new_descriptors =
- map()->instance_descriptors()->CopyReplace(name, index, NONE);
- if (new_descriptors->IsFailure()) return new_descriptors;
-
- if (map()->unused_property_fields() > 0) {
- ASSERT(index < properties()->length());
-
- // Allocate a new map for the object.
- Object* new_map = map()->Copy();
- if (new_map->IsFailure()) return new_map;
-
- Map::cast(new_map)->
- set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- Map::cast(new_map)->
- set_unused_property_fields(map()->unused_property_fields()-1);
- set_map(Map::cast(new_map));
- properties()->set(index, value);
- } else {
- ASSERT(map()->unused_property_fields() == 0);
- static const int kFastNofProperties = 20;
- if (properties()->length() > kFastNofProperties) {
- Object* obj = NormalizeProperties();
- if (obj->IsFailure()) return obj;
- return SetProperty(name, value, NONE);
- }
-
- static const int kExtraFields = 5;
- // Make room for the more properties.
- Object* values =
- properties()->CopySize(properties()->length() + kExtraFields);
- if (values->IsFailure()) return values;
- FixedArray::cast(values)->set(index, value);
-
- // Allocate a new map for the object.
- Object* new_map = map()->Copy();
- if (new_map->IsFailure()) return new_map;
-
- Map::cast(new_map)->
- set_instance_descriptors(DescriptorArray::cast(new_descriptors));
- Map::cast(new_map)->
- set_unused_property_fields(kExtraFields - 1);
- set_map(Map::cast(new_map));
- set_properties(FixedArray::cast(values));
- }
- }
- return value;
-}
-
-
// Add property in slow mode
Object* JSObject::AddSlowProperty(String* name,
Object* value,
@@ -1234,6 +1114,102 @@
}
+Object* JSObject::ReplaceSlowProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes) {
+ Dictionary* dictionary = property_dictionary();
+ PropertyDetails old_details =
+ dictionary->DetailsAt(dictionary->FindStringEntry(name));
+ int new_index = old_details.index();
+ if (old_details.IsTransition()) new_index = 0;
+
+ PropertyDetails new_details(attributes, NORMAL, old_details.index());
+ Object* result =
+ property_dictionary()->SetOrAddStringEntry(name, value, new_details);
+ if (result->IsFailure()) return result;
+ if (property_dictionary() != result) {
+ set_properties(Dictionary::cast(result));
+ }
+ return value;
+}
+
+Object* JSObject::ConvertDescriptorToFieldAndMapTransition(
+ String* name,
+ Object* new_value,
+ PropertyAttributes attributes) {
+ Map* old_map = map();
+ Object* result = ConvertDescriptorToField(name, new_value, attributes);
+ if (result->IsFailure()) return result;
+ // If we get to this point we have succeeded - do not return failure
+ // after this point. Later stuff is optional.
+ if (!HasFastProperties()) {
+ return result;
+ }
+ // Do not add transitions to the map of "new Object()".
+ if (map() == Top::context()->global_context()->object_function()->map()) {
+ return result;
+ }
+
+ MapTransitionDescriptor transition(name,
+ map(),
+ attributes);
+ Object* new_descriptors =
+ old_map->instance_descriptors()->
+ CopyInsert(&transition, KEEP_TRANSITIONS);
+ if (new_descriptors->IsFailure()) return result; // Yes, return _result_.
+ old_map->set_instance_descriptors(DescriptorArray::cast(new_descriptors));
+ return result;
+}
+
+
+Object* JSObject::ConvertDescriptorToField(String* name,
+ Object* new_value,
+ PropertyAttributes attributes) {
+ if (map()->unused_property_fields() == 0 &&
+ properties()->length() > kMaxFastProperties) {
+ Object* obj = NormalizeProperties();
+ if (obj->IsFailure()) return obj;
+ return ReplaceSlowProperty(name, new_value, attributes);
+ }
+
+ int index = map()->NextFreePropertyIndex();
+ FieldDescriptor new_field(name, index, attributes);
+ // Make a new DescriptorArray replacing an entry with FieldDescriptor.
+ Object* descriptors_unchecked = map()->instance_descriptors()->
+ CopyInsert(&new_field, REMOVE_TRANSITIONS);
+ if (descriptors_unchecked->IsFailure()) return descriptors_unchecked;
+ DescriptorArray* new_descriptors =
+ DescriptorArray::cast(descriptors_unchecked);
+
+ // Make a new map for the object.
+ Object* new_map_unchecked = map()->Copy();
+ if (new_map_unchecked->IsFailure()) return new_map_unchecked;
+ Map* new_map = Map::cast(new_map_unchecked);
+ new_map->set_instance_descriptors(new_descriptors);
+
+ // Make new properties array if necessary.
+ FixedArray* new_properties = 0; // Will always be NULL or a valid pointer.
+ int new_unused_property_fields = map()->unused_property_fields() - 1;
+ if (map()->unused_property_fields() == 0) {
+ new_unused_property_fields = kFieldsAdded - 1;
+ Object* new_properties_unchecked =
+ properties()->CopySize(properties()->length() + kFieldsAdded);
+ if (new_properties_unchecked->IsFailure()) return new_properties_unchecked;
+ new_properties = FixedArray::cast(new_properties_unchecked);
+ }
+
+ // Update pointers to commit changes.
+ // Object points to the new map.
+ new_map->set_unused_property_fields(new_unused_property_fields);
+ set_map(new_map);
+ if (new_properties) {
+ set_properties(FixedArray::cast(new_properties));
+ }
+ return FastPropertyAtPut(index, new_value);
+}
+
+
+
Object* JSObject::SetPropertyWithInterceptor(String* name,
Object* value,
PropertyAttributes attributes) {
@@ -1394,7 +1370,7 @@
// Disallow caching for uninitialized constants. These can only
// occur as fields.
if (result->IsReadOnly() && result->type() == FIELD &&
- properties()->get(result->GetFieldIndex())->IsTheHole()) {
+ FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
result->DisallowCaching();
}
return;
@@ -1531,21 +1507,19 @@
property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
return value;
case FIELD:
- properties()->set(result->GetFieldIndex(), value);
- return value;
+ return FastPropertyAtPut(result->GetFieldIndex(), value);
case MAP_TRANSITION:
if (attributes == result->GetAttributes()) {
// Only use map transition if the attributes match.
return AddFastPropertyUsingMap(result->GetTransitionMap(),
name,
value);
- } else {
- return AddFastProperty(name, value, attributes);
}
+ return ConvertDescriptorToField(name, value, attributes);
case CONSTANT_FUNCTION:
if (value == result->GetConstantFunction()) return value;
// Only replace the function if necessary.
- return ReplaceConstantFunctionProperty(name, value);
+ return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case CALLBACKS:
return SetPropertyWithCallback(result->GetCallbackObject(),
name,
@@ -1556,10 +1530,9 @@
case CONSTANT_TRANSITION:
// Replace with a MAP_TRANSITION to a new map with a FIELD, even
// if the value is a function.
- // AddProperty has been extended to do this, in this case.
- return AddFastProperty(name, value, attributes);
+ return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case NULL_DESCRIPTOR:
- UNREACHABLE();
+ return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
default:
UNREACHABLE();
}
@@ -1591,40 +1564,20 @@
&& !Top::MayNamedAccess(this, name, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(result, name, value);
}
- /*
- REMOVED FROM CLONE
- if (result->IsNotFound() || !result->IsProperty()) {
- // We could not find a local property so let's check whether there is an
- // accessor that wants to handle the property.
- LookupResult accessor_result;
- LookupCallbackSetterInPrototypes(name, &accessor_result);
- if (accessor_result.IsValid()) {
- return SetPropertyWithCallback(accessor_result.GetCallbackObject(),
- name,
- value,
- accessor_result.holder());
- }
- }
- */
+ // Check for accessor in prototype chain removed here in clone.
if (result->IsNotFound()) {
return AddProperty(name, value, attributes);
}
if (!result->IsLoaded()) {
return SetLazyProperty(result, name, value, attributes);
}
- /*
- REMOVED FROM CLONE
- if (result->IsReadOnly() && result->IsProperty()) return value;
- */
- // This is a real property that is not read-only, or it is a
- // transition or null descriptor and there are no setters in the prototypes.
+ // Check of IsReadOnly removed from here in clone.
switch (result->type()) {
case NORMAL:
property_dictionary()->ValueAtPut(result->GetDictionaryEntry(), value);
return value;
case FIELD:
- properties()->set(result->GetFieldIndex(), value);
- return value;
+ return FastPropertyAtPut(result->GetFieldIndex(), value);
case MAP_TRANSITION:
if (attributes == result->GetAttributes()) {
// Only use map transition if the attributes match.
@@ -1632,12 +1585,12 @@
name,
value);
} else {
- return AddFastProperty(name, value, attributes);
+ return ConvertDescriptorToField(name, value, attributes);
}
case CONSTANT_FUNCTION:
if (value == result->GetConstantFunction()) return value;
// Only replace the function if necessary.
- return ReplaceConstantFunctionProperty(name, value);
+ return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case CALLBACKS:
return SetPropertyWithCallback(result->GetCallbackObject(),
name,
@@ -1648,10 +1601,9 @@
case CONSTANT_TRANSITION:
// Replace with a MAP_TRANSITION to a new map with a FIELD, even
// if the value is a function.
- // AddProperty has been extended to do this, in this case.
- return AddFastProperty(name, value, attributes);
+ return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
case NULL_DESCRIPTOR:
- UNREACHABLE();
+ return ConvertDescriptorToFieldAndMapTransition(name, value, attributes);
default:
UNREACHABLE();
}
@@ -1819,7 +1771,7 @@
case FIELD: {
PropertyDetails d =
PropertyDetails(details.attributes(), NORMAL, details.index());
- Object* value = properties()->get(r.GetFieldIndex());
+ Object* value = FastPropertyAt(r.GetFieldIndex());
Object* result = dictionary->AddStringEntry(r.GetKey(), value, d);
if (result->IsFailure()) return result;
dictionary = Dictionary::cast(result);
@@ -2374,7 +2326,7 @@
!r.eos();
r.advance()) {
if (r.type() == FIELD) {
- if (properties()->get(r.GetFieldIndex()) == value) {
+ if (FastPropertyAt(r.GetFieldIndex()) == value) {
return r.GetKey();
}
} else if (r.type() == CONSTANT_FUNCTION) {
@@ -2398,6 +2350,7 @@
// Don't copy descriptors, so map transitions always remain a forest.
Map::cast(result)->set_instance_descriptors(Heap::empty_descriptor_array());
// Please note instance_type and instance_size are set when allocated.
+ Map::cast(result)->set_inobject_properties(inobject_properties());
Map::cast(result)->set_unused_property_fields(unused_property_fields());
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->ClearCodeCache();
@@ -2674,14 +2627,6 @@
}
-void DescriptorArray::ReplaceConstantFunction(int descriptor_number,
- JSFunction* value) {
- ASSERT(!Heap::InNewSpace(value));
- FixedArray* content_array = GetContentArray();
- fast_set(content_array, ToValueIndex(descriptor_number), value);
-}
-
-
Object* DescriptorArray::CopyInsert(Descriptor* descriptor,
TransitionFlag transition_flag) {
// Transitions are only kept when inserting another transition.
@@ -2782,69 +2727,6 @@
}
-Object* DescriptorArray::CopyReplace(String* name,
- int index,
- PropertyAttributes attributes) {
- // Allocate the new descriptor array.
- Object* result = DescriptorArray::Allocate(number_of_descriptors());
- if (result->IsFailure()) return result;
-
- // Make sure only symbols are added to the instance descriptor.
- if (!name->IsSymbol()) {
- Object* result = Heap::LookupSymbol(name);
- if (result->IsFailure()) return result;
- name = String::cast(result);
- }
-
- DescriptorWriter w(DescriptorArray::cast(result));
- for (DescriptorReader r(this); !r.eos(); r.advance()) {
- if (r.Equals(name)) {
- FieldDescriptor d(name, index, attributes);
- d.SetEnumerationIndex(r.GetDetails().index());
- w.Write(&d);
- } else {
- w.WriteFrom(&r);
- }
- }
-
- // Copy the next enumeration index.
- DescriptorArray::cast(result)->
- SetNextEnumerationIndex(NextEnumerationIndex());
-
- ASSERT(w.eos());
- return result;
-}
-
-
-Object* DescriptorArray::CopyRemove(String* name) {
- if (!name->IsSymbol()) {
- Object* result = Heap::LookupSymbol(name);
- if (result->IsFailure()) return result;
- name = String::cast(result);
- }
- ASSERT(name->IsSymbol());
- Object* result = Allocate(number_of_descriptors() - 1);
- if (result->IsFailure()) return result;
- DescriptorArray* new_descriptors = DescriptorArray::cast(result);
-
- // Set the enumeration index in the descriptors and set the enumeration index
- // in the result.
- new_descriptors->SetNextEnumerationIndex(NextEnumerationIndex());
- // Write the old content and the descriptor information
- DescriptorWriter w(new_descriptors);
- DescriptorReader r(this);
- while (!r.eos()) {
- if (r.GetKey() != name) { // Both are symbols; object identity suffices.
- w.WriteFrom(&r);
- }
- r.advance();
- }
- ASSERT(w.eos());
-
- return new_descriptors;
-}
-
-
Object* DescriptorArray::RemoveTransitions() {
// Remove all transitions. Return a copy of the array with all transitions
// removed, or a Failure object if the new array could not be allocated.
@@ -2982,7 +2864,7 @@
int String::Utf8Length() {
- if (is_ascii()) return length();
+ if (is_ascii_representation()) return length();
// Attempt to flatten before accessing the string. It probably
// doesn't make Utf8Length faster, but it is very likely that
// the string will be accessed later (for example by WriteUtf8)
@@ -2997,6 +2879,69 @@
}
+Vector<const char> String::ToAsciiVector() {
+ ASSERT(IsAsciiRepresentation());
+ ASSERT(IsFlat());
+
+ int offset = 0;
+ int length = this->length();
+ StringRepresentationTag string_tag = representation_tag();
+ String* string = this;
+ if (string_tag == kSlicedStringTag) {
+ SlicedString* sliced = SlicedString::cast(string);
+ offset += sliced->start();
+ string = String::cast(sliced->buffer());
+ string_tag = string->representation_tag();
+ } else if (string_tag == kConsStringTag) {
+ ConsString* cons = ConsString::cast(string);
+ ASSERT(String::cast(cons->second())->length() == 0);
+ string = String::cast(cons->first());
+ string_tag = string->representation_tag();
+ }
+ if (string_tag == kSeqStringTag) {
+ SeqAsciiString* seq = SeqAsciiString::cast(string);
+ char* start = reinterpret_cast<char*>(seq->GetCharsAddress());
+ return Vector<const char>(start + offset, length);
+ }
+ ASSERT(string_tag == kExternalStringTag);
+ ExternalAsciiString* ext = ExternalAsciiString::cast(string);
+ const char* start = ext->resource()->data();
+ return Vector<const char>(start + offset, length);
+}
+
+
+Vector<const uc16> String::ToUC16Vector() {
+ ASSERT(IsTwoByteStringRepresentation());
+ ASSERT(IsFlat());
+
+ int offset = 0;
+ int length = this->length();
+ StringRepresentationTag string_tag = representation_tag();
+ String* string = this;
+ if (string_tag == kSlicedStringTag) {
+ SlicedString* sliced = SlicedString::cast(string);
+ offset += sliced->start();
+ string = String::cast(sliced->buffer());
+ string_tag = string->representation_tag();
+ } else if (string_tag == kConsStringTag) {
+ ConsString* cons = ConsString::cast(string);
+ ASSERT(String::cast(cons->second())->length() == 0);
+ string = String::cast(cons->first());
+ string_tag = string->representation_tag();
+ }
+ if (string_tag == kSeqStringTag) {
+ SeqTwoByteString* seq = SeqTwoByteString::cast(string);
+ uc16* start = reinterpret_cast<uc16*>(seq->GetCharsAddress());
+ return Vector<const uc16>(start + offset, length);
+ }
+ ASSERT(string_tag == kExternalStringTag);
+ ExternalTwoByteString* ext = ExternalTwoByteString::cast(string);
+ const uc16* start =
+ reinterpret_cast<const uc16*>(ext->resource()->data());
+ return Vector<const uc16>(start + offset, length);
+}
+
+
SmartPointer<char> String::ToCString(AllowNullsFlag allow_nulls,
RobustnessFlag robust_flag,
int offset,
@@ -3063,10 +3008,10 @@
const uc16* String::GetTwoByteData(unsigned start) {
- ASSERT(!IsAscii());
+ ASSERT(!IsAsciiRepresentation());
switch (representation_tag()) {
case kSeqStringTag:
- return TwoByteString::cast(this)->TwoByteStringGetData(start);
+ return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
case kExternalStringTag:
return ExternalTwoByteString::cast(this)->
ExternalTwoByteStringGetData(start);
@@ -3112,13 +3057,13 @@
}
-const uc16* TwoByteString::TwoByteStringGetData(unsigned start) {
+const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) {
return reinterpret_cast<uc16*>(
reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize) + start;
}
-void TwoByteString::TwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
unsigned* offset_ptr,
unsigned max_chars) {
unsigned chars_read = 0;
@@ -3151,9 +3096,10 @@
}
-const unibrow::byte* AsciiString::AsciiStringReadBlock(unsigned* remaining,
- unsigned* offset_ptr,
- unsigned max_chars) {
+const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock(
+ unsigned* remaining,
+ unsigned* offset_ptr,
+ unsigned max_chars) {
// Cast const char* to unibrow::byte* (signedness difference).
const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) -
kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize;
@@ -3313,7 +3259,7 @@
}
-void AsciiString::AsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
unsigned* offset_ptr,
unsigned max_chars) {
unsigned capacity = rbb->capacity - rbb->cursor;
@@ -3358,14 +3304,16 @@
}
switch (input->representation_tag()) {
case kSeqStringTag:
- if (input->is_ascii()) {
- return AsciiString::cast(input)->AsciiStringReadBlock(&rbb->remaining,
- offset_ptr,
- max_chars);
+ if (input->is_ascii_representation()) {
+ SeqAsciiString* str = SeqAsciiString::cast(input);
+ return str->SeqAsciiStringReadBlock(&rbb->remaining,
+ offset_ptr,
+ max_chars);
} else {
- TwoByteString::cast(input)->TwoByteStringReadBlockIntoBuffer(rbb,
- offset_ptr,
- max_chars);
+ SeqTwoByteString* str = SeqTwoByteString::cast(input);
+ str->SeqTwoByteStringReadBlockIntoBuffer(rbb,
+ offset_ptr,
+ max_chars);
return rbb->util_buffer;
}
case kConsStringTag:
@@ -3377,7 +3325,7 @@
offset_ptr,
max_chars);
case kExternalStringTag:
- if (input->is_ascii()) {
+ if (input->is_ascii_representation()) {
return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
&rbb->remaining,
offset_ptr,
@@ -3421,13 +3369,13 @@
switch (input->representation_tag()) {
case kSeqStringTag:
- if (input->is_ascii()) {
- AsciiString::cast(input)->AsciiStringReadBlockIntoBuffer(rbb,
+ if (input->is_ascii_representation()) {
+ SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb,
offset_ptr,
max_chars);
return;
} else {
- TwoByteString::cast(input)->TwoByteStringReadBlockIntoBuffer(rbb,
+ SeqTwoByteString::cast(input)->SeqTwoByteStringReadBlockIntoBuffer(rbb,
offset_ptr,
max_chars);
return;
@@ -3443,7 +3391,7 @@
max_chars);
return;
case kExternalStringTag:
- if (input->is_ascii()) {
+ if (input->is_ascii_representation()) {
ExternalAsciiString::cast(input)->
ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
} else {
@@ -3619,7 +3567,11 @@
}
-void String::Flatten(String* src, String* sink, int f, int t, int so) {
+void String::Flatten(String* src,
+ String* sink,
+ int f,
+ int t,
+ int so) {
String* source = src;
int from = f;
int to = t;
@@ -3634,7 +3586,8 @@
buffer->Reset(from, source);
int j = sink_offset;
for (int i = from; i < to; i++) {
- sink->Set(j++, buffer->GetNext());
+ uc32 c = buffer->GetNext();
+ sink->Set(j++, c);
}
return;
}
@@ -3650,7 +3603,7 @@
ConsString* cons_string = ConsString::cast(source);
String* first = String::cast(cons_string->first());
int boundary = first->length();
- if (to - boundary > boundary - from) {
+ if (to - boundary >= boundary - from) {
// Right hand side is longer. Recurse over left.
if (from < boundary) {
Flatten(first, sink, from, boundary, sink_offset);
@@ -3662,7 +3615,9 @@
to -= boundary;
source = String::cast(cons_string->second());
} else {
- // Left hand side is longer. Recurse over right.
+ // Left hand side is longer. Recurse over right. The hasher
+ // needs us to visit the string from left to right so doing
+ // this invalidates that hash.
if (to > boundary) {
String* second = String::cast(cons_string->second());
Flatten(second,
@@ -3693,6 +3648,43 @@
}
+template <typename IteratorA, typename IteratorB>
+static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) {
+ // General slow case check. We know that the ia and ib iterators
+ // have the same length.
+ while (ia->has_more()) {
+ uc32 ca = ia->GetNext();
+ uc32 cb = ib->GetNext();
+ if (ca != cb)
+ return false;
+ }
+ return true;
+}
+
+
+static StringInputBuffer string_compare_buffer_b;
+
+
+template <typename IteratorA>
+static inline bool CompareStringContentsPartial(IteratorA* ia, String* b) {
+ if (b->IsFlat()) {
+ if (b->IsAsciiRepresentation()) {
+ VectorIterator<char> ib(b->ToAsciiVector());
+ return CompareStringContents(ia, &ib);
+ } else {
+ VectorIterator<uc16> ib(b->ToUC16Vector());
+ return CompareStringContents(ia, &ib);
+ }
+ } else {
+ string_compare_buffer_b.Reset(0, b);
+ return CompareStringContents(ia, &string_compare_buffer_b);
+ }
+}
+
+
+static StringInputBuffer string_compare_buffer_a;
+
+
bool String::SlowEquals(String* other) {
// Fast check: negative check with lengths.
int len = length();
@@ -3705,24 +3697,18 @@
if (Hash() != other->Hash()) return false;
}
- // Fast case: avoid input buffers for small strings.
- const int kMaxLenthForFastCaseCheck = 5;
- for (int i = 0; i < kMaxLenthForFastCaseCheck; i++) {
- if (Get(i) != other->Get(i)) return false;
- if (i + 1 == len) return true;
- }
-
- // General slow case check.
- static StringInputBuffer buf1;
- static StringInputBuffer buf2;
- buf1.Reset(kMaxLenthForFastCaseCheck, this);
- buf2.Reset(kMaxLenthForFastCaseCheck, other);
- while (buf1.has_more()) {
- if (buf1.GetNext() != buf2.GetNext()) {
- return false;
+ if (this->IsFlat()) {
+ if (this->IsAsciiRepresentation()) {
+ VectorIterator<char> buf1(this->ToAsciiVector());
+ return CompareStringContentsPartial(&buf1, other);
+ } else {
+ VectorIterator<uc16> buf1(this->ToUC16Vector());
+ return CompareStringContentsPartial(&buf1, other);
}
+ } else {
+ string_compare_buffer_a.Reset(0, this);
+ return CompareStringContentsPartial(&string_compare_buffer_a, other);
}
- return true;
}
@@ -3773,14 +3759,14 @@
// Compute the hash code.
StringInputBuffer buffer(this);
- int hash = ComputeHashCode(&buffer, length());
+ uint32_t field = ComputeLengthAndHashField(&buffer, length());
// Store the hash code in the object.
- set_length_field(hash);
+ set_length_field(field);
// Check the hash code is there.
ASSERT(length_field() & kHashComputedMask);
- return hash;
+ return field >> kHashShift;
}
@@ -3825,39 +3811,45 @@
}
-uint32_t String::ComputeHashCode(unibrow::CharacterStream* buffer,
- int length) {
- // Large string (please note large strings cannot be an array index).
- if (length > kMaxMediumStringSize) return HashField(length, false);
-
- // Note: the Jenkins one-at-a-time hash function
- uint32_t hash = 0;
- while (buffer->has_more()) {
- uc32 r = buffer->GetNext();
- hash += r;
- hash += (hash << 10);
- hash ^= (hash >> 6);
+uint32_t StringHasher::GetHashField() {
+ ASSERT(is_valid());
+ if (length_ <= String::kMaxShortStringSize) {
+ uint32_t payload;
+ if (is_array_index()) {
+ payload = v8::internal::HashField(array_index(), true);
+ } else {
+ payload = v8::internal::HashField(GetHash(), false);
+ }
+ return (payload & 0x00FFFFFF) | (length_ << String::kShortLengthShift);
+ } else if (length_ <= String::kMaxMediumStringSize) {
+ uint32_t payload = v8::internal::HashField(GetHash(), false);
+ return (payload & 0x0000FFFF) | (length_ << String::kMediumLengthShift);
+ } else {
+ return v8::internal::HashField(length_, false);
}
- hash += (hash << 3);
- hash ^= (hash >> 11);
- hash += (hash << 15);
+}
- // Short string.
- if (length <= kMaxShortStringSize) {
- // Make hash value consistent with value returned from String::Hash.
- buffer->Rewind();
- uint32_t index;
- hash = HashField(hash, ComputeArrayIndex(buffer, &index, length));
- hash = (hash & 0x00FFFFFF) | (length << kShortLengthShift);
- return hash;
- }
- // Medium string (please note medium strings cannot be an array index).
- ASSERT(length <= kMaxMediumStringSize);
- // Make hash value consistent with value returned from String::Hash.
- hash = HashField(hash, false);
- hash = (hash & 0x0000FFFF) | (length << kMediumLengthShift);
- return hash;
+uint32_t String::ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
+ int length) {
+ StringHasher hasher(length);
+
+ // Very long strings have a trivial hash that doesn't inspect the
+ // string contents.
+ if (hasher.has_trivial_hash())
+ return hasher.GetHashField();
+
+ // Do the iterative array index computation as long as there is a
+ // chance this is an array index.
+ while (buffer->has_more() && hasher.is_array_index())
+ hasher.AddCharacter(buffer->GetNext());
+
+ // Process the remaining characters without updating the array
+ // index.
+ while (buffer->has_more())
+ hasher.AddCharacterNoIndex(buffer->GetNext());
+
+ return hasher.GetHashField();
}
@@ -4381,12 +4373,6 @@
}
-void JSArray::SetContent(FixedArray* storage) {
- set_length(Smi::FromInt(storage->length()));
- set_elements(storage);
-}
-
-
// Computes the new capacity when expanding the elements of a JSObject.
static int NewElementsCapacity(int old_capacity) {
// (old_capacity + 50%) + 16
@@ -4695,7 +4681,6 @@
return SetElement(index, value);
}
-
Object* JSObject::SetElement(uint32_t index, Object* value) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -5430,7 +5415,7 @@
class Utf8SymbolKey : public HashTableKey {
public:
explicit Utf8SymbolKey(Vector<const char> string)
- : string_(string), hash_(0) { }
+ : string_(string), length_field_(0) { }
bool IsMatch(Object* other) {
if (!other->IsString()) return false;
@@ -5442,19 +5427,19 @@
}
uint32_t Hash() {
- if (hash_ != 0) return hash_;
+ if (length_field_ != 0) return length_field_ >> String::kHashShift;
unibrow::Utf8InputBuffer<> buffer(string_.start(),
static_cast<unsigned>(string_.length()));
chars_ = buffer.Length();
- hash_ = String::ComputeHashCode(&buffer, chars_);
- return hash_;
+ length_field_ = String::ComputeLengthAndHashField(&buffer, chars_);
+ return length_field_ >> String::kHashShift;
}
Object* GetObject() {
- if (hash_ == 0) Hash();
+ if (length_field_ == 0) Hash();
unibrow::Utf8InputBuffer<> buffer(string_.start(),
static_cast<unsigned>(string_.length()));
- return Heap::AllocateSymbol(&buffer, chars_, hash_);
+ return Heap::AllocateSymbol(&buffer, chars_, length_field_);
}
static uint32_t StringHash(Object* obj) {
@@ -5464,7 +5449,7 @@
bool IsStringKey() { return true; }
Vector<const char> string_;
- uint32_t hash_;
+ uint32_t length_field_;
int chars_; // Caches the number of characters when computing the hash code.
};
@@ -5503,7 +5488,9 @@
}
// Otherwise allocate a new symbol.
StringInputBuffer buffer(string_);
- return Heap::AllocateSymbol(&buffer, string_->length(), string_->Hash());
+ return Heap::AllocateSymbol(&buffer,
+ string_->length(),
+ string_->length_field());
}
static uint32_t StringHash(Object* obj) {
@@ -5641,6 +5628,20 @@
}
+bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) {
+ SymbolKey key(string);
+ int entry = FindEntry(&key);
+ if (entry == -1) {
+ return false;
+ } else {
+ String* result = String::cast(KeyAt(entry));
+ ASSERT(result->is_symbol());
+ *symbol = result;
+ return true;
+ }
+}
+
+
Object* SymbolTable::LookupSymbol(Vector<const char> str, Object** s) {
Utf8SymbolKey key(str);
return LookupKey(&key, s);
diff --git a/src/objects.h b/src/objects.h
index 9aaa24b..3f0150e 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -61,8 +61,8 @@
// - GlobalContext
// - String
// - SeqString
-// - AsciiString
-// - TwoByteString
+// - SeqAsciiString
+// - SeqTwoByteString
// - ConsString
// - SlicedString
// - ExternalString
@@ -272,12 +272,12 @@
// Since string types are not consecutive, this macro is used to
// iterate over them.
#define STRING_TYPE_LIST(V) \
- V(SHORT_SYMBOL_TYPE, TwoByteString::kHeaderSize, short_symbol) \
- V(MEDIUM_SYMBOL_TYPE, TwoByteString::kHeaderSize, medium_symbol) \
- V(LONG_SYMBOL_TYPE, TwoByteString::kHeaderSize, long_symbol) \
- V(SHORT_ASCII_SYMBOL_TYPE, AsciiString::kHeaderSize, short_ascii_symbol) \
- V(MEDIUM_ASCII_SYMBOL_TYPE, AsciiString::kHeaderSize, medium_ascii_symbol) \
- V(LONG_ASCII_SYMBOL_TYPE, AsciiString::kHeaderSize, long_ascii_symbol) \
+ V(SHORT_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, short_symbol) \
+ V(MEDIUM_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, medium_symbol) \
+ V(LONG_SYMBOL_TYPE, SeqTwoByteString::kHeaderSize, long_symbol) \
+ V(SHORT_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, short_ascii_symbol) \
+ V(MEDIUM_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, medium_ascii_symbol)\
+ V(LONG_ASCII_SYMBOL_TYPE, SeqAsciiString::kHeaderSize, long_ascii_symbol) \
V(SHORT_CONS_SYMBOL_TYPE, ConsString::kSize, short_cons_symbol) \
V(MEDIUM_CONS_SYMBOL_TYPE, ConsString::kSize, medium_cons_symbol) \
V(LONG_CONS_SYMBOL_TYPE, ConsString::kSize, long_cons_symbol) \
@@ -314,12 +314,12 @@
V(LONG_EXTERNAL_ASCII_SYMBOL_TYPE, \
ExternalAsciiString::kSize, \
long_external_ascii_symbol) \
- V(SHORT_STRING_TYPE, TwoByteString::kHeaderSize, short_string) \
- V(MEDIUM_STRING_TYPE, TwoByteString::kHeaderSize, medium_string) \
- V(LONG_STRING_TYPE, TwoByteString::kHeaderSize, long_string) \
- V(SHORT_ASCII_STRING_TYPE, AsciiString::kHeaderSize, short_ascii_string) \
- V(MEDIUM_ASCII_STRING_TYPE, AsciiString::kHeaderSize, medium_ascii_string) \
- V(LONG_ASCII_STRING_TYPE, AsciiString::kHeaderSize, long_ascii_string) \
+ V(SHORT_STRING_TYPE, SeqTwoByteString::kHeaderSize, short_string) \
+ V(MEDIUM_STRING_TYPE, SeqTwoByteString::kHeaderSize, medium_string) \
+ V(LONG_STRING_TYPE, SeqTwoByteString::kHeaderSize, long_string) \
+ V(SHORT_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, short_ascii_string) \
+ V(MEDIUM_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, medium_ascii_string)\
+ V(LONG_ASCII_STRING_TYPE, SeqAsciiString::kHeaderSize, long_ascii_string) \
V(SHORT_CONS_STRING_TYPE, ConsString::kSize, short_cons_string) \
V(MEDIUM_CONS_STRING_TYPE, ConsString::kSize, medium_cons_string) \
V(LONG_CONS_STRING_TYPE, ConsString::kSize, long_cons_string) \
@@ -584,8 +584,10 @@
inline bool IsHeapNumber();
inline bool IsString();
inline bool IsSeqString();
- inline bool IsAsciiString();
- inline bool IsTwoByteString();
+ inline bool IsAsciiStringRepresentation();
+ inline bool IsTwoByteStringRepresentation();
+ inline bool IsSeqAsciiString();
+ inline bool IsSeqTwoByteString();
inline bool IsConsString();
inline bool IsSlicedString();
inline bool IsExternalString();
@@ -599,6 +601,7 @@
inline bool IsByteArray();
inline bool IsFailure();
inline bool IsRetryAfterGC();
+ inline bool IsOutOfMemoryFailure();
inline bool IsException();
inline bool IsJSObject();
inline bool IsMap();
@@ -1301,9 +1304,26 @@
JSFunction* function,
PropertyAttributes attributes);
- // Replace a constant function property on a fast-case object.
- Object* ReplaceConstantFunctionProperty(String* name,
- Object* value);
+ Object* ReplaceSlowProperty(String* name,
+ Object* value,
+ PropertyAttributes attributes);
+
+ // Converts a descriptor of any other type to a real field,
+ // backed by the properties array. Descriptors of visible
+ // types, such as CONSTANT_FUNCTION, keep their enumeration order.
+ // Converts the descriptor on the original object's map to a
+ // map transition, and the the new field is on the object's new map.
+ Object* ConvertDescriptorToFieldAndMapTransition(
+ String* name,
+ Object* new_value,
+ PropertyAttributes attributes);
+
+ // Converts a descriptor of any other type to a real field,
+ // backed by the properties array. Descriptors of visible
+ // types, such as CONSTANT_FUNCTION, keep their enumeration order.
+ Object* ConvertDescriptorToField(String* name,
+ Object* new_value,
+ PropertyAttributes attributes);
// Add a property to a fast-case object.
Object* AddFastProperty(String* name,
@@ -1329,6 +1349,11 @@
// Returns failure if allocation failed.
Object* TransformToFastProperties(int unused_property_fields);
+ // Access fast-case object properties at index.
+ inline Object* FastPropertyAt(int index);
+ inline Object* FastPropertyAtPut(int index, Object* value);
+
+
// initializes the body after properties slot, properties slot is
// initialized by set_properties
// Note: this call does not update write barrier, it is caller's
@@ -1375,6 +1400,11 @@
static const uint32_t kMaxGap = 1024;
static const int kMaxFastElementsLength = 5000;
static const int kMaxFastProperties = 8;
+ static const int kMaxInstanceSize = 255 * kPointerSize;
+ // When extending the backing storage for property values, we increase
+ // its size by more than the 1 entry necessary, so sequentially adding fields
+ // to the same object requires fewer allocations and copies.
+ static const int kFieldsAdded = 3;
// Layout description.
static const int kPropertiesOffset = HeapObject::kHeaderSize;
@@ -1560,7 +1590,6 @@
inline void Get(int descriptor_number, Descriptor* desc);
inline void Set(int descriptor_number, Descriptor* desc);
- void ReplaceConstantFunction(int descriptor_number, JSFunction* value);
// Copy the descriptor array, insert a new descriptor and optionally
// remove map transitions. If the descriptor is already present, it is
@@ -1570,20 +1599,6 @@
// a transition, they must not be removed. All null descriptors are removed.
Object* CopyInsert(Descriptor* descriptor, TransitionFlag transition_flag);
- // Makes a copy of the descriptor array with the descriptor with key name
- // removed. If name is the empty string, the descriptor array is copied.
- // Transitions are removed if TransitionFlag is REMOVE_TRANSITIONS.
- // All null descriptors are removed.
- Object* CopyRemove(TransitionFlag remove_transitions, String* name);
-
- // Copy the descriptor array, replace the property index and attributes
- // of the named property, but preserve its enumeration index.
- Object* CopyReplace(String* name, int index, PropertyAttributes attributes);
-
- // Copy the descriptor array, removing the property index and attributes
- // of the named property.
- Object* CopyRemove(String* name);
-
// Remove all transitions. Return a copy of the array with all transitions
// removed, or a Failure object if the new array could not be allocated.
Object* RemoveTransitions();
@@ -1815,6 +1830,11 @@
Object* LookupSymbol(Vector<const char> str, Object** s);
Object* LookupString(String* key, Object** s);
+ // Looks up a symbol that is equal to the given string and returns
+ // true if it is found, assigning the symbol to the given output
+ // parameter.
+ bool LookupSymbolIfExists(String* str, String** symbol);
+
// Casting.
static inline SymbolTable* cast(Object* obj);
@@ -2240,6 +2260,10 @@
inline int instance_size();
inline void set_instance_size(int value);
+ // Count of properties allocated in the object.
+ inline int inobject_properties();
+ inline void set_inobject_properties(int value);
+
// instance type.
inline InstanceType instance_type();
inline void set_instance_type(InstanceType value);
@@ -2382,7 +2406,8 @@
#endif
// Layout description.
- static const int kInstanceAttributesOffset = HeapObject::kHeaderSize;
+ static const int kInstanceSizesOffset = HeapObject::kHeaderSize;
+ static const int kInstanceAttributesOffset = kInstanceSizesOffset + kIntSize;
static const int kPrototypeOffset = kInstanceAttributesOffset + kIntSize;
static const int kConstructorOffset = kPrototypeOffset + kPointerSize;
static const int kInstanceDescriptorsOffset =
@@ -2390,11 +2415,17 @@
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kSize = kCodeCacheOffset + kIntSize;
+ // Byte offsets within kInstanceSizesOffset.
+ static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
+ static const int kInObjectPropertiesOffset = kInstanceSizesOffset + 1;
+ // The bytes at positions 2 and 3 are not in use at the moment.
+
+
// Byte offsets within kInstanceAttributesOffset attributes.
- static const int kInstanceSizeOffset = kInstanceAttributesOffset + 0;
- static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
- static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 2;
- static const int kBitFieldOffset = kInstanceAttributesOffset + 3;
+ static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
+ static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 1;
+ static const int kBitFieldOffset = kInstanceAttributesOffset + 2;
+ // The byte at position 3 is not in use at the moment.
// Bit positions for bit field.
static const int kUnused = 0; // To be used for marking recently used maps.
@@ -2839,6 +2870,52 @@
enum RobustnessFlag {ROBUST_STRING_TRAVERSAL, FAST_STRING_TRAVERSAL};
+class StringHasher {
+ public:
+ inline StringHasher(int length);
+
+ // Returns true if the hash of this string can be computed without
+ // looking at the contents.
+ inline bool has_trivial_hash();
+
+ // Add a character to the hash and update the array index calculation.
+ inline void AddCharacter(uc32 c);
+
+ // Adds a character to the hash but does not update the array index
+ // calculation. This can only be called when it has been verified
+ // that the input is not an array index.
+ inline void AddCharacterNoIndex(uc32 c);
+
+ // Returns the value to store in the hash field of a string with
+ // the given length and contents.
+ uint32_t GetHashField();
+
+ // Returns true if the characters seen so far make up a legal array
+ // index.
+ bool is_array_index() { return is_array_index_; }
+
+ bool is_valid() { return is_valid_; }
+
+ void invalidate() { is_valid_ = false; }
+
+ private:
+
+ uint32_t array_index() {
+ ASSERT(is_array_index());
+ return array_index_;
+ }
+
+ inline uint32_t GetHash();
+
+ int length_;
+ uint32_t raw_running_hash_;
+ uint32_t array_index_;
+ bool is_array_index_;
+ bool is_first_char_;
+ bool is_valid_;
+};
+
+
// The String abstract class captures JavaScript string values:
//
// Ecma-262:
@@ -2857,8 +2934,8 @@
// that the length field is also used to cache the hash value of
// strings. In order to get or set the actual length of the string
// use the length() and set_length methods.
- inline int length_field();
- inline void set_length_field(int value);
+ inline uint32_t length_field();
+ inline void set_length_field(uint32_t value);
// Get and set individual two byte chars in the string.
inline void Set(int index, uint16_t value);
@@ -2877,13 +2954,16 @@
inline void TryFlatten();
// Is this string an ascii string.
- inline bool IsAscii();
+ inline bool IsAsciiRepresentation();
// Fast testing routines that assume the receiver is a string and
// just check whether it is a certain kind of string.
inline bool StringIsSlicedString();
inline bool StringIsConsString();
+ Vector<const char> ToAsciiVector();
+ Vector<const uc16> ToUC16Vector();
+
// Mark the string as an undetectable object. It only applies to
// ascii and two byte string types.
bool MarkAsUndetectable();
@@ -2930,7 +3010,9 @@
// Returns a hash value used for the property table
inline uint32_t Hash();
- static uint32_t ComputeHashCode(unibrow::CharacterStream* buffer, int length);
+ static uint32_t ComputeLengthAndHashField(unibrow::CharacterStream* buffer,
+ int length);
+
static bool ComputeArrayIndex(unibrow::CharacterStream* buffer,
uint32_t* index,
int length);
@@ -2952,8 +3034,8 @@
static inline bool is_symbol_map(Map* map);
// True if the string is ASCII.
- inline bool is_ascii();
- static inline bool is_ascii_map(Map* map);
+ inline bool is_ascii_representation();
+ static inline bool is_ascii_representation_map(Map* map);
// Get the representation tag.
inline StringRepresentationTag representation_tag();
@@ -2978,14 +3060,11 @@
static const int kMaxShortStringSize = 255;
static const int kMaxMediumStringSize = 65535;
+ static const int kMaxArrayIndexSize = 10;
+
// Max ascii char code.
static const int kMaxAsciiCharCode = 127;
- // Shift constants for retriving length from length/hash field.
- static const int kShortLengthShift = 3 * kBitsPerByte;
- static const int kMediumLengthShift = 2 * kBitsPerByte;
- static const int kLongLengthShift = 2;
-
// Mask constant for checking if a string has a computed hash code
// and if it is an array index. The least significant bit indicates
// whether a hash code has been computed. If the hash code has been
@@ -2993,6 +3072,15 @@
// array index.
static const int kHashComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
+ static const int kNofLengthBitFields = 2;
+
+ // Shift constants for retrieving length and hash code from
+ // length/hash field.
+ static const int kHashShift = kNofLengthBitFields;
+ static const int kShortLengthShift = 3 * kBitsPerByte;
+ static const int kMediumLengthShift = 2 * kBitsPerByte;
+ static const int kLongLengthShift = kHashShift;
+
// Limit for truncation in short printing.
static const int kMaxShortPrintLength = 1024;
@@ -3086,22 +3174,22 @@
// The AsciiString class captures sequential ascii string objects.
// Each character in the AsciiString is an ascii character.
-class AsciiString: public SeqString {
+class SeqAsciiString: public SeqString {
public:
// Dispatched behavior.
- inline uint16_t AsciiStringGet(int index);
- inline void AsciiStringSet(int index, uint16_t value);
+ inline uint16_t SeqAsciiStringGet(int index);
+ inline void SeqAsciiStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
inline Address GetCharsAddress();
// Casting
- static inline AsciiString* cast(Object* obj);
+ static inline SeqAsciiString* cast(Object* obj);
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of an AsciiString
// instance.
- inline int AsciiStringSize(Map* map);
+ inline int SeqAsciiStringSize(Map* map);
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
@@ -3112,36 +3200,39 @@
static const int kHeaderSize = String::kSize;
// Support for StringInputBuffer.
- inline void AsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset,
- unsigned chars);
- inline const unibrow::byte* AsciiStringReadBlock(unsigned* remaining,
- unsigned* offset,
- unsigned chars);
+ inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset,
+ unsigned chars);
+ inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining,
+ unsigned* offset,
+ unsigned chars);
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(AsciiString);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
};
// The TwoByteString class captures sequential unicode string objects.
// Each character in the TwoByteString is a two-byte uint16_t.
-class TwoByteString: public SeqString {
+class SeqTwoByteString: public SeqString {
public:
// Dispatched behavior.
- inline uint16_t TwoByteStringGet(int index);
- inline void TwoByteStringSet(int index, uint16_t value);
+ inline uint16_t SeqTwoByteStringGet(int index);
+ inline void SeqTwoByteStringSet(int index, uint16_t value);
+
+ // Get the address of the characters in this string.
+ inline Address GetCharsAddress();
// For regexp code.
- const uint16_t* TwoByteStringGetData(unsigned start);
+ const uint16_t* SeqTwoByteStringGetData(unsigned start);
// Casting
- static inline TwoByteString* cast(Object* obj);
+ static inline SeqTwoByteString* cast(Object* obj);
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of a TwoByteString
// instance.
- inline int TwoByteStringSize(Map* map);
+ inline int SeqTwoByteStringSize(Map* map);
// Computes the size for a TwoByteString instance of a given length.
static int SizeFor(int length) {
@@ -3152,12 +3243,12 @@
static const int kHeaderSize = String::kSize;
// Support for StringInputBuffer.
- inline void TwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
- unsigned* offset_ptr,
- unsigned chars);
+ inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ unsigned* offset_ptr,
+ unsigned chars);
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TwoByteString);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString);
};
@@ -3369,6 +3460,19 @@
};
+template <typename T>
+class VectorIterator {
+ public:
+ VectorIterator(T* d, int l) : data_(Vector<const T>(d, l)), index_(0) { }
+ explicit VectorIterator(Vector<const T> data) : data_(data), index_(0) { }
+ T GetNext() { return data_[index_++]; }
+ bool has_more() { return index_ < data_.length(); }
+ private:
+ Vector<const T> data_;
+ int index_;
+};
+
+
// The Oddball describes objects null, undefined, true, and false.
class Oddball: public HeapObject {
public:
@@ -3447,7 +3551,7 @@
Object* Initialize(int capacity);
// Set the content of the array to the content of storage.
- void SetContent(FixedArray* storage);
+ inline void SetContent(FixedArray* storage);
// Support for sorting
Object* RemoveHoles();
diff --git a/src/property.h b/src/property.h
index 4b6de3a..1c9b0d2 100644
--- a/src/property.h
+++ b/src/property.h
@@ -198,7 +198,6 @@
lookup_type_ = NOT_FOUND;
}
-
JSObject* holder() {
ASSERT(IsValid());
return holder_;
diff --git a/src/runtime.cc b/src/runtime.cc
index 25ac6f3..8e329b0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -42,6 +42,7 @@
#include "runtime.h"
#include "scopeinfo.h"
#include "v8threads.h"
+#include "smart-pointer.h"
namespace v8 { namespace internal {
@@ -220,6 +221,30 @@
return JSObject::cast(obj)->class_name();
}
+inline static Object* IsSpecificClassOf(Arguments args, String* name) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ Object* obj = args[0];
+ if (obj->IsJSObject() && (JSObject::cast(obj)->class_name() == name)) {
+ return Heap::true_value();
+ }
+ return Heap::false_value();
+}
+
+static Object* Runtime_IsStringClass(Arguments args) {
+ return IsSpecificClassOf(args, Heap::String_symbol());
+}
+
+
+static Object* Runtime_IsDateClass(Arguments args) {
+ return IsSpecificClassOf(args, Heap::Date_symbol());
+}
+
+
+static Object* Runtime_IsArrayClass(Arguments args) {
+ return IsSpecificClassOf(args, Heap::Array_symbol());
+}
+
static Object* Runtime_IsInPrototypeChain(Arguments args) {
NoHandleAllocation ha;
@@ -402,22 +427,21 @@
IgnoreAttributesAndSetLocalProperty(global, name, value, attributes);
}
}
- // Done.
+
return Heap::undefined_value();
}
static Object* Runtime_DeclareContextSlot(Arguments args) {
HandleScope scope;
- ASSERT(args.length() == 5);
+ ASSERT(args.length() == 4);
- // args[0] is result (TOS)
- CONVERT_ARG_CHECKED(Context, context, 1);
- Handle<String> name(String::cast(args[2]));
+ CONVERT_ARG_CHECKED(Context, context, 0);
+ Handle<String> name(String::cast(args[1]));
PropertyAttributes mode =
- static_cast<PropertyAttributes>(Smi::cast(args[3])->value());
+ static_cast<PropertyAttributes>(Smi::cast(args[2])->value());
ASSERT(mode == READ_ONLY || mode == NONE);
- Handle<Object> initial_value(args[4]);
+ Handle<Object> initial_value(args[3]);
// Declarations are always done in the function context.
context = Handle<Context>(context->fcontext());
@@ -456,32 +480,35 @@
SetProperty(context_ext, name, initial_value, mode);
}
}
- return args[0]; // return TOS
- }
- // The property is not in the function context. It needs to be "declared"
- // in the function context's extension context, or in the global context.
- Handle<JSObject> context_ext;
- if (context->extension() != NULL) {
- // The function context's extension context exists - use it.
- context_ext = Handle<JSObject>(context->extension());
} else {
- // The function context's extension context does not exists - allocate it.
- context_ext = Factory::NewJSObject(Top::context_extension_function());
- // And store it in the extension slot.
- context->set_extension(*context_ext);
- }
- ASSERT(*context_ext != NULL);
+ // The property is not in the function context. It needs to be
+ // "declared" in the function context's extension context, or in the
+ // global context.
+ Handle<JSObject> context_ext;
+ if (context->extension() != NULL) {
+ // The function context's extension context exists - use it.
+ context_ext = Handle<JSObject>(context->extension());
+ } else {
+ // The function context's extension context does not exist - allocate
+ // it.
+ context_ext = Factory::NewJSObject(Top::context_extension_function());
+ // And store it in the extension slot.
+ context->set_extension(*context_ext);
+ }
+ ASSERT(*context_ext != NULL);
- // Declare the property by setting it to the initial value if provided,
- // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
- // constant declarations).
- ASSERT(!context_ext->HasLocalProperty(*name));
- Handle<Object> value(Heap::undefined_value());
- if (*initial_value != NULL) value = initial_value;
- SetProperty(context_ext, name, value, mode);
- ASSERT(context_ext->GetLocalPropertyAttribute(*name) == mode);
- return args[0]; // return TOS
+ // Declare the property by setting it to the initial value if provided,
+ // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
+ // constant declarations).
+ ASSERT(!context_ext->HasLocalProperty(*name));
+ Handle<Object> value(Heap::undefined_value());
+ if (*initial_value != NULL) value = initial_value;
+ SetProperty(context_ext, name, value, mode);
+ ASSERT(context_ext->GetLocalPropertyAttribute(*name) == mode);
+ }
+
+ return Heap::undefined_value();
}
@@ -864,10 +891,11 @@
target->shared()->set_length(fun->shared()->length());
target->shared()->set_formal_parameter_count(
fun->shared()->formal_parameter_count());
- // Set the source code of the target function.
- target->shared()->set_script(fun->shared()->script());
- target->shared()->set_start_position(fun->shared()->start_position());
- target->shared()->set_end_position(fun->shared()->end_position());
+ // Set the source code of the target function to undefined.
+ // SetCode is only used for built-in constructors like String,
+ // Array, and Object, and some web code
+ // doesn't like seeing source code for constructors.
+ target->shared()->set_script(Heap::undefined_value());
context = Handle<Context>(fun->context());
// Make sure we get a fresh copy of the literal vector to avoid
@@ -925,110 +953,320 @@
}
-static inline void ComputeKMPNextTable(String* pattern, int next_table[]) {
- int i = 0;
- int j = -1;
- next_table[0] = -1;
+// Cap on the maximal shift in the Boyer-Moore implementation. By setting a
+// limit, we can fix the size of tables.
+static const int kBMMaxShift = 0xff;
+static const int kBMAlphabetSize = 0x100; // Reduce alphabet to this size.
- Access<StringInputBuffer> buffer(&string_input_buffer);
- buffer->Reset(pattern);
- int length = pattern->length();
- uint16_t p = buffer->GetNext();
- while (i < length - 1) {
- while (j > -1 && p != pattern->Get(j)) {
- j = next_table[j];
+// Holds the two buffers used by Boyer-Moore string search's Good Suffix
+// shift. Only allows the last kBMMaxShift characters of the needle
+// to be indexed.
+class BMGoodSuffixBuffers: public AllStatic {
+ public:
+ BMGoodSuffixBuffers() {}
+ inline void init(int needle_length) {
+ ASSERT(needle_length > 1);
+ int start = needle_length < kBMMaxShift ? 0 : needle_length - kBMMaxShift;
+ int len = needle_length - start;
+ biased_suffixes_ = suffixes_ - start;
+ biased_good_suffix_shift_ = good_suffix_shift_ - start;
+ for (int i = 0; i <= len; i++) {
+ good_suffix_shift_[i] = len;
}
- i++;
- j++;
- p = buffer->GetNext();
- if (p == pattern->Get(j)) {
- next_table[i] = next_table[j];
- } else {
- next_table[i] = j;
+ }
+ inline int& suffix(int index) {
+ ASSERT(biased_suffixes_ + index >= suffixes_);
+ return biased_suffixes_[index];
+ }
+ inline int& shift(int index) {
+ ASSERT(biased_good_suffix_shift_ + index >= good_suffix_shift_);
+ return biased_good_suffix_shift_[index];
+ }
+ private:
+ int suffixes_[kBMMaxShift + 1];
+ int good_suffix_shift_[kBMMaxShift + 1];
+ int *biased_suffixes_;
+ int *biased_good_suffix_shift_;
+ DISALLOW_COPY_AND_ASSIGN(BMGoodSuffixBuffers);
+};
+
+// buffers reused by BoyerMoore
+static int bad_char_occurence[kBMAlphabetSize];
+static BMGoodSuffixBuffers bmgs_buffers;
+
+// Compute the bad-char table for Boyer-Moore in the static buffer.
+// Only pattern characters from position 'start' onwards are indexed,
+// so that the table fits in the fixed-size static buffer.
+template <typename pchar>
+static void BoyerMoorePopulateBadCharTable(Vector<const pchar> pattern,
+ int start) {
+ // Run forwards to populate bad_char_table, so that *last* instance
+ // of character equivalence class is the one registered.
+ // Notice: Doesn't include the last character.
+ for (int i = 0; i < kBMAlphabetSize; i++) {
+ bad_char_occurence[i] = start - 1;
+ }
+ for (int i = start; i < pattern.length(); i++) {
+ bad_char_occurence[pattern[i] % kBMAlphabetSize] = i;
+ }
+}
+
+template <typename pchar>
+static void BoyerMoorePopulateGoodSuffixTable(Vector<const pchar> pattern,
+ int start,
+ int len) {
+ int m = pattern.length();
+ // Compute Good Suffix tables.
+ bmgs_buffers.init(m);
+
+ bmgs_buffers.shift(m-1) = 1;
+ bmgs_buffers.suffix(m) = m + 1;
+ pchar last_char = pattern[m - 1];
+ int suffix = m + 1;
+ for (int i = m; i > start;) {
+ for (pchar c = pattern[i - 1]; suffix <= m && c != pattern[suffix - 1];) {
+ if (bmgs_buffers.shift(suffix) == len) {
+ bmgs_buffers.shift(suffix) = suffix - i;
+ }
+ suffix = bmgs_buffers.suffix(suffix);
+ }
+ i--;
+ suffix--;
+ bmgs_buffers.suffix(i) = suffix;
+ if (suffix == m) {
+ // No suffix to extend, so we check against last_char only.
+ while (i > start && pattern[i - 1] != last_char) {
+ if (bmgs_buffers.shift(m) == len) {
+ bmgs_buffers.shift(m) = m - i;
+ }
+ i--;
+ bmgs_buffers.suffix(i) = m;
+ }
+ if (i > start) {
+ i--;
+ suffix--;
+ bmgs_buffers.suffix(i) = suffix;
+ }
+ }
+ }
+ if (suffix < m) {
+ for (int i = start; i <= m; i++) {
+ if (bmgs_buffers.shift(i) == len) {
+ bmgs_buffers.shift(i) = suffix - start;
+ }
+ if (i == suffix) {
+ suffix = bmgs_buffers.suffix(suffix);
+ }
}
}
}
+// Restricted Boyer-Moore string matching. Restricts tables to a
+// suffix of long pattern strings and handles only equivalence classes
+// of the full alphabet. This allows us to ensure that tables take only
+// a fixed amount of space.
+template <typename schar, typename pchar>
+static int BoyerMooreIndexOf(Vector<const schar> subject,
+ Vector<const pchar> pattern,
+ int start_index) {
+ int m = pattern.length();
+ int n = subject.length();
-int Runtime::StringMatchKmp(String* sub, String* pat, int start_index) {
- sub->TryFlatten();
- pat->TryFlatten();
+ // Only preprocess at most kBMMaxShift last characters of pattern.
+ int start = m < kBMMaxShift ? 0 : m - kBMMaxShift;
+ int len = m - start;
- int subject_length = sub->length();
+ BoyerMoorePopulateBadCharTable(pattern, start);
+
+ int badness = 0; // How bad we are doing without a good-suffix table.
+ int idx; // No matches found prior to this index.
+ // Perform search
+ for (idx = start_index; idx <= n - m;) {
+ int j = m - 1;
+ schar c;
+ while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
+ if (j < 0) {
+ return idx;
+ } else {
+ int bc_occ = bad_char_occurence[c % kBMAlphabetSize];
+ int shift = bc_occ < j ? j - bc_occ : 1;
+ idx += shift;
+ // Badness increases by the number of characters we have
+ // checked, and decreases by the number of characters we
+ // can skip by shifting. It's a measure of how we are doing
+ // compared to reading each character exactly once.
+ badness += (m - j) - shift;
+ if (badness > m) break;
+ }
+ }
+
+ // If we are not done, we got here because we should build the Good Suffix
+ // table and continue searching.
+ if (idx <= n - m) {
+ BoyerMoorePopulateGoodSuffixTable(pattern, start, len);
+ // Continue search from idx.
+ do {
+ int j = m - 1;
+ schar c;
+ while (j >= 0 && pattern[j] == (c = subject[idx + j])) j--;
+ if (j < 0) {
+ return idx;
+ } else if (j < start) {
+ // we have matched more than our tables allow us to be smart about.
+ idx += 1;
+ } else {
+ int gs_shift = bmgs_buffers.shift(j + 1);
+ int bc_occ = bad_char_occurence[c % kBMAlphabetSize];
+ int bc_shift = j - bc_occ;
+ idx += (gs_shift > bc_shift) ? gs_shift : bc_shift;
+ }
+ } while (idx <= n - m);
+ }
+
+ return -1;
+}
+
+template <typename schar, typename pchar>
+static int SingleCharIndexOf(Vector<const schar> string,
+ pchar pattern_char,
+ int start_index) {
+ for (int i = start_index, n = string.length(); i < n; i++) {
+ if (pattern_char == string[i]) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+// Trivial string search for shorter strings.
+// On return, if "complete" is set to true, the return value is the
+// final result of searching for the pattern in the subject.
+// If "complete" is set to false, the return value is the index where
+// further checking should start, i.e., it's guaranteed that the pattern
+// does not occur at a position prior to the returned index.
+template <typename pchar, typename schar>
+static int SimpleIndexOf(Vector<const schar> subject,
+ Vector<const pchar> pattern,
+ int start_index,
+ bool &complete) {
+ int pattern_length = pattern.length();
+ int subject_length = subject.length();
+ // Badness is a count of how many extra times the same character
+ // is checked. We compare it to the index counter, so we start
+ // it at the start_index, and give it a little discount to avoid
+ // very early bail-outs.
+ int badness = start_index - pattern_length;
+ // We know our pattern is at least 2 characters, we cache the first so
+ // the common case of the first character not matching is faster.
+ pchar pattern_first_char = pattern[0];
+
+ for (int i = start_index, n = subject_length - pattern_length; i <= n; i++) {
+ if (subject[i] != pattern_first_char) continue;
+ int j = 1;
+ do {
+ if (pattern[j] != subject[i+j]) {
+ break;
+ }
+ j++;
+ } while (j < pattern_length);
+ if (j == pattern_length) {
+ complete = true;
+ return i;
+ }
+ badness += j;
+ if (badness > i) { // More than one extra character on average.
+ complete = false;
+ return (i + 1); // No matches up to index i+1.
+ }
+ }
+ complete = true;
+ return -1;
+}
+
+// Dispatch to different algorithms for different length of pattern/subject
+template <typename schar, typename pchar>
+static int StringMatchStrategy(Vector<const schar> sub,
+ Vector<const pchar> pat,
+ int start_index) {
+ ASSERT(pat.length() > 1);
+
+ // We have an ASCII haystack and a non-ASCII needle. Check if there
+ // really is a non-ASCII character in the needle and bail out if there
+ // is.
+ if (sizeof(pchar) > 1 && sizeof(schar) == 1) {
+ for (int i = 0; i < pat.length(); i++) {
+ uc16 c = pat[i];
+ if (c > String::kMaxAsciiCharCode) {
+ return -1;
+ }
+ }
+ }
+ // For small searches, a complex search algorithm is not worth the setup overhead.
+ bool complete;
+ int idx = SimpleIndexOf(sub, pat, start_index, complete);
+ if (complete) return idx;
+ return BoyerMooreIndexOf(sub, pat, idx);
+}
+
+// Perform string match of pattern on subject, starting at start index.
+// Caller must ensure that 0 <= start_index <= sub->length(),
+// and should check that pat->length() + start_index <= sub->length()
+int Runtime::StringMatch(Handle<String> sub,
+ Handle<String> pat,
+ int start_index) {
+ ASSERT(0 <= start_index);
+ ASSERT(start_index <= sub->length());
+
int pattern_length = pat->length();
-
- if (start_index > subject_length) return -1;
if (pattern_length == 0) return start_index;
+ int subject_length = sub->length();
+ if (start_index + pattern_length > subject_length) return -1;
+
+ FlattenString(sub);
// Searching for one specific character is common. For one
- // character patterns the KMP algorithm is guaranteed to slow down
- // the search, so we just run through the subject string.
+ // character patterns linear search is necessary, so any smart
+ // algorithm is unnecessary overhead.
if (pattern_length == 1) {
- uint16_t pattern_char = pat->Get(0);
- for (int i = start_index; i < subject_length; i++) {
- if (sub->Get(i) == pattern_char) {
- return i;
- }
+ AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ if (sub->is_ascii_representation()) {
+ return SingleCharIndexOf(sub->ToAsciiVector(), pat->Get(0), start_index);
}
- return -1;
+ return SingleCharIndexOf(sub->ToUC16Vector(), pat->Get(0), start_index);
}
- // For small searches, KMP is not worth the setup overhead.
- if (subject_length < 100) {
- // We know our pattern is at least 2 characters, we cache the first so
- // the common case of the first character not matching is faster.
- uint16_t pattern_first_char = pat->Get(0);
- for (int i = start_index; i + pattern_length <= subject_length; i++) {
- if (sub->Get(i) != pattern_first_char) continue;
+ FlattenString(pat);
- for (int j = 1; j < pattern_length; j++) {
- if (pat->Get(j) != sub->Get(j + i)) break;
- if (j == pattern_length - 1) return i;
- }
+ AssertNoAllocation no_heap_allocation; // ensure vectors stay valid
+ // dispatch on type of strings
+ if (pat->is_ascii_representation()) {
+ Vector<const char> pat_vector = pat->ToAsciiVector();
+ if (sub->is_ascii_representation()) {
+ return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
}
- return -1;
+ return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
}
-
- // For patterns with a larger length we use the KMP algorithm.
- //
- // Compute the 'next' table.
- int* next_table = NewArray<int>(pattern_length);
- ComputeKMPNextTable(pat, next_table);
- // Search using the 'next' table.
- int pattern_index = 0;
- // We would like to use StringInputBuffer here, but it does not have
- // the ability to start anywhere but the first character of a
- // string. It would be nice to have efficient forward-seeking
- // support on StringInputBuffers.
- int subject_index = start_index;
- while (subject_index < subject_length) {
- uint16_t subject_char = sub->Get(subject_index);
- while (pattern_index > -1 && pat->Get(pattern_index) != subject_char) {
- pattern_index = next_table[pattern_index];
- }
- pattern_index++;
- subject_index++;
- if (pattern_index >= pattern_length) {
- DeleteArray(next_table);
- return subject_index - pattern_index;
- }
+ Vector<const uc16> pat_vector = pat->ToUC16Vector();
+ if (sub->is_ascii_representation()) {
+ return StringMatchStrategy(sub->ToAsciiVector(), pat_vector, start_index);
}
- DeleteArray(next_table);
- return -1;
+ return StringMatchStrategy(sub->ToUC16Vector(), pat_vector, start_index);
}
static Object* Runtime_StringIndexOf(Arguments args) {
- NoHandleAllocation ha;
+ HandleScope scope; // create a new handle scope
ASSERT(args.length() == 3);
- CONVERT_CHECKED(String, sub, args[0]);
- CONVERT_CHECKED(String, pat, args[1]);
+ CONVERT_ARG_CHECKED(String, sub, 0);
+ CONVERT_ARG_CHECKED(String, pat, 1);
+
Object* index = args[2];
uint32_t start_index;
if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
- return Smi::FromInt(Runtime::StringMatchKmp(sub, pat, start_index));
+ int position = Runtime::StringMatch(sub, pat, start_index);
+ return Smi::FromInt(position);
}
@@ -1314,6 +1552,29 @@
}
+
+// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric
+static Object* Runtime_KeyedGetProperty(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ Object* receiver = args[0];
+ Object* key = args[1];
+ if (receiver->IsJSObject() &&
+ key->IsString() &&
+ !JSObject::cast(receiver)->HasFastProperties()) {
+ Dictionary* dictionary = JSObject::cast(receiver)->property_dictionary();
+ int entry = dictionary->FindStringEntry(String::cast(key));
+ if ((entry != DescriptorArray::kNotFound)
+ && (dictionary->DetailsAt(entry).type() == NORMAL)) {
+ return dictionary->ValueAt(entry);
+ }
+ }
+ return Runtime::GetObjectProperty(args.at<Object>(0),
+ args.at<Object>(1));
+}
+
+
Object* Runtime::SetObjectProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
@@ -1954,7 +2215,7 @@
// character is also ascii. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- Object* o = s->IsAscii()
+ Object* o = s->IsAsciiRepresentation()
? Heap::AllocateRawAsciiString(length)
: Heap::AllocateRawTwoByteString(length);
if (o->IsFailure()) return o;
@@ -1970,7 +2231,8 @@
// We can assume that the string is not empty
uc32 current = buffer->GetNext();
while (i < length) {
- uc32 next = buffer->has_more() ? buffer->GetNext() : 0;
+ bool has_next = buffer->has_more();
+ uc32 next = has_next ? buffer->GetNext() : 0;
int char_length = mapping->get(current, next, chars);
if (char_length == 0) {
// The case conversion of this character is the character itself.
@@ -1994,12 +2256,21 @@
// "realloc" it and probably, in the vast majority of cases,
// extend the existing string to be able to hold the full
// result.
- int current_length = i + char_length + mapping->get(next, 0, chars);
+ int next_length = 0;
+ if (has_next) {
+ next_length = mapping->get(next, 0, chars);
+ if (next_length == 0) next_length = 1;
+ }
+ int current_length = i + char_length + next_length;
while (buffer->has_more()) {
current = buffer->GetNext();
+ // NOTE: we use 0 as the next character here because, while
+ // the next character may affect what a character converts to,
+ // it does not in any case affect the length of what it converts
+ // to.
int char_length = mapping->get(current, 0, chars);
if (char_length == 0) char_length = 1;
- current += char_length;
+ current_length += char_length;
}
length = current_length;
goto try_convert;
@@ -2231,7 +2502,7 @@
if (first->IsString()) return first;
}
- bool ascii = special->IsAscii();
+ bool ascii = special->IsAsciiRepresentation();
int position = 0;
for (int i = 0; i < array_length; i++) {
Object* elt = fixed_array->get(i);
@@ -2251,7 +2522,7 @@
return Failure::OutOfMemoryException();
}
position += element_length;
- if (ascii && !element->IsAscii()) {
+ if (ascii && !element->IsAsciiRepresentation()) {
ascii = false;
}
} else {
@@ -2395,8 +2666,12 @@
int len = x->length();
if (len != y->length()) return Smi::FromInt(NOT_EQUAL);
if (len == 0) return Smi::FromInt(EQUAL);
- // Fast case: First, middle and last characters.
+
+ // Handle one-element strings.
if (x->Get(0) != y->Get(0)) return Smi::FromInt(NOT_EQUAL);
+ if (len == 1) return Smi::FromInt(EQUAL);
+
+ // Fast case: First, middle and last characters.
if (x->Get(len>>1) != y->Get(len>>1)) return Smi::FromInt(NOT_EQUAL);
if (x->Get(len - 1) != y->Get(len - 1)) return Smi::FromInt(NOT_EQUAL);
@@ -2774,7 +3049,7 @@
if (Debug::StepInActive()) {
StackFrameIterator it;
it.Advance();
- ASSERT(InternalFrame::cast(it.frame())->is_construct_trampoline());
+ ASSERT(it.frame()->is_construct());
it.Advance();
if (it.frame()->fp() == Debug::step_in_fp()) {
HandleScope scope;
@@ -3704,7 +3979,7 @@
case FIELD:
value =
JSObject::cast(
- result->holder())->properties()->get(result->GetFieldIndex());
+ result->holder())->FastPropertyAt(result->GetFieldIndex());
if (value->IsTheHole()) {
return Heap::undefined_value();
}
@@ -4697,8 +4972,14 @@
// Convert the script objects to proper JS objects.
for (int i = 0; i < count; i++) {
- Handle<Script> script(Script::cast(instances->get(i)));
- instances->set(i, *GetScriptWrapper(script));
+ Handle<Script> script = Handle<Script>(Script::cast(instances->get(i)));
+ // Get the script wrapper in a local handle before calling GetScriptWrapper,
+ // because using
+ // instances->set(i, *GetScriptWrapper(script))
+ // is unsafe as GetScriptWrapper might call GC and the C++ compiler might
+ // already have dereferenced the instances handle.
+ Handle<JSValue> wrapper = GetScriptWrapper(script);
+ instances->set(i, *wrapper);
}
// Return result as a JS array.
diff --git a/src/runtime.h b/src/runtime.h
index fcded39..100077b 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -40,6 +40,7 @@
#define RUNTIME_FUNCTION_LIST_ALWAYS(F) \
/* Property access */ \
F(GetProperty, 2) \
+ F(KeyedGetProperty, 2) \
F(DeleteProperty, 2) \
F(HasLocalProperty, 2) \
F(HasProperty, 2) \
@@ -164,6 +165,9 @@
F(GetScript, 1) \
\
F(ClassOf, 1) \
+ F(IsDateClass, 1) \
+ F(IsStringClass, 1) \
+ F(IsArrayClass, 1) \
F(SetCode, 2) \
\
F(CreateApiFunction, 1) \
@@ -260,7 +264,7 @@
\
/* Declarations and initialization */ \
F(DeclareGlobals, 3) \
- F(DeclareContextSlot, 5) \
+ F(DeclareContextSlot, 4) \
F(InitializeVarGlobal, -1 /* 1 or 2 */) \
F(InitializeConstGlobal, 2) \
F(InitializeConstContextSlot, 3) \
@@ -332,7 +336,7 @@
// Get the runtime function with the given name.
static Function* FunctionForName(const char* name);
- static int StringMatchKmp(String* sub, String* pat, int index);
+ static int StringMatch(Handle<String> sub, Handle<String> pat, int index);
// TODO(1240886): The following three methods are *not* handle safe,
// but accept handle arguments. This seems fragile.
diff --git a/src/runtime.js b/src/runtime.js
index 471019a..e44a103 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -52,36 +52,31 @@
// ECMA-262, section 11.9.1, page 55.
function EQUALS(y) {
+ if (IS_STRING(this) && IS_STRING(y)) return %StringEquals(this, y);
var x = this;
// NOTE: We use iteration instead of recursion, because it is
// difficult to call EQUALS with the correct setting of 'this' in
// an efficient way.
-
while (true) {
-
if (IS_NUMBER(x)) {
if (y == null) return 1; // not equal
return %NumberEquals(x, %ToNumber(y));
-
} else if (IS_STRING(x)) {
if (IS_STRING(y)) return %StringEquals(x, y);
if (IS_NUMBER(y)) return %NumberEquals(%ToNumber(x), y);
if (IS_BOOLEAN(y)) return %NumberEquals(%ToNumber(x), %ToNumber(y));
if (y == null) return 1; // not equal
y = %ToPrimitive(y, NO_HINT);
-
} else if (IS_BOOLEAN(x)) {
if (IS_BOOLEAN(y)) {
return %_ObjectEquals(x, y) ? 0 : 1;
}
if (y == null) return 1; // not equal
return %NumberEquals(%ToNumber(x), %ToNumber(y));
-
} else if (x == null) {
// NOTE: This checks for both null and undefined.
return (y == null) ? 0 : 1;
-
} else {
if (IS_OBJECT(y)) {
return %_ObjectEquals(x, y) ? 0 : 1;
@@ -90,34 +85,29 @@
return %_ObjectEquals(x, y) ? 0 : 1;
}
x = %ToPrimitive(x, NO_HINT);
-
}
}
}
-
// ECMA-262, section 11.9.4, page 56.
function STRICT_EQUALS(x) {
- if (IS_NUMBER(this)) {
- if (!IS_NUMBER(x)) return 1; // not equal
- return %NumberEquals(this, x);
- }
-
if (IS_STRING(this)) {
if (!IS_STRING(x)) return 1; // not equal
return %StringEquals(this, x);
- }
+ }
- if (IS_BOOLEAN(this)) {
- if (!IS_BOOLEAN(x)) return 1; // not equal
- if (this) return x ? 0 : 1;
- else return x ? 1 : 0;
- }
+ if (IS_NUMBER(this)) {
+ if (!IS_NUMBER(x)) return 1; // not equal
+ return %NumberEquals(this, x);
+ }
- if (IS_UNDEFINED(this)) { // both undefined and undetectable
+ if (IS_UNDEFINED(this)) {
+ // Both undefined and undetectable.
return IS_UNDEFINED(x) ? 0 : 1;
}
+ // Objects, null, booleans and functions are all that's left.
+ // They can all be compared with a simple identity check.
return %_ObjectEquals(this, x) ? 0 : 1;
}
@@ -125,11 +115,15 @@
// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
// the result when either (or both) the operands are NaN.
function COMPARE(x, ncr) {
- // Improve performance for floating point compares
+ // Fast case for numbers and strings.
if (IS_NUMBER(this) && IS_NUMBER(x)) {
return %NumberCompare(this, x, ncr);
}
+ if (IS_STRING(this) && IS_STRING(x)) {
+ return %StringCompare(this, x);
+ }
+ // Default implementation.
var a = %ToPrimitive(this, NUMBER_HINT);
var b = %ToPrimitive(x, NUMBER_HINT);
if (IS_STRING(a) && IS_STRING(b)) {
@@ -149,10 +143,10 @@
// ECMA-262, section 11.6.1, page 50.
function ADD(x) {
// Fast case: Check for number operands and do the addition.
- if (IS_NUMBER(this) && IS_NUMBER(x)) {
- return %NumberAdd(this, x);
- }
+ if (IS_NUMBER(this) && IS_NUMBER(x)) return %NumberAdd(this, x);
+ if (IS_STRING(this) && IS_STRING(x)) return %StringAdd(this, x);
+ // Default implementation.
var a = %ToPrimitive(this, NO_HINT);
var b = %ToPrimitive(x, NO_HINT);
@@ -277,7 +271,10 @@
}
-// ECMA-262, section 11.8.6, page 54.
+// ECMA-262, section 11.8.6, page 54. To make the implementation more
+// efficient, the return value should be zero if the 'this' is an
+// instance of F, and non-zero if not. This makes it possible to avoid
+// an expensive ToBoolean conversion in the generated code.
function INSTANCE_OF(F) {
var V = this;
if (!IS_FUNCTION(F)) {
@@ -286,7 +283,7 @@
// If V is not an object, return false.
if (IS_NULL(V) || (!IS_OBJECT(V) && !IS_FUNCTION(V))) {
- return false;
+ return 1;
}
// Get the prototype of F; if it is not an object, throw an error.
@@ -296,7 +293,7 @@
}
// Return whether or not O is in the prototype chain of V.
- return %IsInPrototypeChain(O, V);
+ return %IsInPrototypeChain(O, V) ? 0 : 1;
}
@@ -397,6 +394,9 @@
// ECMA-262, section 9.1, page 30. Use null/undefined for no hint,
// (1) for number hint, and (2) for string hint.
function ToPrimitive(x, hint) {
+ // Fast case check.
+ if (IS_STRING(x)) return x;
+ // Normal behavior.
if (!IS_OBJECT(x) && !IS_FUNCTION(x)) return x;
if (x == null) return x; // check for null, undefined
if (hint == NO_HINT) hint = (IS_DATE(x)) ? STRING_HINT : NUMBER_HINT;
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 9ba6d87..055ec95 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -317,7 +317,7 @@
key->ShortPrint();
}
Add(": ");
- Object* value = js_object->properties()->get(r.GetFieldIndex());
+ Object* value = js_object->FastPropertyAt(r.GetFieldIndex());
Add("%o\n", value);
}
}
diff --git a/src/string.js b/src/string.js
index bb12c01..26318a3 100644
--- a/src/string.js
+++ b/src/string.js
@@ -46,7 +46,7 @@
// ECMA-262 section 15.5.4.2
function StringToString() {
- if (!IS_STRING(this) && %ClassOf(this) !== 'String')
+ if (!IS_STRING(this) && !%IsStringClass(this))
throw new $TypeError('String.prototype.toString is not generic');
return %_ValueOf(this);
}
@@ -54,7 +54,7 @@
// ECMA-262 section 15.5.4.3
function StringValueOf() {
- if (!IS_STRING(this) && %ClassOf(this) !== 'String')
+ if (!IS_STRING(this) && !%IsStringClass(this))
throw new $TypeError('String.prototype.valueOf is not generic');
return %_ValueOf(this);
}
diff --git a/src/stub-cache-arm.cc b/src/stub-cache-arm.cc
index dbbfb0a..5bcc6b0 100644
--- a/src/stub-cache-arm.cc
+++ b/src/stub-cache-arm.cc
@@ -103,8 +103,8 @@
__ b(eq, &miss);
// Get the map of the receiver and compute the hash.
- __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ ldr(ip, FieldMemOperand(name, String::kLengthOffset));
+ __ ldr(scratch, FieldMemOperand(name, String::kLengthOffset));
+ __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
__ eor(scratch, scratch, Operand(flags));
__ and_(scratch,
@@ -148,6 +148,27 @@
}
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst, Register src,
+ JSObject* holder, int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
+ __ ldr(dst, FieldMemOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ ldr(dst, FieldMemOperand(dst, offset));
+ }
+}
+
+
#undef __
#define __ masm()->
@@ -208,11 +229,7 @@
// Do the right check and compute the holder register.
Register reg =
__ CheckMaps(JSObject::cast(object), r1, holder, r3, r2, &miss);
-
- // Get the properties array of the holder and get the function from the field.
- int offset = index * kPointerSize + Array::kHeaderSize;
- __ ldr(r1, FieldMemOperand(reg, JSObject::kPropertiesOffset));
- __ ldr(r1, FieldMemOperand(r1, offset));
+ GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
// Check that the function really is a function.
__ tst(r1, Operand(kSmiTagMask));
@@ -421,8 +438,15 @@
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_ExtendStorage));
__ Jump(ic, RelocInfo::CODE_TARGET);
} else {
- // Get the properties array
- __ ldr(r1, FieldMemOperand(r3, JSObject::kPropertiesOffset));
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ if (index >= 0) {
+ // Get the properties array
+ __ ldr(r1, FieldMemOperand(r3, JSObject::kPropertiesOffset));
+ }
if (transition != NULL) {
// Update the map of the object; no write barrier updating is
@@ -431,17 +455,31 @@
__ str(ip, FieldMemOperand(r3, HeapObject::kMapOffset));
}
- // Write to the properties array.
- int offset = index * kPointerSize + Array::kHeaderSize;
- __ str(r0, FieldMemOperand(r1, offset));
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ str(r0, FieldMemOperand(r3, offset));
- // Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ // Skip updating write barrier if storing a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
- // Update the write barrier for the array address.
- __ mov(r3, Operand(offset));
- __ RecordWrite(r1, r3, r2); // OK to clobber r2, since we return
+ // Update the write barrier for the array address.
+ __ mov(r1, Operand(offset));
+ __ RecordWrite(r3, r1, r2);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ str(r0, FieldMemOperand(r1, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ __ b(eq, &exit);
+
+ // Update the write barrier for the array address.
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r1, r3, r2); // OK to clobber r2, since we return
+ }
// Return the value (register r0).
__ bind(&exit);
@@ -589,13 +627,7 @@
// Check that the maps haven't changed.
Register reg = __ CheckMaps(object, r0, holder, r3, r1, &miss);
-
- // Get the properties array of the holder.
- __ ldr(r3, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
- // Return the value from the properties array.
- int offset = index * kPointerSize + Array::kHeaderSize;
- __ ldr(r0, FieldMemOperand(r3, offset));
+ GenerateFastPropertyLoad(masm(), r0, reg, holder, index);
__ Ret();
// Handle load cache miss.
diff --git a/src/stub-cache-ia32.cc b/src/stub-cache-ia32.cc
index a7d8a7b..01526da 100644
--- a/src/stub-cache-ia32.cc
+++ b/src/stub-cache-ia32.cc
@@ -99,8 +99,8 @@
__ j(zero, &miss, not_taken);
// Get the map of the receiver and compute the hash.
- __ mov(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
- __ add(scratch, FieldOperand(name, String::kLengthOffset));
+ __ mov(scratch, FieldOperand(name, String::kLengthOffset));
+ __ add(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
__ xor_(scratch, flags);
__ and_(scratch, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
@@ -232,52 +232,30 @@
Register scratch1,
Register scratch2,
Label* miss_label) {
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss_label, not_taken);
-
- // Check that the receiver is a function.
- __ mov(scratch1, FieldOperand(receiver, HeapObject::kMapOffset));
- __ movzx_b(scratch2, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- __ cmp(scratch2, JS_FUNCTION_TYPE);
- __ j(not_equal, miss_label, not_taken);
-
- // Make sure that the function has an instance prototype.
- Label non_instance;
- __ movzx_b(scratch2, FieldOperand(scratch1, Map::kBitFieldOffset));
- __ test(scratch2, Immediate(1 << Map::kHasNonInstancePrototype));
- __ j(not_zero, &non_instance, not_taken);
-
- // Get the prototype or initial map from the function.
- __ mov(scratch1,
- FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset));
-
- // If the prototype or initial map is the hole, don't return it and
- // simply miss the cache instead. This will allow us to allocate a
- // prototype object on-demand in the runtime system.
- __ cmp(Operand(scratch1), Immediate(Factory::the_hole_value()));
- __ j(equal, miss_label, not_taken);
+ __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
__ mov(eax, Operand(scratch1));
-
- // If the function does not have an initial map, we're done.
- Label done;
- __ mov(scratch1, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(scratch2, FieldOperand(scratch1, Map::kInstanceTypeOffset));
- __ cmp(scratch2, MAP_TYPE);
- __ j(not_equal, &done);
-
- // Get the prototype from the initial map.
- __ mov(eax, FieldOperand(eax, Map::kPrototypeOffset));
-
- // All done: Return the prototype.
- __ bind(&done);
__ ret(0);
+}
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- __ bind(&non_instance);
- __ mov(eax, FieldOperand(scratch1, Map::kConstructorOffset));
- __ ret(0);
+
+// Load a fast property out of a holder object (src). In-object properties
+// are loaded directly otherwise the property is loaded from the properties
+// fixed array.
+void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst, Register src,
+ JSObject* holder, int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
+ __ mov(dst, FieldOperand(src, offset));
+ } else {
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ mov(dst, FieldOperand(dst, offset));
+ }
}
@@ -297,12 +275,8 @@
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
- // Get the properties array of the holder.
- __ mov(scratch1, FieldOperand(reg, JSObject::kPropertiesOffset));
-
- // Return the value from the properties array.
- int offset = index * kPointerSize + Array::kHeaderSize;
- __ mov(eax, FieldOperand(scratch1, offset));
+ // Get the value from the properties.
+ GenerateFastPropertyLoad(masm, eax, reg, holder, index);
__ ret(0);
}
@@ -442,8 +416,16 @@
return;
}
- // Get the properties array (optimistically).
- __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ if (index >= 0) {
+ // Get the properties array (optimistically).
+ __ mov(scratch, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ }
+
if (transition != NULL) {
// Update the map of the object; no write barrier updating is
// needed because the map is never in new space.
@@ -451,14 +433,25 @@
Immediate(Handle<Map>(transition)));
}
- // Write to the properties array.
- int offset = index * kPointerSize + Array::kHeaderSize;
- __ mov(FieldOperand(scratch, offset), eax);
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(FieldOperand(receiver_reg, offset), eax);
- // Update the write barrier for the array address.
- // Pass the value being stored in the now unused name_reg.
- __ mov(name_reg, Operand(eax));
- __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, Operand(eax));
+ __ RecordWrite(receiver_reg, offset, name_reg, scratch);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + Array::kHeaderSize;
+ __ mov(FieldOperand(scratch, offset), eax);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, Operand(eax));
+ __ RecordWrite(scratch, offset, name_reg, receiver_reg);
+ }
// Return the value (register eax).
__ ret(0);
@@ -516,10 +509,7 @@
Register reg =
__ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
- // Get the properties array of the holder and get the function from the field.
- int offset = index * kPointerSize + Array::kHeaderSize;
- __ mov(edi, FieldOperand(reg, JSObject::kPropertiesOffset));
- __ mov(edi, FieldOperand(edi, offset));
+ GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
// Check that the function really is a function.
__ test(edi, Immediate(kSmiTagMask));
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 718995f..4fb78da 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -198,11 +198,16 @@
// Computes the hashed offsets for primary and secondary caches.
static int PrimaryOffset(String* name, Code::Flags flags, Map* map) {
+ // This works well because the heap object tag size and the hash
+ // shift are equal. Shifting down the length field to get the
+ // hash code would effectively throw away two bits of the hash
+ // code.
+ ASSERT(kHeapObjectTagSize == String::kHashShift);
// Compute the hash of the name (use entire length field).
- uint32_t name_hash = name->length_field();
- ASSERT(name_hash & String::kHashComputedMask);
+ ASSERT(name->HasHashCode());
+ uint32_t field = name->length_field();
// Base the offset on a simple combination of name, flags, and map.
- uint32_t key = (reinterpret_cast<uint32_t>(map) + name_hash) ^ flags;
+ uint32_t key = (reinterpret_cast<uint32_t>(map) + field) ^ flags;
return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}
@@ -296,6 +301,9 @@
static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
int index,
Register prototype);
+ static void GenerateFastPropertyLoad(MacroAssembler* masm,
+ Register dst, Register src,
+ JSObject* holder, int index);
static void GenerateLoadField(MacroAssembler* masm,
JSObject* object,
JSObject* holder,
diff --git a/src/top.cc b/src/top.cc
index 712e94b..8ad4c23 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -119,7 +119,7 @@
// When the thread starts running it will allocate a fixed number of bytes
// on the stack and publish the location of this memory for others to use.
void Run() {
- EmbeddedVector<char, 16 * 1024> local_buffer;
+ EmbeddedVector<char, 32 * 1024> local_buffer;
// Initialize the buffer with a known good value.
OS::StrNCpy(local_buffer, "Trace data was not generated.\n",
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index f1463f4..b3f71d9 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -27,7 +27,7 @@
prefix cctest
-# TODO(96) fix this flaky test
+# BUG(96): Fix this flaky test.
test-debug/ThreadedDebugging: PASS || FAIL
[ $arch == arm ]
@@ -35,3 +35,6 @@
test-debug: SKIP
test-serialize: SKIP
test-api: SKIP
+
+# BUG(113): Test seems flaky on ARM.
+test-spaces/LargeObjectSpace: PASS || FAIL
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index c45ccf8..7bc60bd 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -2341,6 +2341,33 @@
}
+static const char* js_code_causing_huge_string_flattening =
+ "var str = 'X';"
+ "for (var i = 0; i < 29; i++) {"
+ " str = str + str;"
+ "}"
+ "str.match(/X/);";
+
+
+void OOMCallback(const char* location, const char* message) {
+ exit(0);
+}
+
+
+TEST(RegexpOutOfMemory) {
+ // Execute a script that causes out of memory when flattening a string.
+ v8::HandleScope scope;
+ v8::V8::SetFatalErrorHandler(OOMCallback);
+ LocalContext context;
+ Local<Script> script =
+ Script::Compile(String::New(js_code_causing_huge_string_flattening));
+ last_location = NULL;
+ Local<Value> result = script->Run();
+
+ CHECK(false); // Should not return.
+}
+
+
static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
v8::Handle<Value> data) {
CHECK_EQ(v8::Undefined(), data);
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 4f4a127..9bb9f98 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -38,7 +38,7 @@
CheckMap(Heap::heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
CheckMap(Heap::fixed_array_map(), FIXED_ARRAY_TYPE, Array::kHeaderSize);
CheckMap(Heap::long_string_map(), LONG_STRING_TYPE,
- TwoByteString::kHeaderSize);
+ SeqTwoByteString::kHeaderSize);
}
diff --git a/test/mjsunit/array-sort.js b/test/mjsunit/array-sort.js
index a68b819..dfa4590 100644
--- a/test/mjsunit/array-sort.js
+++ b/test/mjsunit/array-sort.js
@@ -134,9 +134,21 @@
// Test array sorting with undefined elemeents in the array.
function TestArraySortingWithUndefined() {
- var a = [3, void 0, 2];
+ var a = [ 3, void 0, 2 ];
a.sort();
- assertArrayEquals([ 2, 3, void 0], a);
+ assertArrayEquals([ 2, 3, void 0 ], a);
}
TestArraySortingWithUndefined();
+
+// Test that sorting using an unsound comparison function still gives a
+// sane result, i.e. it terminates without error and retains the elements
+// in the array.
+function TestArraySortingWithUnsoundComparisonFunction() {
+ var a = [ 3, void 0, 2 ];
+ a.sort(function(x, y) { return 1; });
+ a.sort();
+ assertArrayEquals([ 2, 3, void 0 ], a);
+}
+
+TestArraySortingWithUnsoundComparisonFunction();
diff --git a/test/mjsunit/debug-evaluate-locals.js b/test/mjsunit/debug-evaluate-locals.js
index 97b9f39..501e34a 100644
--- a/test/mjsunit/debug-evaluate-locals.js
+++ b/test/mjsunit/debug-evaluate-locals.js
@@ -32,29 +32,59 @@
listenerComplete = false;
exception = false;
+
+function checkFrame0(name, value) {
+ assertTrue(name == 'a' || name == 'b');
+ if (name == 'a') {
+ assertEquals(1, value);
+ }
+ if (name == 'b') {
+ assertEquals(2, value);
+ }
+}
+
+
+function checkFrame1(name, value) {
+ assertTrue(name == '.arguments' || name == 'a');
+ if (name == 'a') {
+ assertEquals(3, value);
+ }
+}
+
+
+function checkFrame2(name, value) {
+ assertTrue(name == '.arguments' || name == 'a' ||
+ name == 'arguments' || name == 'b');
+ if (name == 'a') {
+ assertEquals(5, value);
+ }
+ if (name == 'b') {
+ assertEquals(0, value);
+ }
+}
+
+
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break)
{
// Frame 0 has normal variables a and b.
- assertEquals('a', exec_state.frame(0).localName(0));
- assertEquals('b', exec_state.frame(0).localName(1));
- assertEquals(1, exec_state.frame(0).localValue(0).value());
- assertEquals(2, exec_state.frame(0).localValue(1).value());
+ var frame0 = exec_state.frame(0);
+ checkFrame0(frame0.localName(0), frame0.localValue(0).value());
+ checkFrame0(frame0.localName(1), frame0.localValue(1).value());
// Frame 1 has normal variable a (and the .arguments variable).
- assertEquals('.arguments', exec_state.frame(1).localName(0));
- assertEquals('a', exec_state.frame(1).localName(1));
- assertEquals(3, exec_state.frame(1).localValue(1).value());
+ var frame1 = exec_state.frame(1);
+ checkFrame1(frame1.localName(0), frame1.localValue(0).value());
+ checkFrame1(frame1.localName(1), frame1.localValue(1).value());
- // Frame 0 has normal variables a and b (and both the .arguments and
+ // Frame 2 has normal variables a and b (and both the .arguments and
// arguments variable).
- assertEquals('.arguments', exec_state.frame(2).localName(0));
- assertEquals('a', exec_state.frame(2).localName(1));
- assertEquals('arguments', exec_state.frame(2).localName(2));
- assertEquals('b', exec_state.frame(2).localName(3));
- assertEquals(5, exec_state.frame(2).localValue(1).value());
- assertEquals(0, exec_state.frame(2).localValue(3).value());
+ var frame2 = exec_state.frame(2);
+ checkFrame2(frame2.localName(0), frame2.localValue(0).value());
+ checkFrame2(frame2.localName(1), frame2.localValue(1).value());
+ checkFrame2(frame2.localName(2), frame2.localValue(2).value());
+ checkFrame2(frame2.localName(3), frame2.localValue(3).value());
// Evaluating a and b on frames 0, 1 and 2 produces 1, 2, 3, 4, 5 and 6.
assertEquals(1, exec_state.frame(0).evaluate('a').value());
diff --git a/test/mjsunit/instanceof.js b/test/mjsunit/instanceof.js
index 3fef2e2..01ea426 100644
--- a/test/mjsunit/instanceof.js
+++ b/test/mjsunit/instanceof.js
@@ -30,3 +30,64 @@
assertFalse({} instanceof Array);
assertTrue([] instanceof Array);
+
+function TestChains() {
+ var A = {};
+ var B = {};
+ var C = {};
+ B.__proto__ = A;
+ C.__proto__ = B;
+
+ function F() { }
+ F.prototype = A;
+ assertTrue(C instanceof F);
+ assertTrue(B instanceof F);
+ assertFalse(A instanceof F);
+
+ F.prototype = B;
+ assertTrue(C instanceof F);
+ assertFalse(B instanceof F);
+ assertFalse(A instanceof F);
+
+ F.prototype = C;
+ assertFalse(C instanceof F);
+ assertFalse(B instanceof F);
+ assertFalse(A instanceof F);
+}
+
+TestChains();
+
+
+function TestExceptions() {
+ function F() { }
+ var items = [ 1, new Number(42),
+ true,
+ 'string', new String('hest'),
+ {}, [],
+ F, new F(),
+ Object, String ];
+
+ var exceptions = 0;
+ var instanceofs = 0;
+
+ for (var i = 0; i < items.length; i++) {
+ for (var j = 0; j < items.length; j++) {
+ try {
+ if (items[i] instanceof items[j]) instanceofs++;
+ } catch (e) {
+ assertTrue(e instanceof TypeError);
+ exceptions++;
+ }
+ }
+ }
+ assertEquals(10, instanceofs);
+ assertEquals(88, exceptions);
+
+ // Make sure to throw an exception if the function prototype
+ // isn't a proper JavaScript object.
+ function G() { }
+ G.prototype = undefined;
+ assertThrows("({} instanceof G)");
+}
+
+TestExceptions();
diff --git a/test/mjsunit/large-object-allocation.js b/test/mjsunit/large-object-allocation.js
new file mode 100644
index 0000000..c2b717c
--- /dev/null
+++ b/test/mjsunit/large-object-allocation.js
@@ -0,0 +1,300 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Allocate a very large object that is guaranteed to overflow the
+// instance_size field in the map resulting in an object that is smaller
+// than what was called for.
+function LargeObject(i) {
+ this.a = i;
+ this.b = i;
+ this.c = i;
+ this.d = i;
+ this.e = i;
+ this.f = i;
+ this.g = i;
+ this.h = i;
+ this.i = i;
+ this.j = i;
+ this.k = i;
+ this.l = i;
+ this.m = i;
+ this.n = i;
+ this.o = i;
+ this.p = i;
+ this.q = i;
+ this.r = i;
+ this.s = i;
+ this.t = i;
+ this.u = i;
+ this.v = i;
+ this.w = i;
+ this.x = i;
+ this.y = i;
+ this.z = i;
+ this.a1 = i;
+ this.b1 = i;
+ this.c1 = i;
+ this.d1 = i;
+ this.e1 = i;
+ this.f1 = i;
+ this.g1 = i;
+ this.h1 = i;
+ this.i1 = i;
+ this.j1 = i;
+ this.k1 = i;
+ this.l1 = i;
+ this.m1 = i;
+ this.n1 = i;
+ this.o1 = i;
+ this.p1 = i;
+ this.q1 = i;
+ this.r1 = i;
+ this.s1 = i;
+ this.t1 = i;
+ this.u1 = i;
+ this.v1 = i;
+ this.w1 = i;
+ this.x1 = i;
+ this.y1 = i;
+ this.z1 = i;
+ this.a2 = i;
+ this.b2 = i;
+ this.c2 = i;
+ this.d2 = i;
+ this.e2 = i;
+ this.f2 = i;
+ this.g2 = i;
+ this.h2 = i;
+ this.i2 = i;
+ this.j2 = i;
+ this.k2 = i;
+ this.l2 = i;
+ this.m2 = i;
+ this.n2 = i;
+ this.o2 = i;
+ this.p2 = i;
+ this.q2 = i;
+ this.r2 = i;
+ this.s2 = i;
+ this.t2 = i;
+ this.u2 = i;
+ this.v2 = i;
+ this.w2 = i;
+ this.x2 = i;
+ this.y2 = i;
+ this.z2 = i;
+ this.a3 = i;
+ this.b3 = i;
+ this.c3 = i;
+ this.d3 = i;
+ this.e3 = i;
+ this.f3 = i;
+ this.g3 = i;
+ this.h3 = i;
+ this.i3 = i;
+ this.j3 = i;
+ this.k3 = i;
+ this.l3 = i;
+ this.m3 = i;
+ this.n3 = i;
+ this.o3 = i;
+ this.p3 = i;
+ this.q3 = i;
+ this.r3 = i;
+ this.s3 = i;
+ this.t3 = i;
+ this.u3 = i;
+ this.v3 = i;
+ this.w3 = i;
+ this.x3 = i;
+ this.y3 = i;
+ this.z3 = i;
+ this.a4 = i;
+ this.b4 = i;
+ this.c4 = i;
+ this.d4 = i;
+ this.e4 = i;
+ this.f4 = i;
+ this.g4 = i;
+ this.h4 = i;
+ this.i4 = i;
+ this.j4 = i;
+ this.k4 = i;
+ this.l4 = i;
+ this.m4 = i;
+ this.n4 = i;
+ this.o4 = i;
+ this.p4 = i;
+ this.q4 = i;
+ this.r4 = i;
+ this.s4 = i;
+ this.t4 = i;
+ this.u4 = i;
+ this.v4 = i;
+ this.w4 = i;
+ this.x4 = i;
+ this.y4 = i;
+ this.z4 = i;
+ this.a5 = i;
+ this.b5 = i;
+ this.c5 = i;
+ this.d5 = i;
+ this.e5 = i;
+ this.f5 = i;
+ this.g5 = i;
+ this.h5 = i;
+ this.i5 = i;
+ this.j5 = i;
+ this.k5 = i;
+ this.l5 = i;
+ this.m5 = i;
+ this.n5 = i;
+ this.o5 = i;
+ this.p5 = i;
+ this.q5 = i;
+ this.r5 = i;
+ this.s5 = i;
+ this.t5 = i;
+ this.u5 = i;
+ this.v5 = i;
+ this.w5 = i;
+ this.x5 = i;
+ this.y5 = i;
+ this.z5 = i;
+ this.a6 = i;
+ this.b6 = i;
+ this.c6 = i;
+ this.d6 = i;
+ this.e6 = i;
+ this.f6 = i;
+ this.g6 = i;
+ this.h6 = i;
+ this.i6 = i;
+ this.j6 = i;
+ this.k6 = i;
+ this.l6 = i;
+ this.m6 = i;
+ this.n6 = i;
+ this.o6 = i;
+ this.p6 = i;
+ this.q6 = i;
+ this.r6 = i;
+ this.s6 = i;
+ this.t6 = i;
+ this.u6 = i;
+ this.v6 = i;
+ this.w6 = i;
+ this.x6 = i;
+ this.y6 = i;
+ this.z6 = i;
+ this.a7 = i;
+ this.b7 = i;
+ this.c7 = i;
+ this.d7 = i;
+ this.e7 = i;
+ this.f7 = i;
+ this.g7 = i;
+ this.h7 = i;
+ this.i7 = i;
+ this.j7 = i;
+ this.k7 = i;
+ this.l7 = i;
+ this.m7 = i;
+ this.n7 = i;
+ this.o7 = i;
+ this.p7 = i;
+ this.q7 = i;
+ this.r7 = i;
+ this.s7 = i;
+ this.t7 = i;
+ this.u7 = i;
+ this.v7 = i;
+ this.w7 = i;
+ this.x7 = i;
+ this.y7 = i;
+ this.z7 = i;
+ this.a8 = i;
+ this.b8 = i;
+ this.c8 = i;
+ this.d8 = i;
+ this.e8 = i;
+ this.f8 = i;
+ this.g8 = i;
+ this.h8 = i;
+ this.i8 = i;
+ this.j8 = i;
+ this.k8 = i;
+ this.l8 = i;
+ this.m8 = i;
+ this.n8 = i;
+ this.o8 = i;
+ this.p8 = i;
+ this.q8 = i;
+ this.r8 = i;
+ this.s8 = i;
+ this.t8 = i;
+ this.u8 = i;
+ this.v8 = i;
+ this.w8 = i;
+ this.x8 = i;
+ this.y8 = i;
+ this.z8 = i;
+ this.a9 = i;
+ this.b9 = i;
+ this.c9 = i;
+ this.d9 = i;
+ this.e9 = i;
+ this.f9 = i;
+ this.g9 = i;
+ this.h9 = i;
+ this.i9 = i;
+ this.j9 = i;
+ this.k9 = i;
+ this.l9 = i;
+ this.m9 = i;
+ this.n9 = i;
+ this.o9 = i;
+ this.p9 = i;
+ this.q9 = i;
+ // With this number of properties the object perfectly wraps around if the
+// instance size is not checked when allocating the initial map for LargeObject.
+ // Meaning that the instance will be smaller than a minimal JSObject and we
+ // will suffer a bus error in the release build or an assertion in the debug
+ // build.
+}
+
+function ExpectAllFields(o, val) {
+ for (var x in o) {
+ assertEquals(o[x], val);
+ }
+}
+
+var a = new LargeObject(1);
+var b = new LargeObject(2);
+
+ExpectAllFields(a, 1);
+ExpectAllFields(b, 2);
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 4833bfe..6a97c7f 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -36,8 +36,9 @@
[ $arch == arm ]
-# Slow test: Times out in debug mode.
+# Slow tests which time out in debug mode.
try: PASS, SKIP if $mode == debug
+debug-scripts-request: PASS, SKIP if $mode == debug
# Bug number 1020483: Debug tests fail on ARM.
debug-constructor: FAIL
@@ -57,11 +58,6 @@
debug-step-stub-callfunction: FAIL
debug-stepin-constructor: FAIL
debug-step: FAIL
-regress/regress-998565: FAIL
-
-# Bug number 1308895: These tests pass on the ARM simulator, but
-# fail on the ARM Linux machine.
-debug-script-breakpoints: PASS || FAIL
-debug-scripts-request: PASS || FAIL
debug-breakpoints: PASS || FAIL
+regress/regress-998565: FAIL
diff --git a/test/mjsunit/regress/regress-114.js b/test/mjsunit/regress/regress-114.js
new file mode 100644
index 0000000..6c1a6a3
--- /dev/null
+++ b/test/mjsunit/regress/regress-114.js
@@ -0,0 +1,43 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// German eszett
+assertEquals("FRIEDRICHSTRASSE 14", "friedrichstra\xDFe 14".toUpperCase());
+assertEquals("XXSSSSSSXX", "xx\xDF\xDF\xDFxx".toUpperCase());
+assertEquals("(SS)", "(\xDF)".toUpperCase());
+assertEquals("SS", "\xDF".toUpperCase());
+
+// Turkish dotted upper-case I lower-case converts to two characters
+assertEquals("i\u0307", "\u0130".toLowerCase());
+assertEquals("(i\u0307)", "(\u0130)".toLowerCase());
+assertEquals("xxi\u0307xx", "XX\u0130XX".toLowerCase());
+
+// Greek small upsilon with dialytika and tonos upper-case converts to three
+// characters
+assertEquals("\u03A5\u0308\u0301", "\u03B0".toUpperCase());
+assertEquals("(\u03A5\u0308\u0301)", "(\u03B0)".toUpperCase());
+assertEquals("XX\u03A5\u0308\u0301XX", "xx\u03B0xx".toUpperCase());
diff --git a/test/mjsunit/regress/regress-1170187.js b/test/mjsunit/regress/regress-1170187.js
index f0471ff..69f1a10 100644
--- a/test/mjsunit/regress/regress-1170187.js
+++ b/test/mjsunit/regress/regress-1170187.js
@@ -35,11 +35,32 @@
listenerCalled = false;
exception = false;
+
+function checkName(name) {
+ assertTrue(name == 'a' || name == 'b' || name == 'c');
+}
+
+
+function checkValue(value) {
+ assertEquals(void 0, value);
+}
+
+
function listener(event, exec_state, event_data, data) {
try {
if (event == Debug.DebugEvent.Break) {
- assertEquals('c', exec_state.frame(0).localName(0));
- assertEquals(void 0, exec_state.frame(0).localValue(0).value());
+ var local0Name = exec_state.frame(0).localName(0);
+ var local1Name = exec_state.frame(0).localName(1);
+ var local2Name = exec_state.frame(0).localName(2);
+ checkName(local0Name);
+ checkName(local1Name);
+ checkName(local2Name);
+ var local0Value = exec_state.frame(0).localValue(0).value();
+ var local1Value = exec_state.frame(0).localValue(1).value();
+ var local2Value = exec_state.frame(0).localValue(2).value();
+ checkValue(local0Value);
+ checkValue(local1Value);
+ checkValue(local2Value);
listenerCalled = true;
}
} catch (e) {
diff --git a/test/mjsunit/string-indexof.js b/test/mjsunit/string-indexof.js
index 5b37a48..2018da7 100644
--- a/test/mjsunit/string-indexof.js
+++ b/test/mjsunit/string-indexof.js
@@ -27,6 +27,12 @@
var s = "test test test";
+assertEquals(0, s.indexOf("t"));
+assertEquals(3, s.indexOf("t", 1));
+assertEquals(5, s.indexOf("t", 4));
+assertEquals(1, s.indexOf("e"));
+assertEquals(2, s.indexOf("s"));
+
assertEquals(5, s.indexOf("test", 4));
assertEquals(5, s.indexOf("test", 5));
assertEquals(10, s.indexOf("test", 6));
@@ -47,3 +53,90 @@
assertEquals(10, reString.indexOf("(asdf)?"));
assertEquals(1, String.prototype.indexOf.length);
+
+// Random Greek letters
+var twoByteString = "\u039a\u0391\u03a3\u03a3\u0395";
+
+// Test single char pattern
+assertEquals(0, twoByteString.indexOf("\u039a"), "Lamda");
+assertEquals(1, twoByteString.indexOf("\u0391"), "Alpha");
+assertEquals(2, twoByteString.indexOf("\u03a3"), "First Sigma");
+assertEquals(3, twoByteString.indexOf("\u03a3",3), "Second Sigma");
+assertEquals(4, twoByteString.indexOf("\u0395"), "Epsilon");
+assertEquals(-1, twoByteString.indexOf("\u0392"), "Not beta");
+
+// Test multi-char pattern
+assertEquals(0, twoByteString.indexOf("\u039a\u0391"), "lambda Alpha");
+assertEquals(1, twoByteString.indexOf("\u0391\u03a3"), "Alpha Sigma");
+assertEquals(2, twoByteString.indexOf("\u03a3\u03a3"), "Sigma Sigma");
+assertEquals(3, twoByteString.indexOf("\u03a3\u0395"), "Sigma Epsilon");
+
+assertEquals(-1, twoByteString.indexOf("\u0391\u03a3\u0395"),
+ "Not Alpha Sigma Epsilon");
+
+// Single-char pattern
+assertEquals(4, twoByteString.indexOf("\u0395"));
+
+// Test complex string indexOf algorithms. They only trigger for long strings.
+
+// Long string that isn't a simple repeat of a shorter string.
+var long = "A";
+for(var i = 66; i < 76; i++) { // from 'B' to 'K'
+ long = long + String.fromCharCode(i) + long;
+}
+
+// pattern of 15 chars, repeated every 16 chars in long
+var pattern = "ABACABADABACABA";
+for(var i = 0; i < long.length - pattern.length; i+= 7) {
+ var index = long.indexOf(pattern, i);
+ assertEquals((i + 15) & ~0xf, index, "Long ABACABA...-string at index " + i);
+}
+assertEquals(510, long.indexOf("AJABACA"), "Long AJABACA, First J");
+assertEquals(1534, long.indexOf("AJABACA", 511), "Long AJABACA, Second J");
+
+pattern = "JABACABADABACABA";
+assertEquals(511, long.indexOf(pattern), "Long JABACABA..., First J");
+assertEquals(1535, long.indexOf(pattern, 512), "Long JABACABA..., Second J");
+
+
+var lipsum = "lorem ipsum per se esse fugiendum. itaque aiunt hanc quasi "
+ + "naturalem atque insitam in animis nostris inesse notionem, ut "
+ + "alterum esse appetendum, alterum aspernandum sentiamus. Alii autem,"
+ + " quibus ego assentior, cum a philosophis compluribus permulta "
+ + "dicantur, cur nec voluptas in bonis sit numeranda nec in malis "
+ + "dolor, non existimant oportere nimium nos causae confidere, sed et"
+ + " argumentandum et accurate disserendum et rationibus conquisitis de"
+ + " voluptate et dolore disputandum putant.\n"
+ + "Sed ut perspiciatis, unde omnis iste natus error sit voluptatem "
+ + "accusantium doloremque laudantium, totam rem aperiam eaque ipsa,"
+ + "quae ab illo inventore veritatis et quasi architecto beatae vitae "
+ + "dicta sunt, explicabo. nemo enim ipsam voluptatem, quia voluptas"
+ + "sit, aspernatur aut odit aut fugit, sed quia consequuntur magni"
+ + " dolores eos, qui ratione voluptatem sequi nesciunt, neque porro"
+ + " quisquam est, qui dolorem ipsum, quia dolor sit, amet, "
+ + "consectetur, adipisci velit, sed quia non numquam eius modi"
+ + " tempora incidunt, ut labore et dolore magnam aliquam quaerat "
+ + "voluptatem. ut enim ad minima veniam, quis nostrum exercitationem "
+ + "ullam corporis suscipit laboriosam, nisi ut aliquid ex ea commodi "
+ + "consequatur? quis autem vel eum iure reprehenderit, qui in ea "
+ + "voluptate velit esse, quam nihil molestiae consequatur, vel illum, "
+ + "qui dolorem eum fugiat, quo voluptas nulla pariatur?\n";
+
+assertEquals(893, lipsum.indexOf("lorem ipsum, quia dolor sit, amet"),
+ "Lipsum");
+// Test a lot of substrings of differing lengths and start positions.
+for(var i = 0; i < lipsum.length; i += 3) {
+ for(var len = 1; i + len < lipsum.length; len += 7) {
+ var substring = lipsum.substring(i, i + len);
+ var index = -1;
+ do {
+ index = lipsum.indexOf(substring, index + 1);
+ assertTrue(index != -1,
+ "Lipsum substring " + i + ".." + (i + len-1) + " not found");
+ assertEquals(lipsum.substring(index, index + len), substring,
+ "Wrong lipsum substring found: " + i + ".." + (i + len - 1) + "/" +
+ index + ".." + (index + len - 1));
+ } while (index >= 0 && index < i);
+ assertEquals(i, index, "Lipsum match at " + i + ".." + (i + len - 1));
+ }
+}
diff --git a/test/mjsunit/switch.js b/test/mjsunit/switch.js
index 3b8458d..ae5ce2b 100644
--- a/test/mjsunit/switch.js
+++ b/test/mjsunit/switch.js
@@ -27,7 +27,7 @@
function f0() {
switch (0) {
- // switch deliberatly left empty
+ // switch deliberately left empty
}
}
diff --git a/tools/tickprocessor.py b/tools/tickprocessor.py
index 5ea8e28..e3df951 100644
--- a/tools/tickprocessor.py
+++ b/tools/tickprocessor.py
@@ -65,7 +65,9 @@
self.size = size
def ToString(self):
- return self.name + ' ' + self.type
+ name = self.name
+ if name == '': name = '<anonymous>'
+ return self.type + ': ' + name
class TickProcessor(object):
diff --git a/tools/v8.xcodeproj/project.pbxproj b/tools/v8.xcodeproj/project.pbxproj
index 632efd8..205fd95 100644
--- a/tools/v8.xcodeproj/project.pbxproj
+++ b/tools/v8.xcodeproj/project.pbxproj
@@ -247,6 +247,8 @@
8900116B0E71CA2300F91F35 /* libraries.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = libraries.cc; sourceTree = "<group>"; };
89495E460E79FC23001F68C3 /* compilation-cache.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = "compilation-cache.cc"; sourceTree = "<group>"; };
89495E470E79FC23001F68C3 /* compilation-cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "compilation-cache.h"; sourceTree = "<group>"; };
+ 8964482B0E9C00F700E7C516 /* codegen-ia32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "codegen-ia32.h"; sourceTree = "<group>"; };
+ 896448BC0E9D530500E7C516 /* codegen-arm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "codegen-arm.h"; sourceTree = "<group>"; };
8970F2F00E719FB2006AE7B5 /* libv8.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libv8.a; sourceTree = BUILT_PRODUCTS_DIR; };
897F767A0E71B4CC007ACF34 /* v8_shell */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = v8_shell; sourceTree = BUILT_PRODUCTS_DIR; };
897FF0D40E719A8500D62E90 /* v8-debug.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "v8-debug.h"; sourceTree = "<group>"; };
@@ -456,8 +458,8 @@
897FF1B50E719C0900D62E90 /* shell.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = shell.cc; sourceTree = "<group>"; };
897FF1B60E719C2300D62E90 /* js2c.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = js2c.py; sourceTree = "<group>"; };
897FF1B70E719C2E00D62E90 /* macros.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; name = macros.py; path = ../src/macros.py; sourceTree = "<group>"; };
- 89B12E8D0E7FF2A40080BA62 /* presubmit.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = presubmit.py; sourceTree = "<group>"; };
897FF1BF0E719CB600D62E90 /* libjscre.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libjscre.a; sourceTree = BUILT_PRODUCTS_DIR; };
+ 89B12E8D0E7FF2A40080BA62 /* presubmit.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = presubmit.py; sourceTree = "<group>"; };
89F23C870E78D5B2006B2466 /* libv8-arm.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = "libv8-arm.a"; sourceTree = BUILT_PRODUCTS_DIR; };
89F23C950E78D5B6006B2466 /* v8_shell-arm */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "v8_shell-arm"; sourceTree = BUILT_PRODUCTS_DIR; };
/* End PBXFileReference section */
@@ -579,7 +581,9 @@
897FF1120E719B8F00D62E90 /* code-stubs.h */,
897FF1130E719B8F00D62E90 /* code.h */,
897FF1140E719B8F00D62E90 /* codegen-arm.cc */,
+ 896448BC0E9D530500E7C516 /* codegen-arm.h */,
897FF1150E719B8F00D62E90 /* codegen-ia32.cc */,
+ 8964482B0E9C00F700E7C516 /* codegen-ia32.h */,
897FF1160E719B8F00D62E90 /* codegen-inl.h */,
897FF1170E719B8F00D62E90 /* codegen.cc */,
897FF1180E719B8F00D62E90 /* codegen.h */,
diff --git a/tools/visual_studio/v8_base.vcproj b/tools/visual_studio/v8_base.vcproj
index e79dc4e..37e604d 100644
--- a/tools/visual_studio/v8_base.vcproj
+++ b/tools/visual_studio/v8_base.vcproj
@@ -369,10 +369,18 @@
>
</File>
<File
+ RelativePath="..\..\src\codegen-arm.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\codegen-ia32.cc"
>
</File>
<File
+ RelativePath="..\..\src\codegen-ia32.h"
+ >
+ </File>
+ <File
RelativePath="..\..\src\codegen-inl.h"
>
</File>
diff --git a/tools/visual_studio/v8_snapshot.vcproj b/tools/visual_studio/v8_snapshot.vcproj
index efdfb8a..8a17a1f 100644
--- a/tools/visual_studio/v8_snapshot.vcproj
+++ b/tools/visual_studio/v8_snapshot.vcproj
@@ -152,7 +152,7 @@
</File>
</Filter>
<File
- RelativePath="$(OutDir)\mksnapshot.exe"
+ RelativePath="$(OutDir)\v8_mksnapshot.exe"
>
<FileConfiguration
Name="Debug|Win32"