Upgrade to V8 3.4
Merge 3.4.14.35
Simple merge required updates to makefiles only.
Bug: 568872
Change-Id: I403a38452c547e06fcfa951c12eca12a1bc40978
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index c7050a7..89df079 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -320,13 +320,13 @@
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- num_prinfo_ = 0;
+ num_pending_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
- last_const_pool_end_ = 0;
+ first_const_pool_use_ = -1;
last_bound_pos_ = 0;
- ast_id_for_reloc_info_ = kNoASTId;
+ ClearRecordedAstId();
}
@@ -346,7 +346,7 @@
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
// Setup code descriptor.
desc->buffer = buffer_;
@@ -873,7 +873,7 @@
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
}
}
@@ -997,7 +997,7 @@
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
}
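
Note on the branch_offset arithmetic above: the displacement is computed
relative to pc_offset() + kPcLoadDelta because reading the pc on ARM yields
the address of the current instruction plus 8 (two instructions ahead). A
minimal standalone check of that arithmetic, assuming kPcLoadDelta == 8:

    #include <cassert>

    const int kInstrSize = 4;    // every ARM instruction is 4 bytes
    const int kPcLoadDelta = 8;  // reading pc yields current instruction + 8

    // Displacement encoded in a branch: target minus (branch pc + 8).
    int branch_offset(int target_pos, int branch_pc_offset) {
      return target_pos - (branch_pc_offset + kPcLoadDelta);
    }

    int main() {
      // A branch at offset 0 to a target two instructions later encodes
      // displacement 0: the pipelined pc already points at the target.
      assert(branch_offset(2 * kInstrSize, 0) == 0);
      return 0;
    }
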
@@ -1493,15 +1493,17 @@
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
ASSERT(code >= kDefaultStopCode);
- // The Simulator will handle the stop instruction and get the message address.
- // It expects to find the address just after the svc instruction.
- BlockConstPoolFor(2);
- if (code >= 0) {
- svc(kStopCode + code, cond);
- } else {
- svc(kStopCode + kMaxStopCode, cond);
+ {
+ // The Simulator will handle the stop instruction and get the message
+ // address. It expects to find the address just after the svc instruction.
+ BlockConstPoolScope block_const_pool(this);
+ if (code >= 0) {
+ svc(kStopCode + code, cond);
+ } else {
+ svc(kStopCode + kMaxStopCode, cond);
+ }
+ emit(reinterpret_cast<Instr>(msg));
}
- emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
if (cond != al) {
@@ -2406,11 +2408,6 @@
}
-void Assembler::BlockConstPoolFor(int instructions) {
- BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
@@ -2474,8 +2471,8 @@
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
@@ -2489,7 +2486,7 @@
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@@ -2500,7 +2497,7 @@
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
- ASSERT(num_prinfo_ == 0);
+ ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@@ -2517,11 +2514,14 @@
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
} else {
- ASSERT(num_prinfo_ < kMaxNumPRInfo);
- prinfo_[num_prinfo_++] = rinfo;
+ ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
+ if (num_pending_reloc_info_ == 0) {
+ first_const_pool_use_ = pc_offset();
+ }
+ pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
- BlockConstPoolBefore(pc_offset() + kInstrSize);
+ BlockConstPoolFor(1);
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
@@ -2537,9 +2537,8 @@
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- ASSERT(ast_id_for_reloc_info_ != kNoASTId);
- RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
- ast_id_for_reloc_info_ = kNoASTId;
+ RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId());
+ ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
reloc_info_writer.Write(&rinfo);
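
Note on the recorded AST id above: the raw ast_id_for_reloc_info_ field is
replaced by recorded_ast_id_ behind accessors whose ASSERTs enforce a strict
set-once/consume-once protocol. A minimal sketch of that protocol; the class
name, assert macro, and kNoASTId sentinel value are assumptions, not V8's
actual definitions:

    #include <cassert>

    const unsigned kNoASTId = static_cast<unsigned>(-1);  // assumed sentinel

    // Mirrors SetRecordedAstId / RecordedAstId / ClearRecordedAstId:
    // setting twice or reading while empty trips an assert.
    class AstIdSlot {
     public:
      void Set(unsigned ast_id) {
        assert(id_ == kNoASTId);  // previous id must have been consumed
        id_ = ast_id;
      }
      unsigned Get() const {
        assert(id_ != kNoASTId);  // an id must have been recorded
        return id_;
      }
      void Clear() { id_ = kNoASTId; }
     private:
      unsigned id_ = kNoASTId;
    };

    int main() {
      AstIdSlot recorded;
      recorded.Set(42);              // call site records the AST id
      unsigned id = recorded.Get();  // RecordRelocInfo consumes it
      recorded.Clear();
      assert(id == 42);
      return 0;
    }
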
@@ -2548,111 +2547,112 @@
}
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
- // Calculate the offset of the next check. It will be overwritten
- // when a const pool is generated or when const pools are being
- // blocked for a specific range.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
-
- // There is nothing to do if there are no pending relocation info entries.
- if (num_prinfo_ == 0) return;
-
- // We emit a constant pool at regular intervals of about kDistBetweenPools
- // or when requested by parameter force_emit (e.g. after each function).
- // We prefer not to emit a jump unless the max distance is reached or if we
- // are running low on slots, which can happen if a lot of constants are being
- // emitted (e.g. --debug-code and many static references).
- int dist = pc_offset() - last_const_pool_end_;
- if (!force_emit && dist < kMaxDistBetweenPools &&
- (require_jump || dist < kDistBetweenPools) &&
- // TODO(1236125): Cleanup the "magic" number below. We know that
- // the code generation will test every kCheckConstIntervalInst.
- // Thus we are safe as long as we generate less than 7 constant
- // entries per instruction.
- (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
- return;
+void Assembler::BlockConstPoolFor(int instructions) {
+ int pc_limit = pc_offset() + instructions * kInstrSize;
+ if (no_const_pool_before_ < pc_limit) {
+    // If there are pending entries, the constant pool cannot be blocked
+    // beyond first_const_pool_use_ + kMaxDistToPool.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ no_const_pool_before_ = pc_limit;
}
- // If we did not return by now, we need to emit the constant pool soon.
+ if (next_buffer_check_ < no_const_pool_before_) {
+ next_buffer_check_ = no_const_pool_before_;
+ }
+}
- // However, some small sequences of instructions must not be broken up by the
- // insertion of a constant pool; such sequences are protected by setting
- // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
- // both checked here. Also, recursive calls to CheckConstPool are blocked by
- // no_const_pool_before_.
- if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
- // Emission is currently blocked; make sure we try again as soon as
- // possible.
- if (const_pool_blocked_nesting_ > 0) {
- next_buffer_check_ = pc_offset() + kInstrSize;
- } else {
- next_buffer_check_ = no_const_pool_before_;
- }
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Some short sequences of instructions must not be broken up by constant
+  // pool emission; such sequences are protected by calls to BlockConstPoolFor
+  // and BlockConstPoolScope.
+ if (is_const_pool_blocked()) {
// Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);
return;
}
- int jump_instr = require_jump ? kInstrSize : 0;
+ // There is nothing to do if there are no pending constant pool entries.
+ if (num_pending_reloc_info_ == 0) {
+ // Calculate the offset of the next check.
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ return;
+ }
+
+ // We emit a constant pool when:
+ // * requested to do so by parameter force_emit (e.g. after each function).
+ // * the distance to the first instruction accessing the constant pool is
+ // kAvgDistToPool or more.
+ // * no jump is required and the distance to the first instruction accessing
+ // the constant pool is at least kMaxDistToPool / 2.
+ ASSERT(first_const_pool_use_ >= 0);
+ int dist = pc_offset() - first_const_pool_use_;
+ if (!force_emit && dist < kAvgDistToPool &&
+ (require_jump || (dist < (kMaxDistToPool / 2)))) {
+ return;
+ }
// Check that the code buffer is large enough before emitting the constant
- // pool and relocation information (include the jump over the pool and the
- // constant pool marker).
- int max_needed_space =
- jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
- while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+  // pool (including the jump over the pool, the constant pool marker, and
+  // the gap to the relocation information).
+ int jump_instr = require_jump ? kInstrSize : 0;
+ int needed_space = jump_instr + kInstrSize +
+ num_pending_reloc_info_ * kInstrSize + kGap;
+ while (buffer_space() <= needed_space) GrowBuffer();
- // Block recursive calls to CheckConstPool.
- BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
- num_prinfo_*kInstrSize);
- // Don't bother to check for the emit calls below.
- next_buffer_check_ = no_const_pool_before_;
+ {
+ // Block recursive calls to CheckConstPool.
+ BlockConstPoolScope block_const_pool(this);
- // Emit jump over constant pool if necessary.
- Label after_pool;
- if (require_jump) b(&after_pool);
-
- RecordComment("[ Constant Pool");
-
- // Put down constant pool marker "Undefined instruction" as specified by
- // A5.6 (ARMv7) Instruction set encoding.
- emit(kConstantPoolMarker | num_prinfo_);
-
- // Emit constant pool entries.
- for (int i = 0; i < num_prinfo_; i++) {
- RelocInfo& rinfo = prinfo_[i];
- ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
- rinfo.rmode() != RelocInfo::POSITION &&
- rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
- Instr instr = instr_at(rinfo.pc());
-
- // Instruction to patch must be a ldr/str [pc, #offset].
- // P and U set, B and W clear, Rn == pc, offset12 still 0.
- ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
- (2*B25 | P | U | pc.code()*B16));
- int delta = pc_ - rinfo.pc() - 8;
- ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
- if (delta < 0) {
- instr &= ~U;
- delta = -delta;
+ // Emit jump over constant pool if necessary.
+ Label after_pool;
+ if (require_jump) {
+ b(&after_pool);
}
- ASSERT(is_uint12(delta));
- instr_at_put(rinfo.pc(), instr + delta);
- emit(rinfo.data());
- }
- num_prinfo_ = 0;
- last_const_pool_end_ = pc_offset();
- RecordComment("]");
+ RecordComment("[ Constant Pool");
- if (after_pool.is_linked()) {
- bind(&after_pool);
+ // Put down constant pool marker "Undefined instruction" as specified by
+ // A5.6 (ARMv7) Instruction set encoding.
+ emit(kConstantPoolMarker | num_pending_reloc_info_);
+
+ // Emit constant pool entries.
+ for (int i = 0; i < num_pending_reloc_info_; i++) {
+ RelocInfo& rinfo = pending_reloc_info_[i];
+ ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+ rinfo.rmode() != RelocInfo::POSITION &&
+ rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+
+ Instr instr = instr_at(rinfo.pc());
+ // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+ ASSERT(IsLdrPcImmediateOffset(instr) &&
+ GetLdrRegisterImmediateOffset(instr) == 0);
+
+ int delta = pc_ - rinfo.pc() - kPcLoadDelta;
+ // 0 is the smallest delta:
+ // ldr rd, [pc, #0]
+ // constant pool marker
+ // data
+ ASSERT(is_uint12(delta));
+
+ instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
+ emit(rinfo.data());
+ }
+
+ num_pending_reloc_info_ = 0;
+ first_const_pool_use_ = -1;
+
+ RecordComment("]");
+
+ if (after_pool.is_linked()) {
+ bind(&after_pool);
+ }
}
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
- next_buffer_check_ = pc_offset() + kCheckConstInterval;
+ next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
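
Note on the constant pool blocking rework above: ad-hoc calls to
BlockConstPoolBefore(pc_offset() + n * kInstrSize) become BlockConstPoolFor(n)
plus the RAII helper BlockConstPoolScope, which balances Start/EndBlockConstPool
across nested sequences automatically. A minimal standalone sketch of that
nesting pattern; the Assembler stand-in below keeps only the nesting counter,
not V8's pc-offset bookkeeping:

    #include <cassert>

    class Assembler {
     public:
      void StartBlockConstPool() { ++const_pool_blocked_nesting_; }
      void EndBlockConstPool() { --const_pool_blocked_nesting_; }
      bool is_const_pool_blocked() const {
        return const_pool_blocked_nesting_ > 0;
      }
     private:
      int const_pool_blocked_nesting_ = 0;
    };

    // RAII guard mirroring BlockConstPoolScope: emission is blocked at
    // construction and unblocked at destruction, even on early returns.
    class BlockConstPoolScope {
     public:
      explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
        assem_->StartBlockConstPool();
      }
      ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
     private:
      Assembler* assem_;
    };

    int main() {
      Assembler assem;
      {
        BlockConstPoolScope scope(&assem);  // pool emission blocked here
        assert(assem.is_const_pool_blocked());
      }  // scope closes: emission unblocked
      assert(!assem.is_const_pool_blocked());
      return 0;
    }
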
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 2ab46b3..97d4226 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -167,13 +167,14 @@
// Double word VFP register.
struct DwVfpRegister {
- // d0 has been excluded from allocation. This is following ia32
- // where xmm0 is excluded. This should be revisited.
- // Currently d0 is used as a scratch register.
- // d1 has also been excluded from allocation to be used as a scratch
- // register as well.
static const int kNumRegisters = 16;
- static const int kNumAllocatableRegisters = 15;
+  // A few double registers are reserved: one as a scratch register and one to
+  // hold 0.0, which does not fit in the immediate field of vmov instructions.
+ // d14: 0.0
+ // d15: scratch register.
+ static const int kNumReservedRegisters = 2;
+ static const int kNumAllocatableRegisters = kNumRegisters -
+ kNumReservedRegisters;
static int ToAllocationIndex(DwVfpRegister reg) {
ASSERT(reg.code() != 0);
@@ -188,6 +189,7 @@
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
+ "d0",
"d1",
"d2",
"d3",
@@ -200,9 +202,7 @@
"d10",
"d11",
"d12",
- "d13",
- "d14",
- "d15"
+ "d13"
};
return names[index];
}
@@ -306,6 +306,7 @@
// Aliases for double registers.
const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
+const DwVfpRegister kDoubleRegZero = d14;
// Coprocessor register
@@ -377,7 +378,6 @@
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(explicit Operand(const ExternalReference& f));
- INLINE(explicit Operand(const char* s));
explicit Operand(Handle<Object> handle);
INLINE(explicit Operand(Smi* value));
@@ -455,6 +455,7 @@
Register rn() const { return rn_; }
Register rm() const { return rm_; }
+ AddrMode am() const { return am_; }
bool OffsetIsUint12Encodable() const {
return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
@@ -504,6 +505,7 @@
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
+
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
@@ -523,10 +525,12 @@
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
+
private:
Isolate* isolate_;
unsigned old_enabled_;
#else
+
public:
explicit Scope(CpuFeature f) {}
#endif
@@ -1136,8 +1140,13 @@
void jmp(Label* L) { b(L, al); }
// Check the code size generated from label to here.
- int InstructionsGeneratedSince(Label* l) {
- return (pc_offset() - l->pos()) / kInstrSize;
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
}
// Check whether an immediate fits an addressing mode 1 instruction.
@@ -1159,10 +1168,6 @@
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
- // Postpone the generation of the constant pool for the specified number of
- // instructions.
- void BlockConstPoolFor(int instructions);
-
// Debugging
// Mark address of the ExitJSFrame code.
@@ -1173,7 +1178,17 @@
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
- void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
+ void SetRecordedAstId(unsigned ast_id) {
+ ASSERT(recorded_ast_id_ == kNoASTId);
+ recorded_ast_id_ = ast_id;
+ }
+
+ unsigned RecordedAstId() {
+ ASSERT(recorded_ast_id_ != kNoASTId);
+ return recorded_ast_id_;
+ }
+
+ void ClearRecordedAstId() { recorded_ast_id_ = kNoASTId; }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
@@ -1222,24 +1237,24 @@
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
- // Buffer size and constant pool distance are checked together at regular
- // intervals of kBufferCheckInterval emitted bytes
- static const int kBufferCheckInterval = 1*KB/2;
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
- // and the accessed constant. We satisfy this constraint by limiting the
- // distance between pools.
- static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
- static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+ // and the accessed constant.
+ static const int kMaxDistToPool = 4*KB;
+ static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
- // Check if is time to emit a constant pool for pending reloc info entries
+ // Postpone the generation of the constant pool for the specified number of
+ // instructions.
+ void BlockConstPoolFor(int instructions);
+
+  // Check whether it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
- unsigned ast_id_for_reloc_info_;
+ unsigned recorded_ast_id_;
bool emit_debug_code() const { return emit_debug_code_; }
@@ -1257,18 +1272,37 @@
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
- // Block the emission of the constant pool before pc_offset
- void BlockConstPoolBefore(int pc_offset) {
- if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+  // Prevent constant pool emission until EndBlockConstPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockConstPool.
+ void StartBlockConstPool() {
+ if (const_pool_blocked_nesting_++ == 0) {
+ // Prevent constant pool checks happening by setting the next check to
+ // the biggest possible offset.
+ next_buffer_check_ = kMaxInt;
+ }
}
- void StartBlockConstPool() {
- const_pool_blocked_nesting_++;
- }
+  // Resume constant pool emission. Needs to be called as many times as
+  // StartBlockConstPool to have an effect.
void EndBlockConstPool() {
- const_pool_blocked_nesting_--;
+ if (--const_pool_blocked_nesting_ == 0) {
+ // Check the constant pool hasn't been blocked for too long.
+ ASSERT((num_pending_reloc_info_ == 0) ||
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ // Two cases:
+ // * no_const_pool_before_ >= next_buffer_check_ and the emission is
+ // still blocked
+ // * no_const_pool_before_ < next_buffer_check_ and the next emit will
+ // trigger a check.
+ next_buffer_check_ = no_const_pool_before_;
+ }
}
- bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
+
+ bool is_const_pool_blocked() const {
+ return (const_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_const_pool_before_);
+ }
private:
// Code buffer:
@@ -1302,33 +1336,41 @@
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
- static const int kCheckConstIntervalInst = 32;
- static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+ static const int kCheckPoolIntervalInst = 32;
+ static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
- // Pools are emitted after function return and in dead code at (more or less)
- // regular intervals of kDistBetweenPools bytes
- static const int kDistBetweenPools = 1*KB;
+  // Average distance between a constant pool and the first instruction
+  // accessing the constant pool. A longer distance should result in less
+  // I-cache pollution.
+ // In practice the distance will be smaller since constant pool emission is
+ // forced after function return and sometimes after unconditional branches.
+ static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
- // Keep track of the last emitted pool to guarantee a maximal distance
- int last_const_pool_end_; // pc offset following the last constant pool
+ // Keep track of the first instruction requiring a constant pool entry
+ // since the previous constant pool was emitted.
+ int first_const_pool_use_;
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
+
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
// stored in a separate buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
- RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
- int num_prinfo_; // number of pending reloc info entries in the buffer
+
+  // The buffer of pending relocation info.
+  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
+  // Number of pending reloc info entries in the buffer.
+ int num_pending_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
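
Note on the new pool-distance constants above: a pc-relative ldr on ARM has a
12-bit offset field, so a constant must sit within 4KB of the instruction that
loads it; with one 4-byte instruction per pending entry this also bounds the
number of pending relocation entries. A small standalone check of that
arithmetic, using the values from this header:

    #include <cassert>

    const int KB = 1024;
    const int kInstrSize = 4;           // one ARM instruction is 4 bytes
    const int kMaxDistToPool = 4 * KB;  // ldr rd, [pc, #off] reaches +/-4KB
    const int kMaxNumPendingRelocInfo = kMaxDistToPool / kInstrSize;

    // Same predicate CheckConstPool asserts before patching an ldr offset.
    bool is_uint12(int x) { return x >= 0 && x < (1 << 12); }

    int main() {
      // Worst case: every instruction since first_const_pool_use_ loads from
      // the pool, so one pending entry per instruction bounds the buffer.
      assert(kMaxNumPendingRelocInfo == 1024);
      // A load at the maximum encodable distance still fits in 12 bits.
      assert(is_uint12(kMaxDistToPool - kInstrSize));
      return 0;
    }
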
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 794b370..328102b 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -619,8 +619,7 @@
Label non_function_call;
// Check that the function is not a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function_call);
+ __ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function_call);
@@ -675,8 +674,7 @@
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &rt_call);
+ __ JumpIfSmi(r2, &rt_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &rt_call);
@@ -946,12 +944,11 @@
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &use_receiver);
+ __ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
- // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
- __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
+ // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+ __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
// Throw away the result of the constructor invocation and use the
@@ -1047,8 +1044,7 @@
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall(),
- RelocInfo::CODE_TARGET);
+ __ Call(masm->isolate()->builtins()->JSConstructCall());
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
@@ -1236,8 +1232,7 @@
// r0: actual number of arguments
Label non_function;
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &non_function);
+ __ JumpIfSmi(r1, &non_function);
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
@@ -1257,8 +1252,7 @@
__ b(ne, &shift_arguments);
// Do not transform the receiver for native (Compilerhints already in r3).
- __ tst(r3, Operand(1 << (SharedFunctionInfo::kES5Native +
- kSmiTagSize)));
+ __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
// Compute the receiver in non-strict mode.
@@ -1267,8 +1261,7 @@
// r0: actual number of arguments
// r1: function
// r2: first argument
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &convert_to_object);
+ __ JumpIfSmi(r2, &convert_to_object);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r3);
@@ -1277,9 +1270,8 @@
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
@@ -1443,13 +1435,11 @@
__ b(ne, &push_receiver);
// Do not transform the receiver for strict mode functions.
- __ tst(r2, Operand(1 << (SharedFunctionInfo::kES5Native +
- kSmiTagSize)));
+ __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
// Compute the receiver in non-strict mode.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &call_to_object);
+ __ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
@@ -1459,9 +1449,8 @@
// Check if the receiver is already a JavaScript object.
// r0: receiver
- STATIC_ASSERT(LAST_JS_OBJECT_TYPE + 1 == LAST_TYPE);
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+ STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &push_receiver);
// Convert the receiver to a regular object.
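
Note on the JumpIfSmi conversions above: smis are tagged with a clear low bit,
so the recurring pair "tst rX, Operand(kSmiTagMask); b(eq, label)" tests that
bit and branches, and JumpIfSmi folds the pair into one macro-assembler call.
A toy sketch of the pattern; the string-recording MacroAssembler below is an
illustration, not V8's real encoder:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Toy macro-assembler that records instructions as text.
    struct MacroAssembler {
      std::vector<std::string> code;
      void tst(const std::string& reg, const std::string& op) {
        code.push_back("tst " + reg + ", " + op);
      }
      void b(const std::string& cond, const std::string& label) {
        code.push_back("b" + cond + " " + label);
      }
      // Smis have a 0 low bit, so tst sets Z for a smi and the eq branch is
      // taken -- exactly the two-instruction pair JumpIfSmi replaces.
      void JumpIfSmi(const std::string& reg, const std::string& label) {
        tst(reg, "#kSmiTagMask");
        b("eq", label);
      }
    };

    int main() {
      MacroAssembler masm;
      masm.JumpIfSmi("r1", "non_function_call");
      for (const std::string& line : masm.code) {
        std::printf("%s\n", line.c_str());
      }
      return 0;
    }
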
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 5e6c0c3..eaad9f2 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -69,8 +69,7 @@
void ToNumberStub::Generate(MacroAssembler* masm) {
// The ToNumber stub takes one argument in eax.
Label check_heap_number, call_builtin;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &check_heap_number);
+ __ JumpIfNotSmi(r0, &check_heap_number);
__ Ret();
__ bind(&check_heap_number);
@@ -158,7 +157,7 @@
__ ldr(r3, MemOperand(sp, 0));
// Setup the object header.
- __ LoadRoot(r2, Heap::kContextMapRootIndex);
+ __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
@@ -166,11 +165,10 @@
// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
- // Copy the global object from the surrounding context.
+ // Copy the global object from the previous context.
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -187,7 +185,7 @@
// Need to collect. Call into runtime system.
__ bind(&gc);
- __ TailCallRuntime(Runtime::kNewContext, 1, 1);
+ __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
@@ -306,12 +304,6 @@
}
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "ConvertToDoubleStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("ConvertToDoubleStub\n"); }
-#endif
};
@@ -394,11 +386,11 @@
__ mov(scratch1, Operand(r0));
ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
__ push(lr);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Write Smi from r1 to r1 and r0 in double format.
__ mov(scratch1, Operand(r1));
ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(lr);
}
}
@@ -475,7 +467,7 @@
__ mov(scratch1, Operand(object));
ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
__ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub.GetCode());
__ pop(lr);
}
@@ -931,14 +923,14 @@
// They are both equal and they are not both Smis so both of them are not
// Smis. If it's not a heap number, then return equal.
if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
// Comparing JS objects with <=, >= is complicated.
if (cond != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(ge, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
@@ -1029,8 +1021,7 @@
(lhs.is(r1) && rhs.is(r0)));
Label rhs_is_smi;
- __ tst(rhs, Operand(kSmiTagMask));
- __ b(eq, &rhs_is_smi);
+ __ JumpIfSmi(rhs, &rhs_is_smi);
// Lhs is a Smi. Check whether the rhs is a heap number.
__ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
@@ -1061,7 +1052,7 @@
// Convert lhs to a double in r2, r3.
__ mov(r7, Operand(lhs));
ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub1.GetCode());
// Load rhs to a double in r0, r1.
__ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ pop(lr);
@@ -1103,7 +1094,7 @@
// Convert rhs to a double in r0, r1.
__ mov(r7, Operand(rhs));
ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
+ __ Call(stub2.GetCode());
__ pop(lr);
}
// Fall through to both_loaded_as_doubles.
@@ -1220,14 +1211,14 @@
ASSERT((lhs.is(r0) && rhs.is(r1)) ||
(lhs.is(r1) && rhs.is(r0)));
- // If either operand is a JSObject or an oddball value, then they are
+ // If either operand is a JS object or an oddball value, then they are
// not equal since their pointers are different.
// There is no test for undetectability in strict equality.
- STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
Label first_non_object;
// Get the type of the first operand into r2 and compare it with
- // FIRST_JS_OBJECT_TYPE.
- __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
+ // FIRST_SPEC_OBJECT_TYPE.
+ __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, &first_non_object);
// Return non-zero (r0 is not zero)
@@ -1240,7 +1231,7 @@
__ cmp(r2, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
- __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &return_not_equal);
// Check for oddballs: true, false, null, undefined.
@@ -1317,9 +1308,9 @@
__ Ret();
__ bind(&object_test);
- __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(lt, not_both_strings);
- __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, not_both_strings);
// If both objects are undetectable, they are equal. Otherwise, they
// are not equal, since they are different objects and an object is not
@@ -1458,8 +1449,7 @@
if (include_smi_compare_) {
Label not_two_smis, smi_done;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, ¬_two_smis);
+ __ JumpIfNotSmi(r2, ¬_two_smis);
__ mov(r1, Operand(r1, ASR, 1));
__ sub(r0, r1, Operand(r0, ASR, 1));
__ Ret();
@@ -1482,8 +1472,7 @@
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
__ and_(r2, lhs_, Operand(rhs_));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, ¬_smis);
+ __ JumpIfNotSmi(r2, ¬_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
// 2) Go to slow.
@@ -1614,15 +1603,13 @@
}
-// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub uses VFP3 instructions.
CpuFeatures::Scope scope(VFP3);
- Label false_result;
- Label not_heap_number;
- Register scratch = r9.is(tos_) ? r7 : r9;
+ Label false_result, true_result, not_string;
+ const Register map = r9.is(tos_) ? r7 : r9;
// undefined -> false
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -1652,11 +1639,31 @@
__ cmp(tos_, ip);
__ b(eq, &false_result);
- // HeapNumber => false iff +0, -0, or NaN.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- __ b(¬_heap_number, ne);
+ // Get the map of the heap object.
+ __ ldr(map, FieldMemOperand(tos_, HeapObject::kMapOffset));
+
+ // Undetectable -> false.
+ __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ tst(ip, Operand(1 << Map::kIsUndetectable));
+ __ b(&false_result, ne);
+
+ // JavaScript object -> true.
+ __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+ // "tos_" is a register and contains a non-zero value. Hence we implicitly
+ // return true if the greater than condition is satisfied.
+ __ Ret(ge);
+
+ // String value -> false iff empty.
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ b(¬_string, ge);
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
+ // Return string length as boolean value, i.e. return false iff length is 0.
+ __ Ret();
+
+ __ bind(¬_string);
+ // HeapNumber -> false iff +0, -0, or NaN.
+ __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+ __ b(&true_result, ne);
__ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
__ VFPCompareAndSetFlags(d1, 0.0);
// "tos_" is a register, and contains a non zero value by default.
@@ -1666,72 +1673,27 @@
__ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
__ Ret();
- __ bind(¬_heap_number);
-
- // It can be an undetectable object.
- // Undetectable => false.
- __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset));
- __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable));
- __ cmp(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(&false_result, eq);
-
- // JavaScript object => true.
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // Check for string
- __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE));
- // "tos_" is a register and contains a non-zero value.
- // Hence we implicitly return true if the greater than
- // condition is satisfied.
- __ Ret(gt);
-
- // String value => false iff empty, i.e., length is zero
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset));
- // If length is zero, "tos_" contains zero ==> false.
- // If length is not zero, "tos_" contains a non-zero value ==> true.
+ // Return 1/0 for true/false in tos_.
+ __ bind(&true_result);
+ __ mov(tos_, Operand(1, RelocInfo::NONE));
__ Ret();
-
- // Return 0 in "tos_" for false .
__ bind(&false_result);
__ mov(tos_, Operand(0, RelocInfo::NONE));
__ Ret();
}
-Handle<Code> GetUnaryOpStub(int key, UnaryOpIC::TypeInfo type_info) {
- UnaryOpStub stub(key, type_info);
- return stub.GetCode();
-}
-
-
-const char* UnaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void UnaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name = NULL; // Make g++ happy.
switch (mode_) {
case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "UnaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- UnaryOpIC::GetName(operand_type_));
- return name_;
+ stream->Add("UnaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ UnaryOpIC::GetName(operand_type_));
}
@@ -1755,22 +1717,14 @@
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- // Prepare to push argument.
- __ mov(r3, Operand(r0));
-
- // Push this stub's key. Although the operation and the type info are
- // encoded into the key, the encoding is opaque, so push them too.
- __ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r3, Operand(r0)); // the operand
+ __ mov(r2, Operand(Smi::FromInt(op_)));
+ __ mov(r1, Operand(Smi::FromInt(mode_)));
__ mov(r0, Operand(Smi::FromInt(operand_type_)));
-
__ Push(r3, r2, r1, r0);
__ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kUnaryOp_Patch),
- masm->isolate()),
- 4,
- 1);
+ ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}
@@ -1903,6 +1857,8 @@
void UnaryOpStub::GenerateHeapNumberCodeBitNot(
MacroAssembler* masm, Label* slow) {
+ Label impossible;
+
EmitCheckForHeapNumber(masm, r0, r1, r6, slow);
// Convert the heap number is r0 to an untagged integer in r1.
__ ConvertToInt32(r0, r1, r2, r3, d0, slow);
@@ -1921,17 +1877,27 @@
__ bind(&try_float);
if (mode_ == UNARY_NO_OVERWRITE) {
Label slow_allocate_heapnumber, heapnumber_allocated;
- __ AllocateHeapNumber(r0, r2, r3, r6, &slow_allocate_heapnumber);
+ // Allocate a new heap number without zapping r0, which we need if it fails.
+ __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber);
__ jmp(&heapnumber_allocated);
__ bind(&slow_allocate_heapnumber);
__ EnterInternalFrame();
- __ push(r1);
- __ CallRuntime(Runtime::kNumberAlloc, 0);
- __ pop(r1);
+ __ push(r0); // Push the heap number, not the untagged int32.
+ __ CallRuntime(Runtime::kNumberAlloc, 0);
+ __ mov(r2, r0); // Move the new heap number into r2.
+ // Get the heap number into r0, now that the new heap number is in r2.
+ __ pop(r0);
__ LeaveInternalFrame();
+ // Convert the heap number in r0 to an untagged integer in r1.
+    // This cannot take the slow case because it is the same number we
+    // already converted once.
+ __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible);
+ __ mvn(r1, Operand(r1));
+
__ bind(&heapnumber_allocated);
+ __ mov(r0, r2); // Move newly allocated heap number to r0.
}
if (CpuFeatures::IsSupported(VFP3)) {
@@ -1948,6 +1914,11 @@
WriteInt32ToHeapNumberStub stub(r1, r0, r2);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
+
+ __ bind(&impossible);
+ if (FLAG_debug_code) {
+ __ stop("Incorrect assumption in bit-not stub");
+ }
}
@@ -2002,14 +1973,6 @@
}
-Handle<Code> GetBinaryOpStub(int key,
- BinaryOpIC::TypeInfo type_info,
- BinaryOpIC::TypeInfo result_type_info) {
- BinaryOpStub stub(key, type_info, result_type_info);
- return stub.GetCode();
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
@@ -2066,12 +2029,7 @@
}
-const char* BinaryOpStub::GetName() {
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
+void BinaryOpStub::PrintName(StringStream* stream) {
const char* op_name = Token::Name(op_);
const char* overwrite_name;
switch (mode_) {
@@ -2080,13 +2038,10 @@
case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
default: overwrite_name = "UnknownOverwrite"; break;
}
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "BinaryOpStub_%s_%s_%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(operands_type_));
- return name_;
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
}
@@ -2444,8 +2399,7 @@
// Perform combined smi check on both operands.
__ orr(scratch1, left, Operand(right));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(scratch1, Operand(kSmiTagMask));
- __ b(ne, ¬_smis);
+ __ JumpIfNotSmi(scratch1, ¬_smis);
// If the smi-smi operation results in a smi return is generated.
GenerateSmiSmiOperation(masm);
@@ -2558,37 +2512,36 @@
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers r0 and r1 (right
- // and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP3) &&
- op_ != Token::MOD ?
- FloatingPointHelper::kVFPRegisters :
- FloatingPointHelper::kCoreRegisters;
+ // Load both operands and check that they are 32-bit integer.
+ // Jump to type transition if they are not. The registers r0 and r1 (right
+ // and left) are preserved for the runtime call.
+ FloatingPointHelper::Destination destination =
+ (CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD)
+ ? FloatingPointHelper::kVFPRegisters
+ : FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- right,
- destination,
- d7,
- r2,
- r3,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
- FloatingPointHelper::LoadNumberAsInt32Double(masm,
- left,
- destination,
- d6,
- r4,
- r5,
- heap_number_map,
- scratch1,
- scratch2,
- s0,
- &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ right,
+ destination,
+ d7,
+ r2,
+ r3,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
+ FloatingPointHelper::LoadNumberAsInt32Double(masm,
+ left,
+ destination,
+ d6,
+ r4,
+ r5,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ s0,
+ &transition);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP3);
@@ -2649,9 +2602,11 @@
// DIV just falls through to allocating a heap number.
}
- if (result_type_ >= (op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
- : BinaryOpIC::INT32) {
- __ bind(&return_heap_number);
+ __ bind(&return_heap_number);
+ // Return a heap number, or fall through to type transition or runtime
+ // call if we can't.
+ if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER
+ : BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
GenerateHeapResultAllocation(masm,
@@ -2825,7 +2780,11 @@
UNREACHABLE();
}
- if (transition.is_linked()) {
+ // We never expect DIV to yield an integer result, so we always generate
+ // type transition code for DIV operations expecting an integer result: the
+ // code will fall through to this type transition.
+ if (transition.is_linked() ||
+ ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
__ bind(&transition);
GenerateTypeTransition(masm);
}
@@ -3394,15 +3353,10 @@
__ mov(r2, Operand(ExternalReference::isolate_address()));
-
- // TODO(1242173): To let the GC traverse the return address of the exit
- // frames, we need to know where the return address is. Right now,
- // we store it on the stack to be able to find it again, but we never
- // restore from it in case of changes, which makes it impossible to
- // support moving the C entry code stub. This should be fixed, but currently
- // this is OK because the CEntryStub gets generated so early in the V8 boot
- // sequence that it is not moving ever.
-
+  // To let the GC traverse the return address of the exit frames, we need to
+  // know where the return address is. The CEntryStub is unmovable, so we can
+  // store the address on the stack to be able to find it again, and we never
+  // have to restore it because it will not change.
// Compute the return address in lr to return to after the jump below. Pc is
// already at '+ 8' from the current instruction but return is after three
// instructions so add another 4 to pc to get the return address.
@@ -3556,6 +3510,8 @@
CpuFeatures::Scope scope(VFP3);
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ // Set up the reserved register for 0.0.
+ __ vmov(kDoubleRegZero, 0.0);
}
// Get address of argv, see stm above.
@@ -3590,7 +3546,6 @@
// Setup frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
-#ifdef ENABLE_LOGGING_AND_PROFILING
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::k_js_entry_sp_address, isolate);
@@ -3606,7 +3561,6 @@
__ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ bind(&cont);
__ push(ip);
-#endif
// Call a faked try-block that does the invoke.
__ bl(&invoke);
@@ -3667,7 +3621,6 @@
__ PopTryHandler();
__ bind(&exit); // r0 holds result
-#ifdef ENABLE_LOGGING_AND_PROFILING
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r5);
@@ -3677,7 +3630,6 @@
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ str(r6, MemOperand(r5));
__ bind(&non_outermost_js_2);
-#endif
// Restore the top frame descriptors from the stack.
__ pop(r3);
@@ -3940,12 +3892,233 @@
}
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
// sp[0] : number of parameters
// sp[4] : receiver displacement
// sp[8] : function
// Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(ne, &runtime);
+
+ // Patch the arguments.length and the parameters pointer in the current frame.
+ __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ str(r2, MemOperand(sp, 0 * kPointerSize));
+ __ add(r3, r3, Operand(r2, LSL, 1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ __ bind(&runtime);
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
+ // Stack layout:
+ // sp[0] : number of parameters (tagged)
+ // sp[4] : address of receiver argument
+ // sp[8] : function
+ // Registers used over whole function:
+ // r6 : allocated object (tagged)
+ // r9 : mapped parameter count (tagged)
+
+ __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+ // r1 = parameter count (tagged)
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label runtime;
+ Label adaptor_frame, try_allocate;
+ __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset));
+ __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ b(eq, &adaptor_frame);
+
+ // No adaptor, parameter count = argument count.
+ __ mov(r2, r1);
+ __ b(&try_allocate);
+
+ // We have an adaptor frame. Patch the parameters pointer.
+ __ bind(&adaptor_frame);
+ __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ add(r3, r3, Operand(r2, LSL, 1));
+ __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
+ __ str(r3, MemOperand(sp, 1 * kPointerSize));
+
+ // r1 = parameter count (tagged)
+ // r2 = argument count (tagged)
+ // Compute the mapped parameter count = min(r1, r2) in r1.
+ __ cmp(r1, Operand(r2));
+ __ mov(r1, Operand(r2), LeaveCC, gt);
+
+ __ bind(&try_allocate);
+
+ // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, with 2 extra words for the context and backing store.
+ const int kParameterMapHeaderSize =
+ FixedArray::kHeaderSize + 2 * kPointerSize;
+ // If there are no mapped parameters, we do not need the parameter_map.
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ __ mov(r9, Operand(0), LeaveCC, eq);
+ __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne);
+ __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
+
+ // 2. Backing store.
+ __ add(r9, r9, Operand(r2, LSL, 1));
+ __ add(r9, r9, Operand(FixedArray::kHeaderSize));
+
+ // 3. Arguments object.
+ __ add(r9, r9, Operand(Heap::kArgumentsObjectSize));
+
+ // Do the allocation of all three objects in one go.
+ __ AllocateInNewSpace(r9, r0, r3, r4, &runtime, TAG_OBJECT);
+
+ // r0 = address of new object(s) (tagged)
+ // r2 = argument count (tagged)
+ // Get the arguments boilerplate from the current (global) context into r4.
+ const int kNormalOffset =
+ Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
+ const int kAliasedOffset =
+ Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+
+ __ ldr(r4, MemOperand(r8, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
+ __ cmp(r1, Operand(0));
+ __ ldr(r4, MemOperand(r4, kNormalOffset), eq);
+ __ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
+
+ // r0 = address of new object (tagged)
+ // r1 = mapped parameter count (tagged)
+ // r2 = argument count (tagged)
+ // r4 = address of boilerplate object (tagged)
+ // Copy the JS object part.
+ for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
+ __ ldr(r3, FieldMemOperand(r4, i));
+ __ str(r3, FieldMemOperand(r0, i));
+ }
+
+ // Setup the callee in-object property.
+ STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+ __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+ const int kCalleeOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsCalleeIndex * kPointerSize;
+ __ str(r3, FieldMemOperand(r0, kCalleeOffset));
+
+ // Use the length (smi tagged) and set that as an in-object property too.
+ STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+ const int kLengthOffset = JSObject::kHeaderSize +
+ Heap::kArgumentsLengthIndex * kPointerSize;
+ __ str(r2, FieldMemOperand(r0, kLengthOffset));
+
+ // Setup the elements pointer in the allocated arguments object.
+ // If we allocated a parameter map, r4 will point there, otherwise
+ // it will point to the backing store.
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSize));
+ __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
+
+ // r0 = address of new object (tagged)
+ // r1 = mapped parameter count (tagged)
+ // r2 = argument count (tagged)
+ // r4 = address of parameter map or backing store (tagged)
+ // Initialize parameter map. If there are no mapped arguments, we're done.
+ Label skip_parameter_map;
+ __ cmp(r1, Operand(Smi::FromInt(0)));
+ // Move backing store address to r3, because it is
+ // expected there when filling in the unmapped arguments.
+ __ mov(r3, r4, LeaveCC, eq);
+ __ b(eq, &skip_parameter_map);
+
+ __ LoadRoot(r6, Heap::kNonStrictArgumentsElementsMapRootIndex);
+ __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset));
+ __ add(r6, r1, Operand(Smi::FromInt(2)));
+ __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ str(r8, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
+ __ add(r6, r4, Operand(r1, LSL, 1));
+ __ add(r6, r6, Operand(kParameterMapHeaderSize));
+ __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+ // Copy the parameter slots and the holes in the arguments.
+ // We need to fill in mapped_parameter_count slots. They index the context,
+ // where parameters are stored in reverse order, at
+ // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+ // MIN_CONTEXT_SLOTS+parameter_count-1 ..
+ // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+ // We loop from right to left.
+ Label parameters_loop, parameters_test;
+ __ mov(r6, r1);
+ __ ldr(r9, MemOperand(sp, 0 * kPointerSize));
+ __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+ __ sub(r9, r9, Operand(r1));
+ __ LoadRoot(r7, Heap::kTheHoleValueRootIndex);
+ __ add(r3, r4, Operand(r6, LSL, 1));
+ __ add(r3, r3, Operand(kParameterMapHeaderSize));
+
+ // r6 = loop variable (tagged)
+ // r1 = mapping index (tagged)
+ // r3 = address of backing store (tagged)
+ // r4 = address of parameter map (tagged)
+  // r5 = temporary scratch (used, among other things, for address calculation)
+ // r7 = the hole value
+ __ jmp(¶meters_test);
+
+ __ bind(¶meters_loop);
+ __ sub(r6, r6, Operand(Smi::FromInt(1)));
+ __ mov(r5, Operand(r6, LSL, 1));
+ __ add(r5, r5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+ __ str(r9, MemOperand(r4, r5));
+ __ sub(r5, r5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+ __ str(r7, MemOperand(r3, r5));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+ __ bind(¶meters_test);
+ __ cmp(r6, Operand(Smi::FromInt(0)));
+ __ b(ne, ¶meters_loop);
+
+ __ bind(&skip_parameter_map);
+ // r2 = argument count (tagged)
+ // r3 = address of backing store (tagged)
+ // r5 = scratch
+ // Copy arguments header and remaining slots (if there are any).
+ __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex);
+ __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset));
+ __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset));
+
+ Label arguments_loop, arguments_test;
+ __ mov(r9, r1);
+ __ ldr(r4, MemOperand(sp, 1 * kPointerSize));
+ __ sub(r4, r4, Operand(r9, LSL, 1));
+ __ jmp(&arguments_test);
+
+ __ bind(&arguments_loop);
+ __ sub(r4, r4, Operand(kPointerSize));
+ __ ldr(r6, MemOperand(r4, 0));
+ __ add(r5, r3, Operand(r9, LSL, 1));
+ __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize));
+ __ add(r9, r9, Operand(Smi::FromInt(1)));
+
+ __ bind(&arguments_test);
+ __ cmp(r9, Operand(r2));
+ __ b(lt, &arguments_loop);
+
+ // Return and remove the on-stack parameters.
+ __ add(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
+
+ // Do the runtime call to allocate the arguments object.
+  // r2 = argument count (tagged)
+ __ bind(&runtime);
+ __ str(r2, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
+ __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+ // sp[0] : number of parameters
+ // sp[4] : receiver displacement
+ // sp[8] : function
+ // Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
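
Note on GenerateNewNonStrictFast above: the mapped parameter count is
min(declared parameters, actual arguments), and the stub sizes all three
allocations (parameter map, backing store, arguments object) before a single
AllocateInNewSpace call. A sketch of that size computation in plain C++; the
header and object sizes below are assumptions standing in for
FixedArray::kHeaderSize, kParameterMapHeaderSize, and
Heap::kArgumentsObjectSize, not the real constants:

    #include <algorithm>
    #include <cassert>

    const int kPointerSize = 4;                          // 32-bit ARM
    const int kFixedArrayHeaderSize = 2 * kPointerSize;  // assumed: map + length
    const int kParameterMapHeaderSize =
        kFixedArrayHeaderSize + 2 * kPointerSize;  // + context + backing store
    const int kArgumentsObjectSize = 6 * kPointerSize;   // assumed object size

    // Mirrors the three-step size computation before AllocateInNewSpace.
    int ArgumentsAllocationSize(int param_count, int arg_count) {
      int mapped = std::min(param_count, arg_count);
      int size = 0;
      if (mapped > 0) {  // no parameter map when nothing is mapped
        size += kParameterMapHeaderSize + mapped * kPointerSize;
      }
      size += kFixedArrayHeaderSize + arg_count * kPointerSize;  // store
      size += kArgumentsObjectSize;  // the JSObject itself
      return size;
    }

    int main() {
      // f(a, b) called with 3 arguments: 2 mapped slots, 3 store slots.
      // 16 + 2*4 = 24 (map), 8 + 3*4 = 20 (store), 24 (object) -> 68 bytes.
      assert(ArgumentsAllocationSize(2, 3) == 68);
      return 0;
    }
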
@@ -3973,40 +4146,31 @@
__ mov(r1, Operand(r1, LSR, kSmiTagSize));
__ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize));
__ bind(&add_arguments_object);
- __ add(r1, r1, Operand(GetArgumentsObjectSize() / kPointerSize));
+ __ add(r1, r1, Operand(Heap::kArgumentsObjectSizeStrict / kPointerSize));
// Do the allocation of both objects in one go.
- __ AllocateInNewSpace(
- r1,
- r0,
- r2,
- r3,
- &runtime,
- static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+ __ AllocateInNewSpace(r1,
+ r0,
+ r2,
+ r3,
+ &runtime,
+ static_cast<AllocationFlags>(TAG_OBJECT |
+ SIZE_IN_WORDS));
// Get the arguments boilerplate from the current (global) context.
__ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset));
- __ ldr(r4, MemOperand(r4,
- Context::SlotOffset(GetArgumentsBoilerplateIndex())));
+ __ ldr(r4, MemOperand(r4, Context::SlotOffset(
+ Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
__ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
- if (type_ == NEW_NON_STRICT) {
- // Setup the callee in-object property.
- STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- const int kCalleeOffset = JSObject::kHeaderSize +
- Heap::kArgumentsCalleeIndex * kPointerSize;
- __ str(r3, FieldMemOperand(r0, kCalleeOffset));
- }
-
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
__ ldr(r1, MemOperand(sp, 0 * kPointerSize));
__ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
- Heap::kArgumentsLengthIndex * kPointerSize));
+ Heap::kArgumentsLengthIndex * kPointerSize));
// If there are no actual arguments, we're done.
Label done;
@@ -4018,12 +4182,13 @@
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
- __ add(r4, r0, Operand(GetArgumentsObjectSize()));
+ __ add(r4, r0, Operand(Heap::kArgumentsObjectSizeStrict));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
+ // Untag the length for the loop.
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize));
// Copy the fixed array slots.
Label loop;
@@ -4046,7 +4211,7 @@
// Do the runtime call to allocate the arguments object.
__ bind(&runtime);
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -4098,8 +4263,7 @@
// Check that the first argument is a JSRegExp object.
__ ldr(r0, MemOperand(sp, kJSRegExpOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
__ b(ne, &runtime);
@@ -4135,8 +4299,7 @@
// regexp_data: RegExp data (FixedArray)
// Check that the second argument is a string.
__ ldr(subject, MemOperand(sp, kSubjectOffset));
- __ tst(subject, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(subject, &runtime);
Condition is_string = masm->IsObjectStringType(subject, r0);
__ b(NegateCondition(is_string), &runtime);
// Get the length of the string to r3.
@@ -4149,8 +4312,7 @@
// Check that the third argument is a positive smi less than the subject
// string length. A negative value will be greater (unsigned comparison).
__ ldr(r0, MemOperand(sp, kPreviousIndexOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &runtime);
+ __ JumpIfNotSmi(r0, &runtime);
__ cmp(r3, Operand(r0));
__ b(ls, &runtime);
@@ -4159,8 +4321,7 @@
// regexp_data: RegExp data (FixedArray)
// Check that the fourth object is a JSArray object.
__ ldr(r0, MemOperand(sp, kLastMatchInfoOffset));
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r0, &runtime);
__ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
__ b(ne, &runtime);
// Check that the JSArray is in fast case.
@@ -4419,8 +4580,7 @@
__ ldr(r1, MemOperand(sp, kPointerSize * 2));
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(ne, &slowcase);
+ __ JumpIfNotSmi(r1, &slowcase);
__ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength)));
__ b(hi, &slowcase);
// Smi-tagging is equivalent to multiplying by 2.
@@ -4562,6 +4722,7 @@
__ mov(r0, Operand(argc_)); // Setup the number of arguments.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
+ __ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@@ -4569,16 +4730,9 @@
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
-const char* CompareStub::GetName() {
+void CompareStub::PrintName(StringStream* stream) {
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(lhs_.is(r1) && rhs_.is(r0)));
-
- if (name_ != NULL) return name_;
- const int kMaxNameLength = 100;
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
- kMaxNameLength);
- if (name_ == NULL) return "OOM";
-
const char* cc_name;
switch (cc_) {
case lt: cc_name = "LT"; break;
@@ -4589,40 +4743,14 @@
case ne: cc_name = "NE"; break;
default: cc_name = "UnknownCondition"; break;
}
-
- const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1";
- const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1";
-
- const char* strict_name = "";
- if (strict_ && (cc_ == eq || cc_ == ne)) {
- strict_name = "_STRICT";
- }
-
- const char* never_nan_nan_name = "";
- if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) {
- never_nan_nan_name = "_NO_NAN";
- }
-
- const char* include_number_compare_name = "";
- if (!include_number_compare_) {
- include_number_compare_name = "_NO_NUMBER";
- }
-
- const char* include_smi_compare_name = "";
- if (!include_smi_compare_) {
- include_smi_compare_name = "_NO_SMI";
- }
-
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
- "CompareStub_%s%s%s%s%s%s",
- cc_name,
- lhs_name,
- rhs_name,
- strict_name,
- never_nan_nan_name,
- include_number_compare_name,
- include_smi_compare_name);
- return name_;
+ bool is_equality = cc_ == eq || cc_ == ne;
+ stream->Add("CompareStub_%s", cc_name);
+ stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
+ stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
}
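As an illustration of the streaming scheme above: a strict equality stub with lhs in r1 and rhs in r0 would print a name along the lines of CompareStub_EQ_r1_r0_STRICT (assuming the elided eq case sets cc_name to "EQ").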
@@ -5301,8 +5429,7 @@
// Make sure first argument is a sequential (or flat) string.
__ ldr(r5, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r5, Operand(kSmiTagMask));
- __ b(eq, &runtime);
+ __ JumpIfSmi(r5, &runtime);
Condition is_string = masm->IsObjectStringType(r5, r1);
__ b(NegateCondition(is_string), &runtime);
@@ -5939,8 +6066,7 @@
ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ orr(r2, r1, r0);
- __ tst(r2, Operand(kSmiTagMask));
- __ b(ne, &miss);
+ __ JumpIfNotSmi(r2, &miss);
if (GetCondition() == eq) {
// For equality we do not care about the sign of the result.
@@ -5964,8 +6090,7 @@
Label unordered;
Label miss;
__ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub);
+ __ JumpIfSmi(r2, &generic_stub);
__ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
@@ -6114,8 +6239,7 @@
ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ and_(r2, r1, Operand(r0));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r2, &miss);
__ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE);
__ b(ne, &miss);
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index fb05cd2..557f7e6 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -58,35 +58,14 @@
};
-class ToBooleanStub: public CodeStub {
- public:
- explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register tos_;
- Major MajorKey() { return ToBoolean; }
- int MinorKey() { return tos_.code(); }
-};
-
-
class UnaryOpStub: public CodeStub {
public:
- UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
+ UnaryOpStub(Token::Value op,
+ UnaryOverwriteMode mode,
+ UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
: op_(op),
mode_(mode),
- operand_type_(UnaryOpIC::UNINITIALIZED),
- name_(NULL) {
- }
-
- UnaryOpStub(
- int key,
- UnaryOpIC::TypeInfo operand_type)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- operand_type_(operand_type),
- name_(NULL) {
+ operand_type_(operand_type) {
}
private:
@@ -96,20 +75,7 @@
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("UnaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- UnaryOpIC::GetName(operand_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
@@ -163,8 +129,7 @@
: op_(op),
mode_(mode),
operands_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED),
- name_(NULL) {
+ result_type_(BinaryOpIC::UNINITIALIZED) {
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
@@ -177,8 +142,7 @@
mode_(ModeBits::decode(key)),
use_vfp3_(VFP3Bits::decode(key)),
operands_type_(operands_type),
- result_type_(result_type),
- name_(NULL) { }
+ result_type_(result_type) { }
private:
enum SmiCodeGenerateHeapNumberResults {
@@ -194,20 +158,7 @@
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
- char* name_;
-
- const char* GetName();
-
-#ifdef DEBUG
- void Print() {
- PrintF("BinaryOpStub %d (op %s), "
- "(mode %d, runtime_type_info %s)\n",
- MinorKey(),
- Token::String(op_),
- static_cast<int>(mode_),
- BinaryOpIC::GetName(operands_type_));
- }
-#endif
+ virtual void PrintName(StringStream* stream);
// Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
@@ -391,12 +342,6 @@
}
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
};
@@ -423,8 +368,6 @@
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "NumberToStringStub"; }
};
@@ -442,8 +385,6 @@
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "RegExpCEntryStub"; }
};
@@ -464,14 +405,11 @@
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
-
- const char* GetName() { return "DirectCEntryStub"; }
};
class FloatingPointHelper : public AllStatic {
public:
-
enum Destination {
kVFPRegisters,
kCoreRegisters
@@ -649,13 +587,6 @@
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
-
-#ifdef DEBUG
- void Print() {
- PrintF("StringDictionaryLookupStub\n");
- }
-#endif
-
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 01aa805..d27982a 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -58,9 +58,7 @@
// Print the code after compiling it.
static void PrintCode(Handle<Code> code, CompilationInfo* info);
-#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
-#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
FunctionLiteral* lit,
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 5b62d82..4b994e5 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -270,6 +270,9 @@
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
+#ifdef DEBUG
+ output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
+#endif
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -385,6 +388,9 @@
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
+#ifdef DEBUG
+ output_frame->SetKind(Code::FUNCTION);
+#endif
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
@@ -519,7 +525,7 @@
// Set the continuation for the topmost frame.
- if (is_topmost) {
+ if (is_topmost && bailout_type_ != DEBUGGER) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? builtins->builtin(Builtins::kNotifyDeoptimized)
@@ -532,8 +538,28 @@
}
-#define __ masm()->
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+ // Set the register values. The values are not important as there are no
+ // callee saved registers in JavaScript frames, so all registers are
+ // spilled. Registers fp and sp are set to the correct values though.
+ for (int i = 0; i < Register::kNumRegisters; i++) {
+ input_->SetRegister(i, i * 4);
+ }
+ input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+ input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+ for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
+ input_->SetDoubleRegister(i, 0.0);
+ }
+
+ // Fill the frame content from the actual data on the frame.
+ for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+ input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
+ }
+}
+
+
+#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 6116513..c3440eb 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -92,17 +92,19 @@
}
void EmitPatchInfo() {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+ if (patch_site_.is_bound()) {
+ int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+ Register reg;
+ reg.set_code(delta_to_patch_site / kOff12Mask);
+ __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
#ifdef DEBUG
- info_emitted_ = true;
+ info_emitted_ = true;
#endif
+ } else {
+ __ nop(); // Signals no inlined code.
+ }
}
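The bound case above packs the instruction distance to the patch site into a single cmp: the quotient by kOff12Mask lands in the register field and the remainder in the 12-bit immediate. A minimal sketch of the inverse, assuming only the division scheme visible here (DecodeDeltaToPatchSite is a hypothetical helper, not part of the patch):

// Hypothetical decode mirroring EmitPatchInfo's encode: the patcher can
// recover the delta from the cmp's register code and immediate field.
static int DecodeDeltaToPatchSite(int reg_code, int imm12) {
  return reg_code * kOff12Mask + imm12;  // inverse of / and % above
}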
- bool is_bound() const { return patch_site_.is_bound(); }
-
private:
MacroAssembler* masm_;
Label patch_site_;
@@ -129,6 +131,7 @@
void FullCodeGenerator::Generate(CompilationInfo* info) {
ASSERT(info_ == NULL);
info_ = info;
+ scope_ = info->scope();
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -139,21 +142,21 @@
}
#endif
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). r5 is zero for method calls and non-zero for function
- // calls.
- if (info->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (info->is_strict_mode() || info->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
- int receiver_offset = scope()->num_parameters() * kPointerSize;
+ int receiver_offset = info->scope()->num_parameters() * kPointerSize;
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ str(r2, MemOperand(sp, receiver_offset));
__ bind(&ok);
}
- int locals_count = scope()->num_stack_slots();
+ int locals_count = info->scope()->num_stack_slots();
__ Push(lr, fp, cp, r1);
if (locals_count > 0) {
@@ -173,7 +176,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+ int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -182,14 +185,14 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
- int num_parameters = scope()->num_parameters();
+ int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
@@ -220,27 +223,28 @@
__ mov(r3, r1);
}
// Receiver is just before the parameters on the caller's stack.
- int offset = scope()->num_parameters() * kPointerSize;
+ int num_parameters = info->scope()->num_parameters();
+ int offset = num_parameters * kPointerSize;
__ add(r2, fp,
Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r1, Operand(Smi::FromInt(num_parameters)));
__ Push(r3, r2, r1);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
// The stub will rewrite receiver and parameter count if the previous
// stack frame was an arguments adaptor frame.
- ArgumentsAccessStub stub(
- is_strict_mode() ? ArgumentsAccessStub::NEW_STRICT
- : ArgumentsAccessStub::NEW_NON_STRICT);
+ ArgumentsAccessStub::Type type;
+ if (is_strict_mode()) {
+ type = ArgumentsAccessStub::NEW_STRICT;
+ } else if (function()->has_duplicate_parameters()) {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_SLOW;
+ } else {
+ type = ArgumentsAccessStub::NEW_NON_STRICT_FAST;
+ }
+ ArgumentsAccessStub stub(type);
__ CallStub(&stub);
- Variable* arguments_shadow = scope()->arguments_shadow();
- if (arguments_shadow != NULL) {
- // Duplicate the value; move-to-slot operation might clobber registers.
- __ mov(r3, r0);
- Move(arguments_shadow->AsSlot(), r3, r1, r2);
- }
Move(arguments->AsSlot(), r0, r1, r2);
}
@@ -345,7 +349,7 @@
{ Assembler::BlockConstPoolScope block_const_pool(masm_);
// Here we use masm_-> instead of the __ macro to prevent the code coverage
// tool from instrumenting, as we rely on the code size here.
- int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
+ int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
__ RecordJSReturn();
masm_->mov(sp, fp);
@@ -383,7 +387,7 @@
// For simplicity we always test the accumulator register.
codegen()->Move(result_register(), slot);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -417,7 +421,7 @@
if (true_label_ != fall_through_) __ b(true_label_);
} else {
__ LoadRoot(result_register(), index);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -464,7 +468,7 @@
} else {
// For simplicity we always test the accumulator register.
__ mov(result_register(), Operand(lit));
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
}
@@ -500,7 +504,7 @@
__ Drop(count);
__ Move(result_register(), reg);
codegen()->PrepareForBailoutBeforeSplit(TOS_REG, false, NULL, NULL);
- codegen()->DoTest(true_label_, false_label_, fall_through_);
+ codegen()->DoTest(this);
}
@@ -578,7 +582,8 @@
}
-void FullCodeGenerator::DoTest(Label* if_true,
+void FullCodeGenerator::DoTest(Expression* condition,
+ Label* if_true,
Label* if_false,
Label* fall_through) {
if (CpuFeatures::IsSupported(VFP3)) {
@@ -712,10 +717,12 @@
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
- // Check that we're not inside a 'with'.
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ cmp(r1, cp);
- __ Check(eq, "Unexpected declaration in current context.");
+ // Check that we're not inside a with or catch context.
+ __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+ __ Check(ne, "Declaration in with context.");
+ __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+ __ Check(ne, "Declaration in catch context.");
}
if (mode == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
@@ -769,7 +776,7 @@
// IDs for bailouts from optimized code.
ASSERT(prop->obj()->AsVariableProxy() != NULL);
{ AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
+ EmitVariableLoad(prop->obj()->AsVariableProxy());
}
__ push(r0);
@@ -783,7 +790,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
// Value in r0 is ignored (declarations are statements).
}
}
@@ -857,7 +864,8 @@
// Record position before stub call for type feedback.
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
- EmitCallIC(ic, &patch_site, clause->CompareId());
+ __ Call(ic, RelocInfo::CODE_TARGET, clause->CompareId());
+ patch_site.EmitPatchInfo();
__ cmp(r0, Operand(0));
__ b(ne, &next_test);
@@ -912,8 +920,8 @@
// Convert the object to a JS object.
Label convert, done_convert;
__ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
- __ b(hs, &done_convert);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ b(ge, &done_convert);
__ bind(&convert);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
@@ -1105,7 +1113,7 @@
void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
Comment cmnt(masm_, "[ VariableProxy");
- EmitVariableLoad(expr->var());
+ EmitVariableLoad(expr);
}
@@ -1127,8 +1135,7 @@
__ b(ne, slow);
}
// Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1154,8 +1161,7 @@
__ tst(temp, temp);
__ b(ne, slow);
// Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
__ b(&loop);
__ bind(&fast);
}
@@ -1166,7 +1172,7 @@
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, mode, AstNode::kNoNumber);
+ __ Call(ic, mode);
}
@@ -1186,8 +1192,7 @@
__ tst(temp, temp);
__ b(ne, slow);
}
- __ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
- __ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
+ __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
@@ -1248,7 +1253,7 @@
__ mov(r0, Operand(key_literal->handle()));
Handle<Code> ic =
isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
__ jmp(done);
}
}
@@ -1257,24 +1262,27 @@
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var) {
- // Four cases: non-this global variables, lookup slots, all other
- // types of slots, and parameters that rewrite to explicit property
- // accesses on the arguments object.
- Slot* slot = var->AsSlot();
- Property* property = var->AsProperty();
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+ // Record position before possible IC call.
+ SetSourcePosition(proxy->position());
+ Variable* var = proxy->var();
- if (var->is_global() && !var->is_this()) {
+ // Three cases: non-this global variables, lookup slots, and all other
+ // types of slots.
+ Slot* slot = var->AsSlot();
+ ASSERT((var->is_global() && !var->is_this()) == (slot == NULL));
+
+ if (slot == NULL) {
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in r2 and the global
// object (receiver) in r0.
__ ldr(r0, GlobalObjectOperand());
__ mov(r2, Operand(var->name()));
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
context()->Plug(r0);
- } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
+ } else if (slot->type() == Slot::LOOKUP) {
Label done, slow;
// Generate code for loading from variables potentially shadowed
@@ -1290,7 +1298,7 @@
context()->Plug(r0);
- } else if (slot != NULL) {
+ } else {
Comment cmnt(masm_, (slot->type() == Slot::CONTEXT)
? "Context slot"
: "Stack slot");
@@ -1306,32 +1314,6 @@
} else {
context()->Plug(slot);
}
- } else {
- Comment cmnt(masm_, "Rewritten parameter");
- ASSERT_NOT_NULL(property);
- // Rewritten parameter accesses are of the form "slot[literal]".
-
- // Assert that the object is in a slot.
- Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
- ASSERT_NOT_NULL(object_var);
- Slot* object_slot = object_var->AsSlot();
- ASSERT_NOT_NULL(object_slot);
-
- // Load the object.
- Move(r1, object_slot);
-
- // Assert that the key is a smi.
- Literal* key_literal = property->key()->AsLiteral();
- ASSERT_NOT_NULL(key_literal);
- ASSERT(key_literal->handle()->IsSmi());
-
- // Load the key.
- __ mov(r0, Operand(key_literal->handle()));
-
- // Call keyed load IC. It has arguments key and receiver in r0 and r1.
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(property));
- context()->Plug(r0);
}
}
@@ -1441,7 +1423,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, key->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, key->id());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
VisitForEffect(value);
@@ -1572,7 +1554,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty();
@@ -1598,27 +1580,13 @@
break;
case KEYED_PROPERTY:
if (expr->is_compound()) {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(property->obj());
- VisitForAccumulatorValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForAccumulatorValue(property->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
} else {
- if (property->is_arguments_access()) {
- VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
- __ ldr(r1, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ mov(r0, Operand(property->key()->AsLiteral()->handle()));
- __ Push(r1, r0);
- } else {
- VisitForStackValue(property->obj());
- VisitForStackValue(property->key());
- }
+ VisitForStackValue(property->obj());
+ VisitForStackValue(property->key());
}
break;
}
@@ -1629,7 +1597,7 @@
{ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
- EmitVariableLoad(expr->target()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->target()->AsVariableProxy());
PrepareForBailout(expr->target(), TOS_REG);
break;
case NAMED_PROPERTY:
@@ -1695,7 +1663,7 @@
__ mov(r2, Operand(key->handle()));
// Call load IC. It has arguments receiver and property name r0 and r2.
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1703,7 +1671,7 @@
SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
}
@@ -1730,7 +1698,8 @@
__ bind(&stub_call);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), &patch_site, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
__ jmp(&done);
__ bind(&smi_case);
@@ -1811,7 +1780,9 @@
OverwriteMode mode) {
__ pop(r1);
BinaryOpStub stub(op, mode);
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
context()->Plug(r0);
}
@@ -1825,7 +1796,7 @@
}
// Left-hand side can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->AsProperty();
@@ -1851,30 +1822,20 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
case KEYED_PROPERTY: {
__ push(r0); // Preserve value.
- if (prop->is_synthetic()) {
- ASSERT(prop->obj()->AsVariableProxy() != NULL);
- ASSERT(prop->key()->AsLiteral() != NULL);
- { AccumulatorValueContext for_object(this);
- EmitVariableLoad(prop->obj()->AsVariableProxy()->var());
- }
- __ mov(r2, r0);
- __ mov(r1, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- __ mov(r1, r0);
- __ pop(r2);
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
+ __ mov(r1, r0);
+ __ pop(r2);
__ pop(r0); // Restore value.
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
break;
}
}
@@ -1885,8 +1846,6 @@
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
Token::Value op) {
- // Left-hand sides that rewrite to explicit property accesses do not reach
- // here.
ASSERT(var != NULL);
ASSERT(var->is_global() || var->AsSlot() != NULL);
@@ -1900,7 +1859,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT, AstNode::kNoNumber);
+ __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
@@ -1922,18 +1881,7 @@
__ b(ne, &skip);
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
- case Slot::CONTEXT: {
- __ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
- __ ldr(r2, ContextOperand(r1, slot->index()));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &skip);
- __ str(r0, ContextOperand(r1, slot->index()));
- int offset = Context::SlotOffset(slot->index());
- __ mov(r3, r0); // Preserve the stored value in r0.
- __ RecordWrite(r1, Operand(offset), r3, r2);
- break;
- }
+ case Slot::CONTEXT:
case Slot::LOOKUP:
__ push(r0);
__ mov(r0, Operand(slot->var()->name()));
@@ -2009,7 +1957,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2055,7 +2003,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
// If the assignment ends an initialization block, revert to fast case.
if (expr->ends_initialization_block()) {
@@ -2107,7 +2055,7 @@
InLoopFlag in_loop = (loop_depth() > 0) ? IN_LOOP : NOT_IN_LOOP;
Handle<Code> ic =
isolate()->stub_cache()->ComputeCallInitialize(arg_count, in_loop, mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2141,7 +2089,7 @@
Handle<Code> ic =
isolate()->stub_cache()->ComputeKeyedCallInitialize(arg_count, in_loop);
__ ldr(r2, MemOperand(sp, (arg_count + 1) * kPointerSize)); // Key.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
RecordJSReturnSite(expr);
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2181,7 +2129,8 @@
__ push(r1);
// Push the receiver of the enclosing function and do a runtime call.
- __ ldr(r1, MemOperand(fp, (2 + scope()->num_parameters()) * kPointerSize));
+ int receiver_offset = 2 + info_->scope()->num_parameters();
+ __ ldr(r1, MemOperand(fp, receiver_offset * kPointerSize));
__ push(r1);
// Push the strict mode flag.
__ mov(r1, Operand(Smi::FromInt(strict_mode_flag())));
@@ -2300,9 +2249,9 @@
__ bind(&done);
// Push function.
__ push(r0);
- // Push global receiver.
- __ ldr(r1, GlobalObjectOperand());
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
+ // The receiver is implicitly the global receiver. Indicate this
+ // by passing the hole to the call function stub.
+ __ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
__ push(r1);
__ bind(&call);
}
@@ -2324,7 +2273,7 @@
} else {
// Call to a keyed property.
// For a synthetic property use keyed load IC followed by function call,
- // for a regular property use keyed EmitCallIC.
+ // for a regular property use EmitKeyedCallWithIC.
if (prop->is_synthetic()) {
// Do not visit the object and key subexpressions (they are shared
// by all occurrences of the same rewritten parameter).
@@ -2342,7 +2291,7 @@
SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
+ __ Call(ic, RelocInfo::CODE_TARGET, GetPropertyId(prop));
__ ldr(r1, GlobalObjectOperand());
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
__ Push(r0, r1); // Function, receiver.
@@ -2468,9 +2417,9 @@
__ tst(r1, Operand(1 << Map::kIsUndetectable));
__ b(ne, if_false);
__ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, if_false);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
@@ -2491,7 +2440,7 @@
&if_true, &if_false, &fall_through);
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -2588,8 +2537,7 @@
// If a valueOf property is not found on the object check that its
// prototype is the unmodified String prototype. If not, the result is false.
__ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, if_false);
+ __ JumpIfSmi(r2, if_false);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
__ ldr(r3, ContextOperand(cp, Context::GLOBAL_INDEX));
__ ldr(r3, FieldMemOperand(r3, GlobalObject::kGlobalContextOffset));
@@ -2734,7 +2682,7 @@
// parameter count in r0.
VisitForAccumulatorValue(args->at(0));
__ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
context()->Plug(r0);
@@ -2746,7 +2694,7 @@
Label exit;
// Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
+ __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
// Check if the calling frame is an arguments adaptor frame.
__ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
@@ -2774,16 +2722,18 @@
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); // Map is now in r0.
+ __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
+ // Map is now in r0.
__ b(lt, &null);
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
- __ cmp(r1, Operand(JS_FUNCTION_TYPE));
- __ b(eq, &function);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type, and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
+ __ cmp(r1, Operand(FIRST_CALLABLE_SPEC_OBJECT_TYPE));
+ __ b(ge, &function);
// Check if the constructor in the map is a function.
__ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
@@ -2826,13 +2776,12 @@
// with '%2s' (see Logger::LogRuntime for all the formats).
// 2 (array): Arguments to the format string.
ASSERT_EQ(args->length(), 3);
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (CodeGenerator::ShouldGenerateLog(args->at(0))) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallRuntime(Runtime::kLog, 2);
}
-#endif
+
// Finally, we're expected to leave a value on the top of the stack.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
context()->Plug(r0);
@@ -3343,8 +3292,7 @@
__ b(eq, &ok);
// Fail if either is a non-HeapObject.
__ and_(tmp, left, Operand(right));
- __ tst(tmp, Operand(kSmiTagMask));
- __ b(eq, &fail);
+ __ JumpIfSmi(tmp, &fail);
__ ldr(tmp, FieldMemOperand(left, HeapObject::kMapOffset));
__ ldrb(tmp2, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
__ cmp(tmp2, Operand(JS_REGEXP_TYPE));
@@ -3434,9 +3382,7 @@
__ b(ne, &bailout);
// Check that the array has fast elements.
- __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
- __ tst(scratch2, Operand(1 << Map::kHasFastElements));
- __ b(eq, &bailout);
+ __ CheckFastElements(scratch1, scratch2, &bailout);
// If the array has length zero, return the empty string.
__ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
@@ -3634,6 +3580,39 @@
}
+void FullCodeGenerator::EmitIsNativeOrStrictMode(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 1);
+
+ // Load the function into r0.
+ VisitForAccumulatorValue(args->at(0));
+
+ // Prepare for the test.
+ Label materialize_true, materialize_false;
+ Label* if_true = NULL;
+ Label* if_false = NULL;
+ Label* fall_through = NULL;
+ context()->PrepareTest(&materialize_true, &materialize_false,
+ &if_true, &if_false, &fall_through);
+
+ // Test for strict mode function.
+ __ ldr(r1, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+ kSmiTagSize)));
+ __ b(ne, if_true);
+
+ // Test for native function.
+ __ tst(r1, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, if_true);
+
+ // Not native or strict-mode function.
+ __ b(if_false);
+
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+ context()->Plug(if_true, if_false);
+}
+
+
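The two tst/b pairs above test flag bits in the smi-encoded compiler hints, which is why each bit index is shifted by kSmiTagSize. A minimal C++ sketch of the same predicate (sketch only; the real check stays in generated code):

// Sketch: true when the shared function info's hints mark the function
// as strict-mode or native, matching the bit tests emitted above.
static bool IsNativeOrStrictModeHints(int32_t compiler_hints) {
  int32_t strict_bit =
      1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
  int32_t native_bit = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
  return (compiler_hints & (strict_bit | native_bit)) != 0;
}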
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
Handle<String> name = expr->name();
if (name->length() > 0 && name->Get(0) == '_') {
@@ -3666,7 +3645,7 @@
isolate()->stub_cache()->ComputeCallInitialize(arg_count,
NOT_IN_LOOP,
mode);
- EmitCallIC(ic, mode, expr->id());
+ __ Call(ic, mode, expr->id());
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
@@ -3774,8 +3753,7 @@
Comment cmt(masm_, "[ UnaryOperation (ADD)");
VisitForAccumulatorValue(expr->expression());
Label no_conversion;
- __ tst(result_register(), Operand(kSmiTagMask));
- __ b(eq, &no_conversion);
+ __ JumpIfSmi(result_register(), &no_conversion);
ToNumberStub convert_stub;
__ CallStub(&convert_stub);
__ bind(&no_conversion);
@@ -3809,7 +3787,7 @@
// accumulator register r0.
VisitForAccumulatorValue(expr->expression());
SetSourcePosition(expr->position());
- EmitCallIC(stub.GetCode(), NULL, expr->id());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->id());
context()->Plug(r0);
}
@@ -3826,7 +3804,7 @@
}
// Expression can only be a property, a global or a (parameter or local)
- // slot. Variables with rewrite to .arguments are treated as KEYED_PROPERTY.
+ // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
LhsKind assign_type = VARIABLE;
Property* prop = expr->expression()->AsProperty();
@@ -3841,7 +3819,7 @@
if (assign_type == VARIABLE) {
ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
AccumulatorValueContext context(this);
- EmitVariableLoad(expr->expression()->AsVariableProxy()->var());
+ EmitVariableLoad(expr->expression()->AsVariableProxy());
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
@@ -3854,15 +3832,8 @@
__ push(r0);
EmitNamedPropertyLoad(prop);
} else {
- if (prop->is_arguments_access()) {
- VariableProxy* obj_proxy = prop->obj()->AsVariableProxy();
- __ ldr(r0, EmitSlotSearch(obj_proxy->var()->AsSlot(), r0));
- __ push(r0);
- __ mov(r0, Operand(prop->key()->AsLiteral()->handle()));
- } else {
- VisitForStackValue(prop->obj());
- VisitForAccumulatorValue(prop->key());
- }
+ VisitForStackValue(prop->obj());
+ VisitForAccumulatorValue(prop->key());
__ ldr(r1, MemOperand(sp, 0));
__ push(r0);
EmitKeyedPropertyLoad(prop);
@@ -3927,7 +3898,8 @@
SetSourcePosition(expr->position());
BinaryOpStub stub(Token::ADD, NO_OVERWRITE);
- EmitCallIC(stub.GetCode(), &patch_site, expr->CountId());
+ __ Call(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountId());
+ patch_site.EmitPatchInfo();
__ bind(&done);
// Store the value returned in r0.
@@ -3958,7 +3930,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->StoreIC_Initialize_Strict()
: isolate()->builtins()->StoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -3975,7 +3947,7 @@
Handle<Code> ic = is_strict_mode()
? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
: isolate()->builtins()->KeyedStoreIC_Initialize();
- EmitCallIC(ic, RelocInfo::CODE_TARGET, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
if (!context()->IsEffect()) {
@@ -4001,7 +3973,7 @@
Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
// Use a regular load, not a contextual load, to avoid a reference
// error.
- EmitCallIC(ic, RelocInfo::CODE_TARGET, AstNode::kNoNumber);
+ __ Call(ic);
PrepareForBailout(expr, TOS_REG);
context()->Plug(r0);
} else if (proxy != NULL &&
@@ -4024,30 +3996,18 @@
context()->Plug(r0);
} else {
// This expression cannot throw a reference error at the top level.
- context()->HandleExpression(expr);
+ VisitInCurrentContext(expr);
}
}
-bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
- Expression* left,
- Expression* right,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
- if (op != Token::EQ && op != Token::EQ_STRICT) return false;
-
- // Check for the pattern: typeof <expression> == <string literal>.
- Literal* right_literal = right->AsLiteral();
- if (right_literal == NULL) return false;
- Handle<Object> right_literal_value = right_literal->handle();
- if (!right_literal_value->IsString()) return false;
- UnaryOperation* left_unary = left->AsUnaryOperation();
- if (left_unary == NULL || left_unary->op() != Token::TYPEOF) return false;
- Handle<String> check = Handle<String>::cast(right_literal_value);
-
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+ Handle<String> check,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
{ AccumulatorValueContext context(this);
- VisitForTypeofValue(left_unary->expression());
+ VisitForTypeofValue(expr);
}
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
@@ -4082,7 +4042,7 @@
} else if (check->Equals(isolate()->heap()->function_symbol())) {
__ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r0, FIRST_FUNCTION_CLASS_TYPE);
+ __ CompareObjectType(r0, r1, r0, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
Split(ge, if_true, if_false, fall_through);
} else if (check->Equals(isolate()->heap()->object_symbol())) {
@@ -4090,10 +4050,10 @@
__ CompareRoot(r0, Heap::kNullValueRootIndex);
__ b(eq, if_true);
// Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE);
- __ b(lo, if_false);
- __ CompareInstanceType(r0, r1, FIRST_FUNCTION_CLASS_TYPE);
- __ b(hs, if_false);
+ __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(lt, if_false);
+ __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(gt, if_false);
// Check for undetectable objects => false.
__ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
__ tst(r1, Operand(1 << Map::kIsUndetectable));
@@ -4101,8 +4061,18 @@
} else {
if (if_false != fall_through) __ jmp(if_false);
}
+}
- return true;
+
+void FullCodeGenerator::EmitLiteralCompareUndefined(Expression* expr,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ VisitForAccumulatorValue(expr);
+ PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
+
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
@@ -4122,14 +4092,12 @@
// First we try a fast inlined version of the compare when one of
// the operands is a literal.
- Token::Value op = expr->op();
- Expression* left = expr->left();
- Expression* right = expr->right();
- if (TryLiteralCompare(op, left, right, if_true, if_false, fall_through)) {
+ if (TryLiteralCompare(expr, if_true, if_false, fall_through)) {
context()->Plug(if_true, if_false);
return;
}
+ Token::Value op = expr->op();
VisitForStackValue(expr->left());
switch (op) {
case Token::IN:
@@ -4204,7 +4172,8 @@
// Record position and call the compare IC.
SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(op);
- EmitCallIC(ic, &patch_site, expr->id());
+ __ Call(ic, RelocInfo::CODE_TARGET, expr->id());
+ patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(TOS_REG, true, if_true, if_false);
__ cmp(r0, Operand(0));
Split(cond, if_true, if_false, fall_through);
@@ -4237,8 +4206,7 @@
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
__ cmp(r0, r1);
__ b(eq, if_true);
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, if_false);
+ __ JumpIfSmi(r0, if_false);
// It can be an undetectable object.
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
@@ -4266,70 +4234,6 @@
}
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- RelocInfo::Mode mode,
- unsigned ast_id) {
- ASSERT(mode == RelocInfo::CODE_TARGET ||
- mode == RelocInfo::CODE_TARGET_CONTEXT);
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
- if (ast_id == kNoASTId || mode == RelocInfo::CODE_TARGET_CONTEXT) {
- __ Call(ic, mode);
- } else {
- ASSERT(mode == RelocInfo::CODE_TARGET);
- mode = RelocInfo::CODE_TARGET_WITH_ID;
- __ CallWithAstId(ic, mode, ast_id);
- }
-}
-
-
-void FullCodeGenerator::EmitCallIC(Handle<Code> ic,
- JumpPatchSite* patch_site,
- unsigned ast_id) {
- Counters* counters = isolate()->counters();
- switch (ic->kind()) {
- case Code::LOAD_IC:
- __ IncrementCounter(counters->named_load_full(), 1, r1, r2);
- break;
- case Code::KEYED_LOAD_IC:
- __ IncrementCounter(counters->keyed_load_full(), 1, r1, r2);
- break;
- case Code::STORE_IC:
- __ IncrementCounter(counters->named_store_full(), 1, r1, r2);
- break;
- case Code::KEYED_STORE_IC:
- __ IncrementCounter(counters->keyed_store_full(), 1, r1, r2);
- default:
- break;
- }
-
- if (ast_id == kNoASTId) {
- __ Call(ic, RelocInfo::CODE_TARGET);
- } else {
- __ CallWithAstId(ic, RelocInfo::CODE_TARGET_WITH_ID, ast_id);
- }
- if (patch_site != NULL && patch_site->is_bound()) {
- patch_site->EmitPatchInfo();
- } else {
- __ nop(); // Signals no inlined code.
- }
-}
-
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
__ str(value, MemOperand(fp, frame_offset));
@@ -4341,6 +4245,27 @@
}
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+ Scope* declaration_scope = scope()->DeclarationScope();
+ if (declaration_scope->is_global_scope()) {
+ // Contexts nested in the global context have a canonical empty function
+ // as their closure, not the anonymous closure containing the global
+ // code. Pass a smi sentinel and let the runtime look up the empty
+ // function.
+ __ mov(ip, Operand(Smi::FromInt(0)));
+ } else if (declaration_scope->is_eval_scope()) {
+ // Contexts created by a call to eval have the same closure as the
+ // context calling eval, not the anonymous closure containing the eval
+ // code. Fetch it from the context.
+ __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ } else {
+ ASSERT(declaration_scope->is_function_scope());
+ __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ }
+ __ push(ip);
+}
+
+
// ----------------------------------------------------------------------------
// Non-local control flow support.
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 2123163..6038153 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -79,15 +79,14 @@
// elements map.
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the receiver is a valid JS object.
- __ CompareObjectType(receiver, t0, t1, FIRST_JS_OBJECT_TYPE);
+ __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, miss);
// If this assert fails, we have to check the upper bound too.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+ STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
GenerateGlobalInstanceTypeCheck(masm, t1, miss);
@@ -213,101 +212,6 @@
}
-static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
- Label* miss,
- Register elements,
- Register key,
- Register result,
- Register t0,
- Register t1,
- Register t2) {
- // Register use:
- //
- // elements - holds the slow-case elements of the receiver on entry.
- // Unchanged unless 'result' is the same register.
- //
- // key - holds the smi key on entry.
- // Unchanged unless 'result' is the same register.
- //
- // result - holds the result on exit if the load succeeded.
- // Allowed to be the same as 'key' or 'elements'.
- // Unchanged on bailout so 'key' or 'result' can be used
- // in further computation.
- //
- // Scratch registers:
- //
- // t0 - holds the untagged key on entry and holds the hash once computed.
- //
- // t1 - used to hold the capacity mask of the dictionary
- //
- // t2 - used for the index into the dictionary.
- Label done;
-
- // Compute the hash code from the untagged key. This must be kept in sync
- // with ComputeIntegerHash in utils.h.
- //
- // hash = ~hash + (hash << 15);
- __ mvn(t1, Operand(t0));
- __ add(t0, t1, Operand(t0, LSL, 15));
- // hash = hash ^ (hash >> 12);
- __ eor(t0, t0, Operand(t0, LSR, 12));
- // hash = hash + (hash << 2);
- __ add(t0, t0, Operand(t0, LSL, 2));
- // hash = hash ^ (hash >> 4);
- __ eor(t0, t0, Operand(t0, LSR, 4));
- // hash = hash * 2057;
- __ mov(t1, Operand(2057));
- __ mul(t0, t0, t1);
- // hash = hash ^ (hash >> 16);
- __ eor(t0, t0, Operand(t0, LSR, 16));
-
- // Compute the capacity mask.
- __ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
- __ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
- __ sub(t1, t1, Operand(1));
-
- // Generate an unrolled loop that performs a few probes before giving up.
- static const int kProbes = 4;
- for (int i = 0; i < kProbes; i++) {
- // Use t2 for index calculations and keep the hash intact in t0.
- __ mov(t2, t0);
- // Compute the masked index: (hash + i + i * i) & mask.
- if (i > 0) {
- __ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
- }
- __ and_(t2, t2, Operand(t1));
-
- // Scale the index by multiplying by the element size.
- ASSERT(NumberDictionary::kEntrySize == 3);
- __ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
-
- // Check if the key is identical to the name.
- __ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
- __ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
- __ cmp(key, Operand(ip));
- if (i != kProbes - 1) {
- __ b(eq, &done);
- } else {
- __ b(ne, miss);
- }
- }
-
- __ bind(&done);
- // Check that the value is a normal property.
- // t2: elements + (index * kPointerSize)
- const int kDetailsOffset =
- NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- __ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
- __ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
- __ b(ne, miss);
-
- // Get the value at the masked, scaled index and return.
- const int kValueOffset =
- NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(result, FieldMemOperand(t2, kValueOffset));
-}
-
-
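The deleted helper is replaced by the LoadFromNumberDictionary macro instruction used below; the hash it open-coded, per the comments above, tracks ComputeIntegerHash in utils.h. A C++ restatement of those steps for reference (a sketch, not the utils.h source):

// Sketch of the integer hash the removed assembly computed from the
// untagged key, step for step as in the comments above.
static uint32_t ComputeIntegerHashSketch(uint32_t hash) {
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;
  hash = hash ^ (hash >> 16);
  return hash;
}
// Each probe i then inspects entry (hash + i + i * i) & capacity_mask,
// with NumberDictionary::kEntrySize (3) words per entry.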
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -503,8 +407,7 @@
// to probe.
//
// Check for number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &number);
+ __ JumpIfSmi(r1, &number);
__ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE);
__ b(ne, &non_number);
__ bind(&number);
@@ -548,8 +451,7 @@
// r1: function
// Check that the value isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r1, miss);
// Check that the value is a JSFunction.
__ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
@@ -624,8 +526,7 @@
if (id == IC::kCallIC_Miss) {
Label invoke, global;
__ ldr(r2, MemOperand(sp, argc * kPointerSize)); // receiver
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &invoke);
+ __ JumpIfSmi(r2, &invoke);
__ CompareObjectType(r2, r3, r3, JS_GLOBAL_OBJECT_TYPE);
__ b(eq, &global);
__ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
@@ -742,7 +643,7 @@
__ b(ne, &slow_load);
__ mov(r0, Operand(r2, ASR, kSmiTagSize));
// r0: untagged index
- GenerateNumberDictionaryLoad(masm, &slow_load, r4, r2, r1, r0, r3, r5);
+ __ LoadFromNumberDictionary(&slow_load, r4, r2, r1, r0, r3, r5);
__ IncrementCounter(counters->keyed_call_generic_smi_dict(), 1, r0, r3);
__ jmp(&do_call);
@@ -812,8 +713,7 @@
// Check if the name is a string.
Label miss;
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r2, &miss);
__ IsObjectJSStringType(r2, r0, &miss);
GenerateCallNormal(masm, argc);
@@ -887,6 +787,174 @@
}
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+ Register object,
+ Register key,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* unmapped_case,
+ Label* slow_case) {
+ Heap* heap = masm->isolate()->heap();
+
+ // Check that the receiver is a JSObject. Because of the map check
+ // later, we do not need to check for interceptors or whether it
+ // requires access checks.
+ __ JumpIfSmi(object, slow_case);
+ // Check that the object is some kind of JSObject.
+ __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+ __ b(lt, slow_case);
+
+ // Check that the key is a positive smi.
+ __ tst(key, Operand(0x80000001));
+ __ b(ne, slow_case);
+
+ // Load the elements into scratch1 and check its map.
+ Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
+ __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+ // Check if element is in the range of mapped arguments. If not, jump
+ // to the unmapped lookup with the parameter map in scratch1.
+ __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ cmp(key, Operand(scratch2));
+ __ b(cs, unmapped_case);
+
+ // Load element index and check whether it is the hole.
+ const int kOffset =
+ FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, key, scratch3);
+ __ add(scratch3, scratch3, Operand(kOffset));
+
+ __ ldr(scratch2, MemOperand(scratch1, scratch3));
+ __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch2, scratch3);
+ __ b(eq, unmapped_case);
+
+ // Load value from context and return it. We can reuse scratch1 because
+ // we do not jump to the unmapped lookup (which requires the parameter
+ // map in scratch1).
+ __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ mov(scratch3, Operand(kPointerSize >> 1));
+ __ mul(scratch3, scratch2, scratch3);
+ __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ return MemOperand(scratch1, scratch3);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+ Register key,
+ Register parameter_map,
+ Register scratch,
+ Label* slow_case) {
+ // The element is in the arguments backing store, which is referenced
+ // by the second element of the parameter map. The parameter_map
+ // register must hold the parameter map of the arguments object; it is
+ // overwritten by this function.
+ const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+ Register backing_store = parameter_map;
+ __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+ __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+ DONT_DO_SMI_CHECK);
+ __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(scratch));
+ __ b(cs, slow_case);
+ __ mov(scratch, Operand(kPointerSize >> 1));
+ __ mul(scratch, key, scratch);
+ __ add(scratch,
+ scratch,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ return MemOperand(backing_store, scratch);
+}
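Taken together, the two helpers implement V8's aliased (non-strict) arguments lookup: a parameter map whose first two slots hold the context and the backing store, followed by one slot per mapped parameter that holds either a context slot index or the hole. A rough C++ sketch of that control flow, using hypothetical stand-in containers rather than V8's tagged heap layout:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Simplified stand-ins; the real structures are tagged heap objects.
    const int kHole = -1;  // marks an unmapped (deleted) parameter slot
    struct ArgumentsMap {
      std::vector<int> mapped;          // per-parameter context index, or kHole
      std::vector<intptr_t>* context;   // the function context's slots
      std::vector<intptr_t>* backing;   // plain FixedArray backing store
    };

    // Returns the storage slot for arguments[key], or nullptr for the
    // slow case the stubs jump to.
    intptr_t* ArgumentsLookup(ArgumentsMap& m, size_t key) {
      if (key < m.mapped.size() && m.mapped[key] != kHole) {
        return &(*m.context)[m.mapped[key]];  // mapped: aliases a context slot
      }
      if (key >= m.backing->size()) return nullptr;
      return &(*m.backing)[key];              // unmapped: backing store element
    }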
+
+
+void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
+ __ ldr(r0, mapped_location);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r2.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
+ __ ldr(r2, unmapped_location);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, r3);
+ __ b(eq, &slow);
+ __ mov(r0, r2);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
+ __ str(r0, mapped_location);
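+ // mapped_location is MemOperand(r3, r5), so r6 = r3 + r5 is the
+ // absolute address of the context slot just written; the write
+ // barrier needs the object (r3, the context) and the slot address.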
+ __ add(r6, r3, r5);
+ __ RecordWrite(r3, r6, r9);
+ __ Ret();
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
+ __ str(r0, unmapped_location);
+ __ add(r6, r3, r4);
+ __ RecordWrite(r3, r6, r9);
+ __ Ret();
+ __ bind(&slow);
+ GenerateMiss(masm, false);
+}
+
+
+void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm,
+ int argc) {
+ // ----------- S t a t e -------------
+ // -- r2 : name
+ // -- lr : return address
+ // -----------------------------------
+ Label slow, notin;
+ // Load receiver.
+ __ ldr(r1, MemOperand(sp, argc * kPointerSize));
+ MemOperand mapped_location =
+ GenerateMappedArgumentsLookup(masm, r1, r2, r3, r4, r5, &notin, &slow);
+ __ ldr(r1, mapped_location);
+ GenerateFunctionTailCall(masm, argc, &slow, r3);
+ __ bind(&notin);
+ // The unmapped lookup expects that the parameter map is in r3.
+ MemOperand unmapped_location =
+ GenerateUnmappedArgumentsLookup(masm, r2, r3, r4, &slow);
+ __ ldr(r1, unmapped_location);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ cmp(r1, r3);
+ __ b(eq, &slow);
+ GenerateFunctionTailCall(masm, argc, &slow, r3);
+ __ bind(&slow);
+ GenerateMiss(masm, argc);
+}
+
+
+Object* KeyedLoadIC_Miss(Arguments args);
+
+
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) {
// ---------- S t a t e --------------
// -- lr : return address
@@ -944,11 +1012,8 @@
GenerateKeyedLoadReceiverCheck(
masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
- // Check the "has fast elements" bit in the receiver's map which is
- // now in r2.
- __ ldrb(r3, FieldMemOperand(r2, Map::kBitField2Offset));
- __ tst(r3, Operand(1 << Map::kHasFastElements));
- __ b(eq, &check_number_dictionary);
+ // Check the receiver's map to see if it has fast elements.
+ __ CheckFastElements(r2, r3, &check_number_dictionary);
GenerateFastArrayLoad(
masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
@@ -967,7 +1032,7 @@
__ cmp(r3, ip);
__ b(ne, &slow);
__ mov(r2, Operand(r0, ASR, kSmiTagSize));
- GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r0, r2, r3, r5);
+ __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@@ -1214,11 +1279,9 @@
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
- __ tst(key, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &slow);
+ __ JumpIfSmi(receiver, &slow);
// Get the map of the object.
__ ldr(r4, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks. We need
@@ -1230,9 +1293,13 @@
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
__ cmp(r4, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
- // Check that the object is some kind of JS object.
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ // Check that the object is some kind of JSObject.
+ __ cmp(r4, Operand(FIRST_JS_RECEIVER_TYPE));
__ b(lt, &slow);
+ __ cmp(r4, Operand(JS_PROXY_TYPE));
+ __ b(eq, &slow);
+ __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(eq, &slow);
// Object case: Check key against length in the elements array.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index e32cd0c..b96805e 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -68,13 +68,13 @@
ASSERT(Output() == NULL ||
LUnallocated::cast(Output())->HasFixedPolicy() ||
!LUnallocated::cast(Output())->HasRegisterPolicy());
- for (UseIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (UseIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() ||
operand->IsUsedAtStart());
}
- for (TempIterator it(this); it.HasNext(); it.Advance()) {
- LUnallocated* operand = LUnallocated::cast(it.Next());
+ for (TempIterator it(this); !it.Done(); it.Advance()) {
+ LUnallocated* operand = LUnallocated::cast(it.Current());
ASSERT(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
}
}
@@ -111,21 +111,18 @@
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintDataTo(StringStream* stream) {
stream->Add("= ");
- inputs_.PrintOperandsTo(stream);
+ for (int i = 0; i < inputs_.length(); i++) {
+ if (i > 0) stream->Add(" ");
+ inputs_[i]->PrintTo(stream);
+ }
}
template<int R, int I, int T>
void LTemplateInstruction<R, I, T>::PrintOutputOperandTo(StringStream* stream) {
- results_.PrintOperandsTo(stream);
-}
-
-
-template<typename T, int N>
-void OperandContainer<T, N>::PrintOperandsTo(StringStream* stream) {
- for (int i = 0; i < N; i++) {
+ for (int i = 0; i < results_.length(); i++) {
if (i > 0) stream->Add(" ");
- elems_[i]->PrintTo(stream);
+ results_[i]->PrintTo(stream);
}
}
@@ -268,12 +265,6 @@
}
-void LTypeofIs::PrintDataTo(StringStream* stream) {
- InputAt(0)->PrintTo(stream);
- stream->Add(" == \"%s\"", *hydrogen()->type_literal()->ToCString());
-}
-
-
void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
stream->Add("if typeof ");
InputAt(0)->PrintTo(stream);
@@ -343,13 +334,6 @@
}
-void LClassOfTest::PrintDataTo(StringStream* stream) {
- stream->Add("= class_of_test(");
- InputAt(0)->PrintTo(stream);
- stream->Add(", \"%o\")", *hydrogen()->class_name());
-}
-
-
void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintTo(stream);
@@ -388,6 +372,15 @@
}
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintTo(stream);
+ stream->Add("[");
+ key()->PrintTo(stream);
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
+}
+
+
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
@@ -436,8 +429,7 @@
LLabel* label = LLabel::cast(first_instr);
if (last_instr->IsGoto()) {
LGoto* goto_instr = LGoto::cast(last_instr);
- if (!goto_instr->include_stack_check() &&
- label->IsRedundant() &&
+ if (label->IsRedundant() &&
!label->is_loop_header()) {
bool can_eliminate = true;
for (int i = first + 1; i < last && can_eliminate; ++i) {
@@ -718,7 +710,9 @@
LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
HEnvironment* hydrogen_env = current_block_->last_environment();
- instr->set_environment(CreateEnvironment(hydrogen_env));
+ int argument_index_accumulator = 0;
+ instr->set_environment(CreateEnvironment(hydrogen_env,
+ &argument_index_accumulator));
return instr;
}
@@ -808,6 +802,11 @@
}
+LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) {
+ return AssignEnvironment(new LDeoptimize);
+}
+
+
LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
return AssignEnvironment(new LDeoptimize);
}
@@ -821,7 +820,7 @@
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
- return DefineSameAsFirst(new LBitI(op, left, right));
+ return DefineAsRegister(new LBitI(op, left, right));
} else {
ASSERT(instr->representation().IsTagged());
ASSERT(instr->left()->representation().IsTagged());
@@ -848,11 +847,11 @@
}
ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->OperandAt(0)->representation().IsInteger32());
- ASSERT(instr->OperandAt(1)->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->OperandAt(0));
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
- HValue* right_value = instr->OperandAt(1);
+ HValue* right_value = instr->right();
LOperand* right = NULL;
int constant_value = 0;
if (right_value->IsConstant()) {
@@ -860,7 +859,7 @@
right = chunk_->DefineConstantOperand(constant);
constant_value = constant->Integer32Value() & 0x1f;
} else {
- right = UseRegister(right_value);
+ right = UseRegisterAtStart(right_value);
}
// Shift operations can only deoptimize if we do a logical shift
@@ -877,7 +876,7 @@
}
LInstruction* result =
- DefineSameAsFirst(new LShiftI(op, left, right, does_deopt));
+ DefineAsRegister(new LShiftI(op, left, right, does_deopt));
return does_deopt ? AssignEnvironment(result) : result;
}
@@ -891,7 +890,7 @@
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new LArithmeticD(op, left, right);
- return DefineSameAsFirst(result);
+ return DefineAsRegister(result);
}
@@ -989,28 +988,20 @@
if (FLAG_stress_environments && !instr->HasEnvironment()) {
instr = AssignEnvironment(instr);
}
- if (current->IsTest() && !instr->IsGoto()) {
- ASSERT(instr->IsControl());
- HTest* test = HTest::cast(current);
- instr->set_hydrogen_value(test->value());
- HBasicBlock* first = test->FirstSuccessor();
- HBasicBlock* second = test->SecondSuccessor();
- ASSERT(first != NULL && second != NULL);
- instr->SetBranchTargets(first->block_id(), second->block_id());
- } else {
- instr->set_hydrogen_value(current);
- }
-
+ instr->set_hydrogen_value(current);
chunk_->AddInstruction(instr, current_block_);
}
current_instruction_ = old_current;
}
-LEnvironment* LChunkBuilder::CreateEnvironment(HEnvironment* hydrogen_env) {
+LEnvironment* LChunkBuilder::CreateEnvironment(
+ HEnvironment* hydrogen_env,
+ int* argument_index_accumulator) {
if (hydrogen_env == NULL) return NULL;
- LEnvironment* outer = CreateEnvironment(hydrogen_env->outer());
+ LEnvironment* outer =
+ CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
ASSERT(ast_id != AstNode::kNoNumber);
int value_count = hydrogen_env->length();
@@ -1020,7 +1011,6 @@
argument_count_,
value_count,
outer);
- int argument_index = 0;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1029,7 +1019,7 @@
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument(argument_index++);
+ op = new LArgument((*argument_index_accumulator)++);
} else {
op = UseAny(value);
}
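The accumulator threads a single argument counter through the recursive calls: environments for inlined frames are created outer-first, and LArgument indices must stay sequential across the whole chain rather than restarting at zero per frame, as the deleted local did. A hypothetical two-frame illustration:

    // Outer environment pushes two arguments, inner pushes one:
    //   outer frame: LArgument(0), LArgument(1)
    //   inner frame: LArgument(2)  // with the old per-call counter: LArgument(0)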
@@ -1041,102 +1031,19 @@
LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
- LInstruction* result = new LGoto(instr->FirstSuccessor()->block_id(),
- instr->include_stack_check());
- if (instr->include_stack_check()) result = AssignPointerMap(result);
- return result;
+ return new LGoto(instr->FirstSuccessor()->block_id());
}
-LInstruction* LChunkBuilder::DoTest(HTest* instr) {
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
HValue* v = instr->value();
- if (!v->EmitAtUses()) {
- return new LBranch(UseRegisterAtStart(v));
- } else if (v->IsClassOfTest()) {
- HClassOfTest* compare = HClassOfTest::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LClassOfTestAndBranch(UseTempRegister(compare->value()),
- TempRegister());
- } else if (v->IsCompare()) {
- HCompare* compare = HCompare::cast(v);
- Token::Value op = compare->token();
- HValue* left = compare->left();
- HValue* right = compare->right();
- Representation r = compare->GetInputRepresentation();
- if (r.IsInteger32()) {
- ASSERT(left->representation().IsInteger32());
- ASSERT(right->representation().IsInteger32());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else if (r.IsDouble()) {
- ASSERT(left->representation().IsDouble());
- ASSERT(right->representation().IsDouble());
- return new LCmpIDAndBranch(UseRegisterAtStart(left),
- UseRegisterAtStart(right));
- } else {
- ASSERT(left->representation().IsTagged());
- ASSERT(right->representation().IsTagged());
- bool reversed = op == Token::GT || op == Token::LTE;
- LOperand* left_operand = UseFixed(left, reversed ? r0 : r1);
- LOperand* right_operand = UseFixed(right, reversed ? r1 : r0);
- LInstruction* result = new LCmpTAndBranch(left_operand, right_operand);
- return MarkAsCall(result, instr);
- }
- } else if (v->IsIsSmi()) {
- HIsSmi* compare = HIsSmi::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsSmiAndBranch(Use(compare->value()));
- } else if (v->IsIsUndetectable()) {
- HIsUndetectable* compare = HIsUndetectable::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsUndetectableAndBranch(UseRegisterAtStart(compare->value()),
- TempRegister());
- } else if (v->IsHasInstanceType()) {
- HHasInstanceType* compare = HHasInstanceType::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasInstanceTypeAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsHasCachedArrayIndex()) {
- HHasCachedArrayIndex* compare = HHasCachedArrayIndex::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LHasCachedArrayIndexAndBranch(
- UseRegisterAtStart(compare->value()));
- } else if (v->IsIsNull()) {
- HIsNull* compare = HIsNull::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- return new LIsNullAndBranch(UseRegisterAtStart(compare->value()));
- } else if (v->IsIsObject()) {
- HIsObject* compare = HIsObject::cast(v);
- ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp = TempRegister();
- return new LIsObjectAndBranch(UseRegister(compare->value()), temp);
- } else if (v->IsCompareJSObjectEq()) {
- HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
- return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsCompareSymbolEq()) {
- HCompareSymbolEq* compare = HCompareSymbolEq::cast(v);
- return new LCmpSymbolEqAndBranch(UseRegisterAtStart(compare->left()),
- UseRegisterAtStart(compare->right()));
- } else if (v->IsInstanceOf()) {
- HInstanceOf* instance_of = HInstanceOf::cast(v);
- LInstruction* result =
- new LInstanceOfAndBranch(UseFixed(instance_of->left(), r0),
- UseFixed(instance_of->right(), r1));
- return MarkAsCall(result, instr);
- } else if (v->IsTypeofIs()) {
- HTypeofIs* typeof_is = HTypeofIs::cast(v);
- return new LTypeofIsAndBranch(UseTempRegister(typeof_is->value()));
- } else if (v->IsIsConstructCall()) {
- return new LIsConstructCallAndBranch(TempRegister());
- } else if (v->IsConstant()) {
+ if (v->EmitAtUses()) {
HBasicBlock* successor = HConstant::cast(v)->ToBoolean()
? instr->FirstSuccessor()
: instr->SecondSuccessor();
return new LGoto(successor->block_id());
- } else {
- Abort("Undefined compare before branch");
- return NULL;
}
+ return new LBranch(UseRegisterAtStart(v));
}
@@ -1170,7 +1077,7 @@
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
LInstanceOfKnownGlobal* result =
- new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
+ new LInstanceOfKnownGlobal(UseFixed(instr->left(), r0), FixedTemp(r4));
return MarkAsCall(DefineFixed(result, r0), instr);
}
@@ -1195,6 +1102,11 @@
}
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+ return instr->HasNoUses() ? NULL : DefineAsRegister(new LThisFunction);
+}
+
+
LInstruction* LChunkBuilder::DoContext(HContext* instr) {
return instr->HasNoUses() ? NULL : DefineAsRegister(new LContext);
}
@@ -1245,15 +1157,15 @@
LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathFloor:
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
- return DefineSameAsFirst(result);
+ return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
- return DefineSameAsFirst(result);
+ return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@@ -1331,7 +1243,7 @@
LInstruction* LChunkBuilder::DoBitNot(HBitNot* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->representation().IsInteger32());
- return DefineSameAsFirst(new LBitNotI(UseRegisterAtStart(instr->value())));
+ return DefineAsRegister(new LBitNotI(UseRegisterAtStart(instr->value())));
}
@@ -1376,15 +1288,20 @@
mod = new LModI(value, UseOrConstant(instr->right()));
} else {
LOperand* dividend = UseRegister(instr->left());
- LOperand* divisor = UseRegisterAtStart(instr->right());
+ LOperand* divisor = UseRegister(instr->right());
mod = new LModI(dividend,
divisor,
TempRegister(),
- FixedTemp(d1),
- FixedTemp(d2));
+ FixedTemp(d10),
+ FixedTemp(d11));
}
- return AssignEnvironment(DefineSameAsFirst(mod));
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+ instr->CheckFlag(HValue::kCanBeDivByZero)) {
+ return AssignEnvironment(DefineAsRegister(mod));
+ } else {
+ return DefineAsRegister(mod);
+ }
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
@@ -1404,16 +1321,22 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
+ LOperand* left;
LOperand* right = UseOrConstant(instr->MostConstantOperand());
LOperand* temp = NULL;
- if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+ (instr->CheckFlag(HValue::kCanOverflow) ||
+ !right->IsConstantOperand())) {
+ left = UseRegister(instr->LeastConstantOperand());
temp = TempRegister();
+ } else {
+ left = UseRegisterAtStart(instr->LeastConstantOperand());
}
- LMulI* mul = new LMulI(left, right, temp);
- return AssignEnvironment(DefineSameAsFirst(mul));
+ return AssignEnvironment(DefineAsRegister(new LMulI(left, right, temp)));
+
} else if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::MUL, instr);
+
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1427,7 +1350,7 @@
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
- LInstruction* result = DefineSameAsFirst(sub);
+ LInstruction* result = DefineAsRegister(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1447,7 +1370,7 @@
LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
LAddI* add = new LAddI(left, right);
- LInstruction* result = DefineSameAsFirst(add);
+ LInstruction* result = DefineAsRegister(add);
if (instr->CheckFlag(HValue::kCanOverflow)) {
result = AssignEnvironment(result);
}
@@ -1478,88 +1401,84 @@
}
-LInstruction* LChunkBuilder::DoCompare(HCompare* instr) {
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
Token::Value op = instr->token();
Representation r = instr->GetInputRepresentation();
+ ASSERT(instr->left()->representation().IsTagged());
+ ASSERT(instr->right()->representation().IsTagged());
+ bool reversed = (op == Token::GT || op == Token::LTE);
+ LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
+ LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
+ LCmpT* result = new LCmpT(left, right);
+ return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareIDAndBranch(
+ HCompareIDAndBranch* instr) {
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else if (r.IsDouble()) {
+ return new LCmpIDAndBranch(left, right);
+ } else {
+ ASSERT(r.IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- return DefineAsRegister(new LCmpID(left, right));
- } else {
- ASSERT(instr->left()->representation().IsTagged());
- ASSERT(instr->right()->representation().IsTagged());
- bool reversed = (op == Token::GT || op == Token::LTE);
- LOperand* left = UseFixed(instr->left(), reversed ? r0 : r1);
- LOperand* right = UseFixed(instr->right(), reversed ? r1 : r0);
- LCmpT* result = new LCmpT(left, right);
- return MarkAsCall(DefineFixed(result, r0), instr);
+ return new LCmpIDAndBranch(left, right);
}
}
-LInstruction* LChunkBuilder::DoCompareJSObjectEq(
- HCompareJSObjectEq* instr) {
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+ HCompareObjectEqAndBranch* instr) {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
- LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
- return DefineAsRegister(result);
+ return new LCmpObjectEqAndBranch(left, right);
}
-LInstruction* LChunkBuilder::DoCompareSymbolEq(
- HCompareSymbolEq* instr) {
- LOperand* left = UseRegisterAtStart(instr->left());
- LOperand* right = UseRegisterAtStart(instr->right());
- LCmpSymbolEq* result = new LCmpSymbolEq(left, right);
- return DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoCompareConstantEqAndBranch(
+ HCompareConstantEqAndBranch* instr) {
+ return new LCmpConstantEqAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
+LInstruction* LChunkBuilder::DoIsNullAndBranch(HIsNullAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsNull(value));
+ return new LIsNullAndBranch(UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsObject(value));
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(instr->value()), temp);
}
-LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseAtStart(instr->value());
-
- return DefineAsRegister(new LIsSmi(value));
+ return new LIsSmiAndBranch(Use(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsUndetectable(HIsUndetectable* instr) {
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+ HIsUndetectableAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LIsUndetectable(value));
+ return new LIsUndetectableAndBranch(UseRegisterAtStart(instr->value()),
+ TempRegister());
}
-LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+ HHasInstanceTypeAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegisterAtStart(instr->value());
-
- return DefineAsRegister(new LHasInstanceType(value));
+ return new LHasInstanceTypeAndBranch(UseRegisterAtStart(instr->value()));
}
@@ -1572,19 +1491,19 @@
}
-LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
- HHasCachedArrayIndex* instr) {
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+ HHasCachedArrayIndexAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseRegister(instr->value());
-
- return DefineAsRegister(new LHasCachedArrayIndex(value));
+ return new LHasCachedArrayIndexAndBranch(
+ UseRegisterAtStart(instr->value()));
}
-LInstruction* LChunkBuilder::DoClassOfTest(HClassOfTest* instr) {
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+ HClassOfTestAndBranch* instr) {
ASSERT(instr->value()->representation().IsTagged());
- LOperand* value = UseTempRegister(instr->value());
- return DefineSameAsFirst(new LClassOfTest(value));
+ return new LClassOfTestAndBranch(UseTempRegister(instr->value()),
+ TempRegister());
}
@@ -1607,10 +1526,16 @@
}
+LInstruction* LChunkBuilder::DoElementsKind(HElementsKind* instr) {
+ LOperand* object = UseRegisterAtStart(instr->value());
+ return DefineAsRegister(new LElementsKind(object));
+}
+
+
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object, TempRegister());
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
}
@@ -1633,6 +1558,11 @@
}
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+ return NULL;
+}
+
+
LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
// All HForceRepresentation instructions should be eliminated in the
// representation change phase of Hydrogen.
@@ -1660,7 +1590,7 @@
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
- LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d3)
+ LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(d11)
: NULL;
res = DefineSameAsFirst(new LTaggedToI(value, temp1, temp2, temp3));
res = AssignEnvironment(res);
@@ -1754,19 +1684,44 @@
Representation input_rep = value->representation();
LOperand* reg = UseRegister(value);
if (input_rep.IsDouble()) {
- return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d1)));
+ return DefineAsRegister(new LClampDToUint8(reg, FixedTemp(d11)));
} else if (input_rep.IsInteger32()) {
return DefineAsRegister(new LClampIToUint8(reg));
} else {
ASSERT(input_rep.IsTagged());
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve d1 explicitly.
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve d11 explicitly.
- LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d1));
+ LClampTToUint8* result = new LClampTToUint8(reg, FixedTemp(d11));
return AssignEnvironment(DefineAsRegister(result));
}
}
+LInstruction* LChunkBuilder::DoToInt32(HToInt32* instr) {
+ HValue* value = instr->value();
+ Representation input_rep = value->representation();
+ LOperand* reg = UseRegister(value);
+ if (input_rep.IsDouble()) {
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LDoubleToI* res = new LDoubleToI(reg, temp1, temp2);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else if (input_rep.IsInteger32()) {
+ // Canonicalization should already have removed the hydrogen instruction in
+ // this case, since it is a noop.
+ UNREACHABLE();
+ return NULL;
+ } else {
+ ASSERT(input_rep.IsTagged());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* temp3 = FixedTemp(d11);
+ LTaggedToI* res = new LTaggedToI(reg, temp1, temp2, temp3);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ }
+}
+
+
LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
return new LReturn(UseFixed(instr->value(), r0));
}
@@ -1898,19 +1853,33 @@
LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterAtStart(instr->key());
LLoadKeyedFastElement* result = new LLoadKeyedFastElement(obj, key);
- return AssignEnvironment(DefineSameAsFirst(result));
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32());
+ LOperand* elements = UseTempRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
HLoadKeyedSpecializedArrayElement* instr) {
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
Representation representation(instr->representation());
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* key = UseRegisterOrConstant(instr->key());
@@ -1919,7 +1888,7 @@
LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return (array_type == kExternalUnsignedIntArray) ?
+ return (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
AssignEnvironment(load_instr) : load_instr;
}
@@ -1953,21 +1922,38 @@
}
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
HStoreKeyedSpecializedArrayElement* instr) {
Representation representation(instr->value()->representation());
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
ASSERT(
- (representation.IsInteger32() && (array_type != kExternalFloatArray &&
- array_type != kExternalDoubleArray)) ||
- (representation.IsDouble() && (array_type == kExternalFloatArray ||
- array_type == kExternalDoubleArray)));
+ (representation.IsInteger32() &&
+ (elements_kind != JSObject::EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != JSObject::EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (representation.IsDouble() &&
+ ((elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS))));
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- bool val_is_temp_register = array_type == kExternalPixelArray ||
- array_type == kExternalFloatArray;
+ bool val_is_temp_register =
+ elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS;
LOperand* val = val_is_temp_register
? UseTempRegister(instr->value())
: UseRegister(instr->value());
@@ -2132,13 +2118,14 @@
}
-LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
- return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+ return new LTypeofIsAndBranch(UseTempRegister(instr->value()));
}
-LInstruction* LChunkBuilder::DoIsConstructCall(HIsConstructCall* instr) {
- return DefineAsRegister(new LIsConstructCall());
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+ HIsConstructCallAndBranch* instr) {
+ return new LIsConstructCallAndBranch(TempRegister());
}
@@ -2174,7 +2161,12 @@
LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
- return MarkAsCall(new LStackCheck, instr);
+ if (instr->is_function_entry()) {
+ return MarkAsCall(new LStackCheck, instr);
+ } else {
+ ASSERT(instr->is_backwards_branch());
+ return AssignEnvironment(AssignPointerMap(new LStackCheck));
+ }
}
@@ -2183,7 +2175,6 @@
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
instr->function(),
- HEnvironment::LITHIUM,
undefined,
instr->call_kind());
current_block_->UpdateEnvironment(inner);
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 73c4a87..b477e99 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
+#include "utils.h"
namespace v8 {
namespace internal {
@@ -76,17 +77,12 @@
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
- V(ClassOfTest) \
V(ClassOfTestAndBranch) \
- V(CmpID) \
+ V(CmpConstantEqAndBranch) \
V(CmpIDAndBranch) \
- V(CmpJSObjectEq) \
- V(CmpJSObjectEqAndBranch) \
+ V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
- V(CmpSymbolEq) \
- V(CmpSymbolEqAndBranch) \
V(CmpT) \
- V(CmpTAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@@ -95,6 +91,7 @@
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
+ V(ElementsKind) \
V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
@@ -102,26 +99,18 @@
V(GlobalObject) \
V(GlobalReceiver) \
V(Goto) \
- V(HasCachedArrayIndex) \
V(HasCachedArrayIndexAndBranch) \
- V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
- V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
- V(IsConstructCall) \
V(IsConstructCallAndBranch) \
- V(IsNull) \
V(IsNullAndBranch) \
- V(IsObject) \
V(IsObjectAndBranch) \
- V(IsSmi) \
V(IsSmiAndBranch) \
- V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
@@ -132,6 +121,7 @@
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
+ V(LoadKeyedFastDoubleElement) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
@@ -158,6 +148,7 @@
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
+ V(StoreKeyedFastDoubleElement) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
@@ -169,10 +160,10 @@
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
- V(TypeofIs) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
@@ -231,7 +222,6 @@
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
- virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
void set_environment(LEnvironment* env) { environment_ = env; }
LEnvironment* environment() const { return environment_; }
@@ -286,37 +276,6 @@
};
-template<typename ElementType, int NumElements>
-class OperandContainer {
- public:
- OperandContainer() {
- for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
- }
- int length() { return NumElements; }
- ElementType& operator[](int i) {
- ASSERT(i < length());
- return elems_[i];
- }
- void PrintOperandsTo(StringStream* stream);
-
- private:
- ElementType elems_[NumElements];
-};
-
-
-template<typename ElementType>
-class OperandContainer<ElementType, 0> {
- public:
- int length() { return 0; }
- void PrintOperandsTo(StringStream* stream) { }
- ElementType& operator[](int i) {
- UNREACHABLE();
- static ElementType t = 0;
- return t;
- }
-};
-
-
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@@ -339,9 +298,9 @@
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
- OperandContainer<LOperand*, R> results_;
- OperandContainer<LOperand*, I> inputs_;
- OperandContainer<LOperand*, T> temps_;
+ EmbeddedContainer<LOperand*, R> results_;
+ EmbeddedContainer<LOperand*, I> inputs_;
+ EmbeddedContainer<LOperand*, T> temps_;
};
@@ -401,19 +360,16 @@
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
- LGoto(int block_id, bool include_stack_check = false)
- : block_id_(block_id), include_stack_check_(include_stack_check) { }
+ explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
- bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
- bool include_stack_check_;
};
@@ -489,16 +445,15 @@
public:
virtual bool IsControl() const { return true; }
- int true_block_id() const { return true_block_id_; }
- int false_block_id() const { return false_block_id_; }
- void SetBranchTargets(int true_block_id, int false_block_id) {
- true_block_id_ = true_block_id;
- false_block_id_ = false_block_id;
- }
+ int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+ HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+ int true_block_id() { return hydrogen()->SuccessorAt(0)->block_id(); }
+ int false_block_id() { return hydrogen()->SuccessorAt(1)->block_id(); }
private:
- int true_block_id_;
- int false_block_id_;
+ HControlInstruction* hydrogen() {
+ return HControlInstruction::cast(this->hydrogen_value());
+ }
};
@@ -614,23 +569,6 @@
};
-class LCmpID: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpID(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpID, "cmp-id")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
- bool is_double() const {
- return hydrogen()->GetInputRepresentation().IsDouble();
- }
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -639,7 +577,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpIDAndBranch, "cmp-id-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareIDAndBranch)
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
@@ -665,63 +603,31 @@
};
-class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
+class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
- LCmpJSObjectEq(LOperand* left, LOperand* right) {
+ LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
+ DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
+ "cmp-object-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
};
-class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
+class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
- LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
+ explicit LCmpConstantEqAndBranch(LOperand* left) {
inputs_[0] = left;
- inputs_[1] = right;
}
- DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
- "cmp-jsobject-eq-and-branch")
+ DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
+ "cmp-constant-eq-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(CompareConstantEqAndBranch)
};
-class LCmpSymbolEq: public LTemplateInstruction<1, 2, 0> {
- public:
- LCmpSymbolEq(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEq, "cmp-symbol-eq")
-};
-
-
-class LCmpSymbolEqAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpSymbolEqAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpSymbolEqAndBranch, "cmp-symbol-eq-and-branch")
-};
-
-
-class LIsNull: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsNull(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsNull, "is-null")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
-
- bool is_strict() const { return hydrogen()->is_strict(); }
-};
-
class LIsNullAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsNullAndBranch(LOperand* value) {
@@ -729,7 +635,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsNullAndBranch, "is-null-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(IsNull)
+ DECLARE_HYDROGEN_ACCESSOR(IsNullAndBranch)
bool is_strict() const { return hydrogen()->is_strict(); }
@@ -737,17 +643,7 @@
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
- public:
- explicit LIsObject(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
-};
-
-
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@@ -755,22 +651,12 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsSmi: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsSmi(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsSmi, "is-smi")
- DECLARE_HYDROGEN_ACCESSOR(IsSmi)
-};
-
-
class LIsSmiAndBranch: public LControlInstruction<1, 0> {
public:
explicit LIsSmiAndBranch(LOperand* value) {
@@ -778,22 +664,12 @@
}
DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LIsUndetectable(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
- DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
-};
-
-
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
@@ -803,22 +679,12 @@
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
+ DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
-class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasInstanceType(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasInstanceType, "has-instance-type")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
-};
-
-
class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasInstanceTypeAndBranch(LOperand* value) {
@@ -827,7 +693,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
"has-instance-type-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(HasInstanceType)
+ DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -844,17 +710,6 @@
};
-class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LHasCachedArrayIndex(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
- DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
-};
-
-
class LHasCachedArrayIndexAndBranch: public LControlInstruction<1, 0> {
public:
explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -863,18 +718,7 @@
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
"has-cached-array-index-and-branch")
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
-class LClassOfTest: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LClassOfTest(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(ClassOfTest, "class-of-test")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -889,7 +733,7 @@
DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
"class-of-test-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(ClassOfTest)
+ DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -903,21 +747,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
-
- Token::Value op() const { return hydrogen()->token(); }
-};
-
-
-class LCmpTAndBranch: public LControlInstruction<2, 0> {
- public:
- LCmpTAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(Compare)
+ DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
Token::Value op() const { return hydrogen()->token(); }
};
@@ -934,17 +764,6 @@
};
-class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
- public:
- LInstanceOfAndBranch(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
-};
-
-
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@@ -1057,7 +876,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
- DECLARE_HYDROGEN_ACCESSOR(Value)
+ DECLARE_HYDROGEN_ACCESSOR(Branch)
virtual void PrintDataTo(StringStream* stream);
};
@@ -1118,6 +937,17 @@
};
+class LElementsKind: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LElementsKind(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
+ DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
+};
+
+
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
@@ -1309,6 +1139,22 @@
};
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+};
+
+
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
@@ -1323,8 +1169,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -1440,6 +1286,11 @@
};
+class LThisFunction: public LTemplateInstruction<1, 0, 0> {
+ DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+};
+
+
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@@ -1639,7 +1490,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1659,7 +1510,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
- DECLARE_HYDROGEN_ACCESSOR(Change)
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@@ -1764,6 +1615,28 @@
};
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+};
+
+
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
@@ -1800,8 +1673,8 @@
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ExternalArrayType array_type() const {
- return hydrogen()->array_type();
+ JSObject::ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
}
};
@@ -2020,21 +1893,6 @@
};
-class LTypeofIs: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LTypeofIs(LOperand* value) {
- inputs_[0] = value;
- }
-
- DECLARE_CONCRETE_INSTRUCTION(TypeofIs, "typeof-is")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
-
- Handle<String> type_literal() { return hydrogen()->type_literal(); }
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LTypeofIsAndBranch: public LControlInstruction<1, 0> {
public:
explicit LTypeofIsAndBranch(LOperand* value) {
@@ -2042,7 +1900,7 @@
}
DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
- DECLARE_HYDROGEN_ACCESSOR(TypeofIs)
+ DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
Handle<String> type_literal() { return hydrogen()->type_literal(); }
@@ -2050,13 +1908,6 @@
};
-class LIsConstructCall: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(IsConstructCall, "is-construct-call")
- DECLARE_HYDROGEN_ACCESSOR(IsConstructCall)
-};
-
-
class LIsConstructCallAndBranch: public LControlInstruction<0, 1> {
public:
explicit LIsConstructCallAndBranch(LOperand* temp) {
@@ -2108,6 +1959,12 @@
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+ DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+ Label* done_label() { return &done_label_; }
+
+ private:
+ Label done_label_;
};
@@ -2313,7 +2170,8 @@
LInstruction* instr, int ast_id);
void ClearInstructionPendingDeoptimizationEnvironment();
- LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
+ LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+ int* argument_index_accumulator);
void VisitInstruction(HInstruction* current);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index d25ca49..ad8091b 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -146,11 +146,11 @@
// fp: Caller's frame pointer.
// lr: Caller's pc.
- // Strict mode functions need to replace the receiver with undefined
- // when called as functions (without an explicit receiver
- // object). r5 is zero for method calls and non-zero for function
- // calls.
- if (info_->is_strict_mode()) {
+ // Strict mode functions and builtins need to replace the receiver
+ // with undefined when called as functions (without an explicit
+ // receiver object). r5 is zero for method calls and non-zero for
+ // function calls.
+ if (info_->is_strict_mode() || info_->is_native()) {
Label ok;
__ cmp(r5, Operand(0));
__ b(eq, &ok);
@@ -189,7 +189,7 @@
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
- __ CallRuntime(Runtime::kNewContext, 1);
+ __ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both r0 and cp. It replaces the context
@@ -257,11 +257,20 @@
bool LCodeGen::GenerateDeferredCode() {
ASSERT(is_generating());
- for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
- LDeferredCode* code = deferred_[i];
- __ bind(code->entry());
- code->Generate();
- __ jmp(code->exit());
+ if (deferred_.length() > 0) {
+ for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+ LDeferredCode* code = deferred_[i];
+ __ bind(code->entry());
+ code->Generate();
+ __ jmp(code->exit());
+ }
+
+ // Pad code to ensure that the last piece of deferred code has
+ // room for lazy bailout.
+ while ((masm()->pc_offset() - LastSafepointEnd())
+ < Deoptimizer::patch_size()) {
+ __ nop();
+ }
}
// Force constant pool emission at the end of the deferred code to make
@@ -777,7 +786,7 @@
void LCodeGen::RecordPosition(int position) {
- if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
+ if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
}
@@ -880,6 +889,7 @@
void LCodeGen::DoModI(LModI* instr) {
if (instr->hydrogen()->HasPowerOf2Divisor()) {
Register dividend = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
int32_t divisor =
HConstant::cast(instr->hydrogen()->right())->Integer32Value();
@@ -889,17 +899,15 @@
Label positive_dividend, done;
__ cmp(dividend, Operand(0));
__ b(pl, &positive_dividend);
- __ rsb(dividend, dividend, Operand(0));
- __ and_(dividend, dividend, Operand(divisor - 1));
- __ rsb(dividend, dividend, Operand(0), SetCC);
+ __ rsb(result, dividend, Operand(0));
+ __ and_(result, result, Operand(divisor - 1), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ b(ne, &done);
- DeoptimizeIf(al, instr->environment());
- } else {
- __ b(&done);
+ DeoptimizeIf(eq, instr->environment());
}
+ __ rsb(result, result, Operand(0));
+ __ b(&done);
__ bind(&positive_dividend);
- __ and_(dividend, dividend, Operand(divisor - 1));
+ __ and_(result, dividend, Operand(divisor - 1));
__ bind(&done);
return;
}
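The power-of-two fast path above computes a truncated (sign-follows-dividend) modulo. A plain C++ rendering of the same arithmetic, as a sketch that leaves out the deoptimization plumbing and works in unsigned math to sidestep INT32_MIN overflow:

    #include <cstdint>

    // dividend % divisor for divisor == 2^k, k >= 0.
    int32_t ModPowerOfTwo(int32_t dividend, int32_t divisor) {
      const uint32_t mask = static_cast<uint32_t>(divisor) - 1;
      if (dividend >= 0) return static_cast<int32_t>(dividend & mask);
      // Negative case: negate, mask, negate back. A zero result here is
      // the -0 case the generated code deoptimizes on under
      // kBailoutOnMinusZero.
      uint32_t m = (0u - static_cast<uint32_t>(dividend)) & mask;
      return -static_cast<int32_t>(m);
    }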
@@ -915,8 +923,6 @@
DwVfpRegister divisor = ToDoubleRegister(instr->TempAt(2));
DwVfpRegister quotient = double_scratch0();
- ASSERT(result.is(left));
-
ASSERT(!dividend.is(divisor));
ASSERT(!dividend.is(quotient));
ASSERT(!divisor.is(quotient));
@@ -932,6 +938,8 @@
DeoptimizeIf(eq, instr->environment());
}
+ __ Move(result, left);
+
// (0 % x) must yield 0 (if x is finite, which is the case here).
__ cmp(left, Operand(0));
__ b(eq, &done);
@@ -1128,68 +1136,125 @@
void LCodeGen::DoMulI(LMulI* instr) {
Register scratch = scratch0();
+ Register result = ToRegister(instr->result());
+ // Note that result may alias left.
Register left = ToRegister(instr->InputAt(0));
- Register right = EmitLoadRegister(instr->InputAt(1), scratch);
+ LOperand* right_op = instr->InputAt(1);
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
- !instr->InputAt(1)->IsConstantOperand()) {
- __ orr(ToRegister(instr->TempAt(0)), left, right);
- }
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+ bool bailout_on_minus_zero =
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
- if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- // scratch:left = left * right.
- __ smull(left, scratch, left, right);
- __ mov(ip, Operand(left, ASR, 31));
- __ cmp(ip, Operand(scratch));
- DeoptimizeIf(ne, instr->environment());
+ if (right_op->IsConstantOperand() && !can_overflow) {
+ // Use optimized code for specific constants.
+ int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+ if (bailout_on_minus_zero && (constant < 0)) {
+ // The case of a zero constant is handled separately below.
+ // If the constant is negative and left is zero, the result should be -0.
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ switch (constant) {
+ case -1:
+ __ rsb(result, left, Operand(0));
+ break;
+ case 0:
+ if (bailout_on_minus_zero) {
+ // If left is strictly negative and the constant is zero, the
+ // result is -0. Deoptimize if required, otherwise return 0.
+ __ cmp(left, Operand(0));
+ DeoptimizeIf(mi, instr->environment());
+ }
+ __ mov(result, Operand(0));
+ break;
+ case 1:
+ __ Move(result, left);
+ break;
+ default:
+ // Multiplying by powers of two and powers of two plus or minus
+ // one can be done faster with shifted operands.
+ // For other constants we emit standard code.
+ int32_t mask = constant >> 31;
+ uint32_t constant_abs = (constant + mask) ^ mask;
+
+ if (IsPowerOf2(constant_abs) ||
+ IsPowerOf2(constant_abs - 1) ||
+ IsPowerOf2(constant_abs + 1)) {
+ if (IsPowerOf2(constant_abs)) {
+ int32_t shift = WhichPowerOf2(constant_abs);
+ __ mov(result, Operand(left, LSL, shift));
+ } else if (IsPowerOf2(constant_abs - 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs - 1);
+ __ add(result, left, Operand(left, LSL, shift));
+ } else if (IsPowerOf2(constant_abs + 1)) {
+ int32_t shift = WhichPowerOf2(constant_abs + 1);
+ __ rsb(result, left, Operand(left, LSL, shift));
+ }
+
+ // Correct the sign of the result if the constant is negative.
+ if (constant < 0) __ rsb(result, result, Operand(0));
+
+ } else {
+ // Generate standard code.
+ __ mov(ip, Operand(constant));
+ __ mul(result, left, ip);
+ }
+ }
+
} else {
- __ mul(left, left, right);
- }
+ Register right = EmitLoadRegister(right_op, scratch);
+ if (bailout_on_minus_zero) {
+ __ orr(ToRegister(instr->TempAt(0)), left, right);
+ }
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- // Bail out if the result is supposed to be negative zero.
- Label done;
- __ cmp(left, Operand(0));
- __ b(ne, &done);
- if (instr->InputAt(1)->IsConstantOperand()) {
- if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
- DeoptimizeIf(al, instr->environment());
- }
+ if (can_overflow) {
+ // scratch:result = left * right.
+ __ smull(result, scratch, left, right);
+ __ cmp(scratch, Operand(result, ASR, 31));
+ DeoptimizeIf(ne, instr->environment());
} else {
- // Test the non-zero operand for negative sign.
+ __ mul(result, left, right);
+ }
+
+ if (bailout_on_minus_zero) {
+ // Bail out if the result is supposed to be negative zero.
+ Label done;
+ __ cmp(result, Operand(0));
+ __ b(ne, &done);
__ cmp(ToRegister(instr->TempAt(0)), Operand(0));
DeoptimizeIf(mi, instr->environment());
+ __ bind(&done);
}
- __ bind(&done);
}
}
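
The constant path in DoMulI combines two classic tricks: a branchless absolute value ((constant + mask) ^ mask with mask = constant >> 31) and strength reduction of multiplication by 2^k, 2^k + 1, or 2^k - 1 into a single shift plus one add/rsb. The register path detects overflow by checking that smull's high word equals the sign extension of the low word. A hedged C++ sketch of both (uses the GCC/Clang __builtin_ctz intrinsic; not V8 code):

    #include <cstdint>

    static bool IsPowerOf2(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

    // Strength-reduced multiply by a compile-time-known constant. Shifts are
    // done in uint32_t so the sketch stays well-defined C++; assumes two's
    // complement and arithmetic right shift, like the generated ARM code.
    int32_t MulByConstant(int32_t left, int32_t constant) {
      uint32_t mask = static_cast<uint32_t>(constant >> 31);  // 0 or 0xFFFFFFFF
      uint32_t abs = (static_cast<uint32_t>(constant) + mask) ^ mask;
      uint32_t l = static_cast<uint32_t>(left);
      uint32_t result;
      if (IsPowerOf2(abs)) {
        result = l << __builtin_ctz(abs);                  // left * 2^k
      } else if (IsPowerOf2(abs - 1)) {
        result = l + (l << __builtin_ctz(abs - 1));        // left * (2^k + 1)
      } else if (IsPowerOf2(abs + 1)) {
        result = (l << __builtin_ctz(abs + 1)) - l;        // left * (2^k - 1)
      } else {
        return left * constant;                            // generic mul path
      }
      if (constant < 0) result = 0u - result;              // correct the sign
      return static_cast<int32_t>(result);
    }

    // The smull check: the 64-bit product fits in 32 bits exactly when its
    // high word equals the sign extension of its low word.
    bool MulOverflows(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * b;       // what smull computes
      return (product >> 32) != (static_cast<int32_t>(product) >> 31);
    }
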
void LCodeGen::DoBitI(LBitI* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- Operand right_operand(no_reg);
+ LOperand* left_op = instr->InputAt(0);
+ LOperand* right_op = instr->InputAt(1);
+ ASSERT(left_op->IsRegister());
+ Register left = ToRegister(left_op);
+ Register result = ToRegister(instr->result());
+ Operand right(no_reg);
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- right_operand = Operand(right_reg);
+ if (right_op->IsStackSlot() || right_op->IsArgument()) {
+ right = Operand(EmitLoadRegister(right_op, ip));
} else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- right_operand = ToOperand(right);
+ ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+ right = ToOperand(right_op);
}
switch (instr->op()) {
case Token::BIT_AND:
- __ and_(result, ToRegister(left), right_operand);
+ __ and_(result, left, right);
break;
case Token::BIT_OR:
- __ orr(result, ToRegister(left), right_operand);
+ __ orr(result, left, right);
break;
case Token::BIT_XOR:
- __ eor(result, ToRegister(left), right_operand);
+ __ eor(result, left, right);
break;
default:
UNREACHABLE();
@@ -1199,54 +1264,62 @@
void LCodeGen::DoShiftI(LShiftI* instr) {
+ // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+ // result may alias either of them.
+ LOperand* right_op = instr->InputAt(1);
+ Register left = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
- ASSERT(left->IsRegister());
- Register result = ToRegister(left);
- if (right->IsRegister()) {
- // Mask the right operand.
- __ and_(scratch, ToRegister(right), Operand(0x1F));
+ if (right_op->IsRegister()) {
+ // Mask the shift amount to the low five bits.
+ __ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
case Token::SAR:
- __ mov(result, Operand(result, ASR, scratch));
+ __ mov(result, Operand(left, ASR, scratch));
break;
case Token::SHR:
if (instr->can_deopt()) {
- __ mov(result, Operand(result, LSR, scratch), SetCC);
+ __ mov(result, Operand(left, LSR, scratch), SetCC);
DeoptimizeIf(mi, instr->environment());
} else {
- __ mov(result, Operand(result, LSR, scratch));
+ __ mov(result, Operand(left, LSR, scratch));
}
break;
case Token::SHL:
- __ mov(result, Operand(result, LSL, scratch));
+ __ mov(result, Operand(left, LSL, scratch));
break;
default:
UNREACHABLE();
break;
}
} else {
- int value = ToInteger32(LConstantOperand::cast(right));
+ // Mask the constant shift amount to the low five bits.
+ int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::SAR:
if (shift_count != 0) {
- __ mov(result, Operand(result, ASR, shift_count));
+ __ mov(result, Operand(left, ASR, shift_count));
+ } else {
+ __ Move(result, left);
}
break;
case Token::SHR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ tst(result, Operand(0x80000000));
- DeoptimizeIf(ne, instr->environment());
+ if (shift_count != 0) {
+ __ mov(result, Operand(left, LSR, shift_count));
} else {
- __ mov(result, Operand(result, LSR, shift_count));
+ if (instr->can_deopt()) {
+ __ tst(left, Operand(0x80000000));
+ DeoptimizeIf(ne, instr->environment());
+ }
+ __ Move(result, left);
}
break;
case Token::SHL:
if (shift_count != 0) {
- __ mov(result, Operand(result, LSL, shift_count));
+ __ mov(result, Operand(left, LSL, shift_count));
+ } else {
+ __ Move(result, left);
}
break;
default:
@@ -1260,16 +1333,16 @@
void LCodeGen::DoSubI(LSubI* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
+ LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
if (right->IsStackSlot() || right->IsArgument()) {
Register right_reg = EmitLoadRegister(right, ip);
- __ sub(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
+ __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ sub(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
+ __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
@@ -1288,7 +1361,7 @@
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
double v = instr->value();
- __ vmov(result, v);
+ __ Vmov(result, v);
}
@@ -1319,19 +1392,34 @@
}
+void LCodeGen::DoElementsKind(LElementsKind* instr) {
+ Register result = ToRegister(instr->result());
+ Register input = ToRegister(instr->InputAt(0));
+
+ // Load map into |result|.
+ __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Load the map's "bit field 2" into |result|. We only need the first byte,
+ // but the following bit field extraction takes care of that anyway.
+ __ ldr(result, FieldMemOperand(result, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+}
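
The ubfx above is a single-instruction bit-field extract; the equivalent shift-and-mask in C++ (field positions as named in the diff, count assumed < 32):

    #include <cstdint>

    uint32_t ExtractBitField(uint32_t word, int shift, int count) {
      return (word >> shift) & ((1u << count) - 1u);  // what ubfx computes
    }
    // elements_kind = ExtractBitField(bit_field2, Map::kElementsKindShift,
    //                                 Map::kElementsKindBitCount);
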
+
+
void LCodeGen::DoValueOf(LValueOf* instr) {
Register input = ToRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->TempAt(0));
- ASSERT(input.is(result));
Label done;
// If the object is a smi return the object.
__ tst(input, Operand(kSmiTagMask));
+ __ Move(result, input, eq);
__ b(eq, &done);
// If the object is not a value type, return the object.
__ CompareObjectType(input, map, map, JS_VALUE_TYPE);
+ __ Move(result, input, ne);
__ b(ne, &done);
__ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
@@ -1340,9 +1428,9 @@
void LCodeGen::DoBitNotI(LBitNotI* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->Equals(instr->result()));
- __ mvn(ToRegister(input), Operand(ToRegister(input)));
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ __ mvn(result, Operand(input));
}
@@ -1360,16 +1448,16 @@
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
- ASSERT(left->Equals(instr->result()));
+ LOperand* result = instr->result();
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
SBit set_cond = can_overflow ? SetCC : LeaveCC;
if (right->IsStackSlot() || right->IsArgument()) {
Register right_reg = EmitLoadRegister(right, ip);
- __ add(ToRegister(left), ToRegister(left), Operand(right_reg), set_cond);
+ __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
} else {
ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ add(ToRegister(left), ToRegister(left), ToOperand(right), set_cond);
+ __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
}
if (can_overflow) {
@@ -1381,18 +1469,19 @@
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
+ DoubleRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
- __ vadd(left, left, right);
+ __ vadd(result, left, right);
break;
case Token::SUB:
- __ vsub(left, left, right);
+ __ vsub(result, left, right);
break;
case Token::MUL:
- __ vmul(left, left, right);
+ __ vmul(result, left, right);
break;
case Token::DIV:
- __ vdiv(left, left, right);
+ __ vdiv(result, left, right);
break;
case Token::MOD: {
// Save r0-r3 on the stack.
@@ -1404,7 +1493,7 @@
ExternalReference::double_fp_operation(Token::MOD, isolate()),
0, 2);
// Move the result in the double result register.
- __ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
+ __ GetCFunctionDoubleResult(result);
// Restore r0-r3.
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
@@ -1459,7 +1548,7 @@
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
- Representation r = instr->hydrogen()->representation();
+ Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
Register reg = ToRegister(instr->InputAt(0));
__ cmp(reg, Operand(0));
@@ -1475,7 +1564,7 @@
} else {
ASSERT(r.IsTagged());
Register reg = ToRegister(instr->InputAt(0));
- if (instr->hydrogen()->type().IsBoolean()) {
+ if (instr->hydrogen()->value()->type().IsBoolean()) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(reg, ip);
EmitBranch(true_block, false_block, eq);
@@ -1494,12 +1583,11 @@
__ b(eq, false_label);
__ cmp(reg, Operand(0));
__ b(eq, false_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, true_label);
+ __ JumpIfSmi(reg, true_label);
// Test double values. Zero and NaN are false.
Label call_stub;
- DoubleRegister dbl_scratch = d0;
+ DoubleRegister dbl_scratch = double_scratch0();
Register scratch = scratch0();
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
@@ -1527,45 +1615,17 @@
}
-void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
+void LCodeGen::EmitGoto(int block) {
block = chunk_->LookupDestination(block);
int next_block = GetNextEmittedBlock(current_block_);
if (block != next_block) {
- // Perform stack overflow check if this goto needs it before jumping.
- if (deferred_stack_check != NULL) {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, chunk_->GetAssemblyLabel(block));
- __ jmp(deferred_stack_check->entry());
- deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
- } else {
- __ jmp(chunk_->GetAssemblyLabel(block));
- }
+ __ jmp(chunk_->GetAssemblyLabel(block));
}
}
-void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
- PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
- CallRuntimeFromDeferred(Runtime::kStackGuard, 0, instr);
-}
-
-
void LCodeGen::DoGoto(LGoto* instr) {
- class DeferredStackCheck: public LDeferredCode {
- public:
- DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
- : LDeferredCode(codegen), instr_(instr) { }
- virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
- private:
- LGoto* instr_;
- };
-
- DeferredStackCheck* deferred = NULL;
- if (instr->include_stack_check()) {
- deferred = new DeferredStackCheck(this, instr);
- }
- EmitGoto(instr->block_id(), deferred);
+ EmitGoto(instr->block_id());
}
@@ -1602,34 +1662,6 @@
}
-void LCodeGen::DoCmpID(LCmpID* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- LOperand* result = instr->result();
- Register scratch = scratch0();
-
- Label unordered, done;
- if (instr->is_double()) {
- // Compare left and right as doubles and load the
- // resulting flags into the normal status register.
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
- // If a NaN is involved, i.e. the result is unordered (V set),
- // jump to unordered to return false.
- __ b(vs, &unordered);
- } else {
- EmitCmpI(left, right);
- }
-
- Condition cc = TokenToCondition(instr->op(), instr->is_double());
- __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
- __ b(cc, &done);
-
- __ bind(&unordered);
- __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
@@ -1652,18 +1684,7 @@
}
-void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1674,62 +1695,16 @@
}
-void LCodeGen::DoCmpSymbolEq(LCmpSymbolEq* instr) {
+void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- Register result = ToRegister(instr->result());
-
- __ cmp(left, Operand(right));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
-void LCodeGen::DoCmpSymbolEqAndBranch(LCmpSymbolEqAndBranch* instr) {
- Register left = ToRegister(instr->InputAt(0));
- Register right = ToRegister(instr->InputAt(1));
- int false_block = chunk_->LookupDestination(instr->false_block_id());
int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
- __ cmp(left, Operand(right));
+ __ cmp(left, Operand(instr->hydrogen()->right()));
EmitBranch(true_block, false_block, eq);
}
-void LCodeGen::DoIsNull(LIsNull* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- __ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(reg, ip);
- if (instr->is_strict()) {
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
- } else {
- Label true_value, false_value, done;
- __ b(eq, &true_value);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(ip, reg);
- __ b(eq, &true_value);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, &false_value);
- // Check for undetectable objects by looking in the bit field in
- // the map. The object has already been smi checked.
- Register scratch = result;
- __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
- __ tst(scratch, Operand(1 << Map::kIsUndetectable));
- __ b(ne, &true_value);
- __ bind(&false_value);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ jmp(&done);
- __ bind(&true_value);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ bind(&done);
- }
-}
-
-
void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
Register scratch = scratch0();
Register reg = ToRegister(instr->InputAt(0));
@@ -1751,8 +1726,7 @@
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(reg, ip);
__ b(eq, true_label);
- __ tst(reg, Operand(kSmiTagMask));
- __ b(eq, false_label);
+ __ JumpIfSmi(reg, false_label);
// Check for undetectable objects by looking in the bit field in
// the map. The object has already been smi checked.
__ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
@@ -1765,13 +1739,13 @@
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
+ Register temp2 = scratch0();
__ JumpIfSmi(input, is_not_object);
- __ LoadRoot(temp1, Heap::kNullValueRootIndex);
- __ cmp(input, temp1);
+ __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+ __ cmp(input, temp2);
__ b(eq, is_object);
// Load map.
@@ -1783,33 +1757,13 @@
// Load instance type and check that it is in object type range.
__ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
- __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ b(lt, is_not_object);
- __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
return le;
}
-void LCodeGen::DoIsObject(LIsObject* instr) {
- Register reg = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register temp = scratch0();
- Label is_false, is_true, done;
-
- Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
- __ b(true_cond, &is_true);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
Register temp1 = ToRegister(instr->TempAt(0));
@@ -1821,25 +1775,12 @@
Label* false_label = chunk_->GetAssemblyLabel(false_block);
Condition true_cond =
- EmitIsObject(reg, temp1, temp2, false_label, true_label);
+ EmitIsObject(reg, temp1, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
-void LCodeGen::DoIsSmi(LIsSmi* instr) {
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Register result = ToRegister(instr->result());
- Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
- __ tst(input_reg, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- Label done;
- __ b(eq, &done);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
@@ -1850,25 +1791,6 @@
}
-void LCodeGen::DoIsUndetectable(LIsUndetectable* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label false_label, done;
- __ JumpIfSmi(input, &false_label);
- __ ldr(result, FieldMemOperand(input, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kBitFieldOffset));
- __ tst(result, Operand(1 << Map::kIsUndetectable));
- __ b(eq, &false_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = ToRegister(instr->TempAt(0));
@@ -1884,7 +1806,7 @@
}
-static InstanceType TestType(HHasInstanceType* instr) {
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
@@ -1893,7 +1815,7 @@
}
-static Condition BranchCondition(HHasInstanceType* instr) {
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return eq;
@@ -1904,23 +1826,6 @@
}
-void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- Label done;
- __ tst(input, Operand(kSmiTagMask));
- __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
- __ b(eq, &done);
- __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
- Condition cond = BranchCondition(instr->hydrogen());
- __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
- __ bind(&done);
-}
-
-
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register scratch = scratch0();
Register input = ToRegister(instr->InputAt(0));
@@ -1930,8 +1835,7 @@
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, false_label);
+ __ JumpIfSmi(input, false_label);
__ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
@@ -1951,20 +1855,6 @@
}
-void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
-
- ASSERT(instr->hydrogen()->value()->representation().IsTagged());
- __ ldr(scratch,
- FieldMemOperand(input, String::kHashFieldOffset));
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
- __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
- __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
-}
-
-
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
@@ -1990,28 +1880,28 @@
Register temp2) {
ASSERT(!input.is(temp));
ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
- __ tst(input, Operand(kSmiTagMask));
- __ b(eq, is_false);
- __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
+ __ JumpIfSmi(input, is_false);
+ __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
__ b(lt, is_false);
// Map is now in temp.
// Functions have class 'Function'.
- __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
+ __ CompareInstanceType(temp, temp2, FIRST_CALLABLE_SPEC_OBJECT_TYPE);
if (class_name->IsEqualTo(CStrVector("Function"))) {
- __ b(eq, is_true);
+ __ b(ge, is_true);
} else {
- __ b(eq, is_false);
+ __ b(ge, is_false);
}
// Check if the constructor in the map is a function.
__ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
- // As long as JS_FUNCTION_TYPE is the last instance type and it is
- // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
- // LAST_JS_OBJECT_TYPE.
- ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
- ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+ // As long as LAST_CALLABLE_SPEC_OBJECT_TYPE is the last instance type and
+ // FIRST_CALLABLE_SPEC_OBJECT_TYPE comes right after
+ // LAST_NONCALLABLE_SPEC_OBJECT_TYPE, we can avoid checking for the latter.
+ STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
+ STATIC_ASSERT(FIRST_CALLABLE_SPEC_OBJECT_TYPE ==
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE + 1);
// Objects with a non-function constructor have class 'Object'.
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
@@ -2037,27 +1927,6 @@
}
-void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- ASSERT(input.is(result));
- Handle<String> class_name = instr->hydrogen()->class_name();
-
- Label done, is_true, is_false;
-
- EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
- __ b(ne, &is_false);
-
- __ bind(&is_true);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
- __ jmp(&done);
-
- __ bind(&is_false);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ bind(&done);
-}
-
-
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
Register temp = scratch0();
@@ -2101,20 +1970,6 @@
}
-void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
- ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
- ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
-
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- InstanceofStub stub(InstanceofStub::kArgsInRegisters);
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ cmp(r0, Operand(0));
- EmitBranch(true_block, false_block, eq);
-}
-
-
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal: public LDeferredCode {
public:
@@ -2269,25 +2124,6 @@
}
-void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
- Token::Value op = instr->op();
- int true_block = chunk_->LookupDestination(instr->true_block_id());
- int false_block = chunk_->LookupDestination(instr->false_block_id());
-
- Handle<Code> ic = CompareIC::GetUninitialized(op);
- CallCode(ic, RelocInfo::CODE_TARGET, instr);
-
- // The compare stub expects compare condition and the input operands
- // reversed for GT and LTE.
- Condition condition = ComputeCompareCondition(op);
- if (op == Token::GT || op == Token::LTE) {
- condition = ReverseCondition(condition);
- }
- __ cmp(r0, Operand(0));
- EmitBranch(true_block, false_block, condition);
-}
-
-
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
@@ -2526,7 +2362,7 @@
__ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
if (FLAG_debug_code) {
- Label done;
+ Label done, fail;
__ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(scratch, ip);
@@ -2534,11 +2370,18 @@
__ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
__ cmp(scratch, ip);
__ b(eq, &done);
- __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ sub(scratch, scratch, Operand(FIRST_EXTERNAL_ARRAY_TYPE));
- __ cmp(scratch, Operand(kExternalArrayTypeCount));
- __ Check(cc, "Check for fast elements failed.");
+ // |scratch| still contains |input|'s map.
+ __ ldr(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
+ __ ubfx(scratch, scratch, Map::kElementsKindShift,
+ Map::kElementsKindBitCount);
+ __ cmp(scratch, Operand(JSObject::FAST_ELEMENTS));
+ __ b(eq, &done);
+ __ cmp(scratch, Operand(JSObject::FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ b(lt, &fail);
+ __ cmp(scratch, Operand(JSObject::LAST_EXTERNAL_ARRAY_ELEMENTS_KIND));
+ __ b(le, &done);
+ __ bind(&fail);
+ __ Abort("Check for fast or external elements failed.");
__ bind(&done);
}
}
@@ -2576,7 +2419,6 @@
Register key = EmitLoadRegister(instr->key(), scratch0());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
- ASSERT(result.is(elements));
// Load the result.
__ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
@@ -2591,11 +2433,53 @@
}
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ Register key = no_reg;
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Register scratch = scratch0();
+
+ int shift_size =
+ ElementsKindToShiftSize(JSObject::FAST_DOUBLE_ELEMENTS);
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+
+ Operand operand = key_is_constant
+ ? Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+ : Operand(key, LSL, shift_size);
+ __ add(elements, elements, operand);
+ if (!key_is_constant) {
+ __ add(elements, elements,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ // TODO(danno): If no hole check is required, there is no need to
+ // allocate a temporary register for elements; scratch can be used instead.
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
+ }
+
+ __ vldr(result, elements, 0);
+}
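
The hole check above inspects only the upper 32 bits of the double: the hole is a dedicated NaN bit pattern whose upper word (kHoleNanUpper32) distinguishes it from every ordinary value. A sketch of the same predicate in C++; the constant shown is an illustrative stand-in, the real one is defined elsewhere in the tree:

    #include <cstdint>
    #include <cstring>

    static const uint32_t kHoleNanUpper32Sketch = 0x7FF7FFFFu;  // stand-in value

    bool IsTheHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // well-defined type pun
      // Mirrors the single-word ldr + cmp against kHoleNanUpper32 above.
      return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Sketch;
    }
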
+
+
void LCodeGen::DoLoadKeyedSpecializedArrayElement(
LLoadKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
if (key_is_constant) {
@@ -2606,43 +2490,45 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ExternalArrayTypeToShiftSize(array_type);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
- if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
- DwVfpRegister result(ToDoubleRegister(instr->result()));
- Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
- : Operand(key, LSL, shift_size));
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Operand operand = key_is_constant
+ ? Operand(constant_key * (1 << shift_size))
+ : Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(result.low(), scratch0(), 0);
__ vcvt_f64_f32(result, result.low());
- } else { // i.e. array_type == kExternalDoubleArray
+ } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), 0);
}
} else {
- Register result(ToRegister(instr->result()));
+ Register result = ToRegister(instr->result());
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(result, mem_operand);
break;
- case kExternalUnsignedByteArray:
- case kExternalPixelArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(result, mem_operand);
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(result, mem_operand);
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(result, mem_operand);
break;
- case kExternalIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
__ ldr(result, mem_operand);
break;
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(result, mem_operand);
__ cmp(result, Operand(0x80000000));
// TODO(danno): we could be more clever here, perhaps having a special
@@ -2650,8 +2536,12 @@
// happens, and generate code that returns a double rather than int.
DeoptimizeIf(cs, instr->environment());
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -2717,12 +2607,26 @@
ASSERT(function.is(r1)); // Required by InvokeFunction.
ASSERT(ToRegister(instr->result()).is(r0));
- // TODO(1412): This is not correct if the called function is a
- // strict mode function or a native.
- //
- // If the receiver is null or undefined, we have to pass the global object
- // as a receiver.
+ // If the receiver is null or undefined, we have to pass the global
+ // object as a receiver to normal functions. Values have to be
+ // passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
+
+ // Do not transform the receiver to object for strict mode
+ // functions.
+ __ ldr(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+ __ tst(scratch,
+ Operand(1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize)));
+ __ b(ne, &receiver_ok);
+
+ // Do not transform the receiver to object for builtins.
+ __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+ __ b(ne, &receiver_ok);
+
+ // Normal function. Replace undefined or null with global receiver.
__ LoadRoot(scratch, Heap::kNullValueRootIndex);
__ cmp(receiver, scratch);
__ b(eq, &global_object);
@@ -2733,8 +2637,8 @@
// Deoptimize if the receiver is not a JS object.
__ tst(receiver, Operand(kSmiTagMask));
DeoptimizeIf(eq, instr->environment());
- __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
- DeoptimizeIf(lo, instr->environment());
+ __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+ DeoptimizeIf(lt, instr->environment());
__ jmp(&receiver_ok);
__ bind(&global_object);
@@ -2797,6 +2701,12 @@
}
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+ Register result = ToRegister(instr->result());
+ __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
__ mov(result, cp);
@@ -2807,8 +2717,7 @@
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result,
- MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
+ MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
@@ -2872,8 +2781,8 @@
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
Register scratch = scratch0();
// Deoptimize if not a heap number.
@@ -2887,10 +2796,10 @@
scratch = no_reg;
__ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
- // return it. We do not need to patch the stack since |input| and
- // |result| are the same register and |input| would be restored
- // unchanged by popping safepoint registers.
+ // return it.
__ tst(exponent, Operand(HeapNumber::kSignMask));
+ // Move the input to the result if necessary.
+ __ Move(result, input);
__ b(eq, &done);
// Input is negative. Reverse its sign.
@@ -2930,7 +2839,7 @@
__ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
__ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
- __ StoreToSafepointRegisterSlot(tmp1, input);
+ __ StoreToSafepointRegisterSlot(tmp1, result);
}
__ bind(&done);
@@ -2939,11 +2848,13 @@
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
__ cmp(input, Operand(0));
+ __ Move(result, input, pl);
// We can make rsb conditional because the previous cmp instruction
// will clear the V (overflow) flag and rsb won't set this flag
// if input is positive.
- __ rsb(input, input, Operand(0), SetCC, mi);
+ __ rsb(result, input, Operand(0), SetCC, mi);
// Deoptimize on overflow.
DeoptimizeIf(vs, instr->environment());
}
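
EmitIntegerMathAbs relies on the fact that INT32_MIN is the only input whose absolute value does not fit in 32 bits: the SetCC on the conditional rsb makes exactly that case raise the V flag. The same contract in C++:

    #include <cstdint>
    #include <limits>

    // Returns false in the one case the generated code deoptimizes on.
    bool Int32Abs(int32_t input, int32_t* result) {
      if (input == std::numeric_limits<int32_t>::min()) return false;  // overflow
      *result = input < 0 ? -input : input;  // cmp + conditional rsb above
      return true;
    }
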
@@ -2963,11 +2874,11 @@
LUnaryMathOperation* instr_;
};
- ASSERT(instr->InputAt(0)->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
- __ vabs(input, input);
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ __ vabs(result, input);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else {
@@ -3045,7 +2956,7 @@
// Save the original sign for later comparison.
__ and_(scratch2, scratch1, Operand(HeapNumber::kSignMask));
- __ vmov(double_scratch0(), 0.5);
+ __ Vmov(double_scratch0(), 0.5);
__ vadd(input, input, double_scratch0());
// Check sign of the result: if the sign changed, the input
@@ -3082,24 +2993,17 @@
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- ASSERT(ToDoubleRegister(instr->result()).is(input));
- __ vsqrt(input, input);
+ DoubleRegister result = ToDoubleRegister(instr->result());
+ __ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
- Register scratch = scratch0();
- SwVfpRegister single_scratch = double_scratch0().low();
- DoubleRegister double_scratch = double_scratch0();
- ASSERT(ToDoubleRegister(instr->result()).is(input));
-
+ DoubleRegister result = ToDoubleRegister(instr->result());
// Add +0 to convert -0 to +0.
- __ mov(scratch, Operand(0));
- __ vmov(single_scratch, scratch);
- __ vcvt_f64_s32(double_scratch, single_scratch);
- __ vadd(input, input, double_scratch);
- __ vsqrt(input, input);
+ __ vadd(result, input, kDoubleRegZero);
+ __ vsqrt(result, result);
}
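
The vadd of kDoubleRegZero before vsqrt is not dead code: IEEE sqrt preserves the sign of zero, but x ** 0.5 must return +0 for x == -0, and adding +0 flushes -0 to +0 while leaving every other input unchanged. A two-line demonstration:

    #include <cmath>
    #include <cstdio>

    int main() {
      double neg_zero = -0.0;
      std::printf("%g\n", std::sqrt(neg_zero));        // prints -0
      std::printf("%g\n", std::sqrt(neg_zero + 0.0));  // prints 0 (the vadd)
      return 0;
    }
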
@@ -3382,12 +3286,54 @@
}
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int shift_size = ElementsKindToShiftSize(JSObject::FAST_DOUBLE_ELEMENTS);
+ Operand operand = key_is_constant
+ ? Operand(constant_key * (1 << shift_size) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+ : Operand(key, LSL, shift_size);
+ __ add(scratch, elements, operand);
+ if (!key_is_constant) {
+ __ add(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ // Check for NaN. All NaNs must be canonicalized.
+ __ VFPCompareAndSetFlags(value, value);
+
+ // Only load the canonical NaN if the comparison above set the overflow flag.
+ __ Vmov(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double(), vs);
+
+ __ bind(¬_nan);
+ __ vstr(value, scratch, 0);
+}
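
VFPCompareAndSetFlags(value, value) is a self-comparison: it is unordered, and therefore sets the V flag, exactly when value is NaN, which is what gates the conditional Vmov of the canonical NaN. The predicate in one line of C++:

    // Only NaN compares unequal to itself; this is the vs condition above.
    bool IsNaN(double v) { return v != v; }
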
+
+
void LCodeGen::DoStoreKeyedSpecializedArrayElement(
LStoreKeyedSpecializedArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
- ExternalArrayType array_type = instr->array_type();
+ JSObject::ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
int constant_key = 0;
if (key_is_constant) {
@@ -3398,18 +3344,19 @@
} else {
key = ToRegister(instr->key());
}
- int shift_size = ExternalArrayTypeToShiftSize(array_type);
+ int shift_size = ElementsKindToShiftSize(elements_kind);
- if (array_type == kExternalFloatArray || array_type == kExternalDoubleArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatures::Scope scope(VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant ? Operand(constant_key * (1 << shift_size))
: Operand(key, LSL, shift_size));
__ add(scratch0(), external_pointer, operand);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
__ vcvt_f32_f64(double_scratch0().low(), value);
__ vstr(double_scratch0().low(), scratch0(), 0);
- } else { // i.e. array_type == kExternalDoubleArray
+ } else { // i.e. elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS
__ vstr(value, scratch0(), 0);
}
} else {
@@ -3417,22 +3364,26 @@
MemOperand mem_operand(key_is_constant
? MemOperand(external_pointer, constant_key * (1 << shift_size))
: MemOperand(external_pointer, key, LSL, shift_size));
- switch (array_type) {
- case kExternalPixelArray:
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(value, mem_operand);
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(value, mem_operand);
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(value, mem_operand);
break;
- case kExternalFloatArray:
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3693,8 +3644,8 @@
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
Register reg = ToRegister(instr->InputAt(0));
- DoubleRegister dbl_scratch = d0;
- SwVfpRegister flt_scratch = s0;
+ DoubleRegister dbl_scratch = double_scratch0();
+ SwVfpRegister flt_scratch = dbl_scratch.low();
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -3803,14 +3754,13 @@
bool deoptimize_on_undefined,
LEnvironment* env) {
Register scratch = scratch0();
- SwVfpRegister flt_scratch = s0;
- ASSERT(!result_reg.is(d0));
+ SwVfpRegister flt_scratch = double_scratch0().low();
+ ASSERT(!result_reg.is(double_scratch0()));
Label load_smi, heap_number, done;
// Smi check.
- __ tst(input_reg, Operand(kSmiTagMask));
- __ b(eq, &load_smi);
+ __ JumpIfSmi(input_reg, &load_smi);
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4316,29 +4266,6 @@
}
-void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
- Register input = ToRegister(instr->InputAt(0));
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- Condition final_branch_condition = EmitTypeofIs(&true_label,
- &false_label,
- input,
- instr->type_literal());
- __ b(final_branch_condition, &true_label);
- __ bind(&false_label);
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->InputAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4394,17 +4321,19 @@
} else if (type_name->Equals(heap()->function_symbol())) {
__ JumpIfSmi(input, false_label);
- __ CompareObjectType(input, input, scratch, FIRST_FUNCTION_CLASS_TYPE);
+ __ CompareObjectType(input, input, scratch,
+ FIRST_CALLABLE_SPEC_OBJECT_TYPE);
final_branch_condition = ge;
} else if (type_name->Equals(heap()->object_symbol())) {
__ JumpIfSmi(input, false_label);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ b(eq, true_label);
- __ CompareObjectType(input, input, scratch, FIRST_JS_OBJECT_TYPE);
- __ b(lo, false_label);
- __ CompareInstanceType(input, scratch, FIRST_FUNCTION_CLASS_TYPE);
- __ b(hs, false_label);
+ __ CompareObjectType(input, input, scratch,
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(lt, false_label);
+ __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ b(gt, false_label);
// Check for undetectable objects => false.
__ ldrb(ip, FieldMemOperand(input, Map::kBitFieldOffset));
__ tst(ip, Operand(1 << Map::kIsUndetectable));
@@ -4420,26 +4349,6 @@
}
-void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
- Register result = ToRegister(instr->result());
- Label true_label;
- Label false_label;
- Label done;
-
- EmitIsConstructCall(result, scratch0());
- __ b(eq, &true_label);
-
- __ LoadRoot(result, Heap::kFalseValueRootIndex);
- __ b(&done);
-
-
- __ bind(&true_label);
- __ LoadRoot(result, Heap::kTrueValueRootIndex);
-
- __ bind(&done);
-}
-
-
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
int true_block = chunk_->LookupDestination(instr->true_block_id());
@@ -4514,15 +4423,50 @@
}
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+ {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+ RegisterLazyDeoptimization(
+ instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+ }
+
+ // The gap code includes the restoring of the safepoint registers.
+ int pc = masm()->pc_offset();
+ safepoints_.SetPcAfterGap(pc);
+}
+
+
void LCodeGen::DoStackCheck(LStackCheck* instr) {
- // Perform stack overflow check.
- Label ok;
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- StackCheckStub stub;
- CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- __ bind(&ok);
+ class DeferredStackCheck: public LDeferredCode {
+ public:
+ DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+ private:
+ LStackCheck* instr_;
+ };
+
+ if (instr->hydrogen()->is_function_entry()) {
+ // Perform stack overflow check.
+ Label done;
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &done);
+ StackCheckStub stub;
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+ __ bind(&done);
+ } else {
+ ASSERT(instr->hydrogen()->is_backwards_branch());
+ // Perform stack overflow check if this goto needs it before jumping.
+ DeferredStackCheck* deferred_stack_check =
+ new DeferredStackCheck(this, instr);
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(lo, deferred_stack_check->entry());
+ __ bind(instr->done_label());
+ deferred_stack_check->SetExit(instr->done_label());
+ }
}
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 8253c17..ead8489 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -108,7 +108,7 @@
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
- void DoDeferredStackCheck(LGoto* instr);
+ void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
@@ -148,7 +148,7 @@
HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return r9; }
- DwVfpRegister double_scratch0() { return d0; }
+ DwVfpRegister double_scratch0() { return d15; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -261,7 +261,7 @@
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
- void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
+ void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
@@ -280,7 +280,6 @@
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index c227b13..c34a579 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -91,7 +91,7 @@
}
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
@@ -118,10 +118,8 @@
void MacroAssembler::Call(Register target, Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
#if USE_BLX
blx(target, cond);
#else
@@ -129,34 +127,29 @@
mov(lr, Operand(pc), LeaveCC, cond);
mov(pc, Operand(target), LeaveCC, cond);
#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, cond), post_position);
-#endif
+ ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
}
int MacroAssembler::CallSize(
- intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ Address target, RelocInfo::Mode rmode, Condition cond) {
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
- if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ intptr_t immediate = reinterpret_cast<intptr_t>(target);
+ if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
size += kInstrSize;
}
return size;
}
-void MacroAssembler::Call(intptr_t target,
+void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond) {
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
@@ -168,7 +161,7 @@
// we have to do it explicitly.
positions_recorder()->WriteRecordedPositions();
- mov(ip, Operand(target, rmode));
+ mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
@@ -176,82 +169,36 @@
// Set lr for return at current pc + 8.
mov(lr, Operand(pc), LeaveCC, cond);
// Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(target, rmode), LeaveCC, cond);
+ mov(pc, Operand(reinterpret_cast<int32_t>(target), rmode), LeaveCC, cond);
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
+ ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
}
-int MacroAssembler::CallSize(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(target), rmode);
-}
-
-
-void MacroAssembler::Call(
- byte* target, RelocInfo::Mode rmode, Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
-#endif
-}
-
-
-int MacroAssembler::CallSize(
- Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
- return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-}
-
-
-void MacroAssembler::CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
- Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
- ASSERT(rmode == RelocInfo::CODE_TARGET_WITH_ID);
- ASSERT(ast_id != kNoASTId);
- ASSERT(ast_id_for_reloc_info_ == kNoASTId);
- ast_id_for_reloc_info_ = ast_id;
- // 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
+int MacroAssembler::CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode,
+ unsigned ast_id,
+ Condition cond) {
+ return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
+ unsigned ast_id,
Condition cond) {
-#ifdef DEBUG
- int pre_position = pc_offset();
-#endif
-
+ Label start;
+ bind(&start);
ASSERT(RelocInfo::IsCodeTarget(rmode));
+ if (rmode == RelocInfo::CODE_TARGET && ast_id != kNoASTId) {
+ SetRecordedAstId(ast_id);
+ rmode = RelocInfo::CODE_TARGET_WITH_ID;
+ }
// 'code' is always generated ARM code, never THUMB code
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
-
-#ifdef DEBUG
- int post_position = pc_offset();
- CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
-#endif
+ Call(reinterpret_cast<Address>(code.location()), rmode, cond);
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond),
+ SizeOfCodeGeneratedSince(&start));
}
@@ -298,14 +245,20 @@
}
+void MacroAssembler::Push(Handle<Object> handle) {
+ mov(ip, Operand(handle));
+ push(ip);
+}
+
+
void MacroAssembler::Move(Register dst, Handle<Object> value) {
mov(dst, Operand(value));
}
-void MacroAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
if (!dst.is(src)) {
- mov(dst, src);
+ mov(dst, src, LeaveCC, cond);
}
}
@@ -330,7 +283,8 @@
!src2.must_use_constant_pool() &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
- ubfx(dst, src1, 0, WhichPowerOf2(src2.immediate() + 1), cond);
+ ubfx(dst, src1, 0,
+ WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else {
and_(dst, src1, src2, LeaveCC, cond);
@@ -438,20 +392,6 @@
}
-void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
- // Empty the const pool.
- CheckConstPool(true, true);
- add(pc, pc, Operand(index,
- LSL,
- Instruction::kInstrSizeLog2 - kSmiTagSize));
- BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
- nop(); // Jump table alignment.
- for (int i = 0; i < targets.length(); i++) {
- b(targets[i]);
- }
-}
-
-
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
@@ -654,19 +594,36 @@
ASSERT_EQ(0, dst1.code() % 2);
ASSERT_EQ(dst1.code() + 1, dst2.code());
+ // V8 does not use this addressing mode, so the fallback code
+ // below doesn't support it yet.
+ ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
+
// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
- MemOperand src2(src);
- src2.set_offset(src2.offset() + 4);
- if (dst1.is(src.rn())) {
- ldr(dst2, src2, cond);
- ldr(dst1, src, cond);
- } else {
- ldr(dst1, src, cond);
- ldr(dst2, src2, cond);
+ if ((src.am() == Offset) || (src.am() == NegOffset)) {
+ MemOperand src2(src);
+ src2.set_offset(src2.offset() + 4);
+ if (dst1.is(src.rn())) {
+ ldr(dst2, src2, cond);
+ ldr(dst1, src, cond);
+ } else {
+ ldr(dst1, src, cond);
+ ldr(dst2, src2, cond);
+ }
+ } else { // PostIndex or NegPostIndex.
+ ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
+ if (dst1.is(src.rn())) {
+ ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
+ ldr(dst1, src, cond);
+ } else {
+ MemOperand src2(src);
+ src2.set_offset(src2.offset() - 4);
+ ldr(dst1, MemOperand(src.rn(), 4, PostIndex), cond);
+ ldr(dst2, src2, cond);
+ }
}
}
}
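
On pre-ARMv7 cores the Offset/NegOffset path above splits the doubleword load into two word loads at offset and offset + 4, swapping their order when the first destination aliases the base register. A host-side sketch of the plain split (illustrative only, not target code):

    #include <stdint.h>
    #include <string.h>

    // Two 32-bit loads standing in for one ldrd: low word first, high word
    // at the next word address.
    static void Load64(const uint8_t* base, int offset,
                       uint32_t* dst1, uint32_t* dst2) {
      memcpy(dst1, base + offset, sizeof(uint32_t));      // ldr dst1, [base, #off]
      memcpy(dst2, base + offset + 4, sizeof(uint32_t));  // ldr dst2, [base, #off+4]
    }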
@@ -679,15 +636,26 @@
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());
+ // V8 does not use this addressing mode, so the fallback code
+ // below doesn't support it yet.
+ ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
+
// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
- dst2.set_offset(dst2.offset() + 4);
- str(src1, dst, cond);
- str(src2, dst2, cond);
+ if ((dst.am() == Offset) || (dst.am() == NegOffset)) {
+ dst2.set_offset(dst2.offset() + 4);
+ str(src1, dst, cond);
+ str(src2, dst2, cond);
+ } else { // PostIndex or NegPostIndex.
+ ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
+ dst2.set_offset(dst2.offset() - 4);
+ str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
+ str(src2, dst2, cond);
+ }
}
}
@@ -734,6 +702,23 @@
vmrs(fpscr_flags, cond);
}
+void MacroAssembler::Vmov(const DwVfpRegister dst,
+ const double imm,
+ const Condition cond) {
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+ static const DoubleRepresentation minus_zero(-0.0);
+ static const DoubleRepresentation zero(0.0);
+ DoubleRepresentation value(imm);
+ // Handle special values first.
+ if (value.bits == zero.bits) {
+ vmov(dst, kDoubleRegZero, cond);
+ } else if (value.bits == minus_zero.bits) {
+ vneg(dst, kDoubleRegZero, cond);
+ } else {
+ vmov(dst, imm, cond);
+ }
+}
+
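
Vmov special-cases the two zeros by raw bits because +0.0 and -0.0 compare equal as doubles even though only one of them is kDoubleRegZero. A sketch of the bit-level distinction DoubleRepresentation makes:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    static uint64_t DoubleBits(double value) {
      uint64_t bits;
      memcpy(&bits, &value, sizeof(bits));
      return bits;
    }

    int main() {
      printf("%d\n", 0.0 == -0.0);                          // 1: equal as doubles
      printf("%d\n", DoubleBits(0.0) == DoubleBits(-0.0));  // 0: sign bit differs
      return 0;
    }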
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
@@ -956,9 +941,9 @@
Handle<Code> adaptor =
isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(adaptor, RelocInfo::CODE_TARGET));
+ call_wrapper.BeforeCall(CallSize(adaptor));
SetCallKind(r5, call_kind);
- Call(adaptor, RelocInfo::CODE_TARGET);
+ Call(adaptor);
call_wrapper.AfterCall();
b(done);
} else {
@@ -1084,9 +1069,9 @@
Register scratch,
Label* fail) {
ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
- cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
+ cmp(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
b(lt, fail);
- cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
+ cmp(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
b(gt, fail);
}
@@ -1358,6 +1343,100 @@
}
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register t0,
+ Register t1,
+ Register t2) {
+ // Register use:
+ //
+ // elements - holds the slow-case elements of the receiver on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // key - holds the smi key on entry.
+ // Unchanged unless 'result' is the same register.
+ //
+ // result - holds the result on exit if the load succeeded.
+ //          Allowed to be the same as 'elements' or 'key'.
+ //          Unchanged on bailout so 'elements' or 'key' can be used
+ //          in further computation.
+ //
+ // Scratch registers:
+ //
+ // t0 - holds the untagged key on entry and holds the hash once computed.
+ //
+ // t1 - used to hold the capacity mask of the dictionary.
+ //
+ // t2 - used for the index into the dictionary.
+ Label done;
+
+ // Compute the hash code from the untagged key. This must be kept in sync
+ // with ComputeIntegerHash in utils.h.
+ //
+ // hash = ~hash + (hash << 15);
+ mvn(t1, Operand(t0));
+ add(t0, t1, Operand(t0, LSL, 15));
+ // hash = hash ^ (hash >> 12);
+ eor(t0, t0, Operand(t0, LSR, 12));
+ // hash = hash + (hash << 2);
+ add(t0, t0, Operand(t0, LSL, 2));
+ // hash = hash ^ (hash >> 4);
+ eor(t0, t0, Operand(t0, LSR, 4));
+ // hash = hash * 2057;
+ mov(t1, Operand(2057));
+ mul(t0, t0, t1);
+ // hash = hash ^ (hash >> 16);
+ eor(t0, t0, Operand(t0, LSR, 16));
+
+ // Compute the capacity mask.
+ ldr(t1, FieldMemOperand(elements, NumberDictionary::kCapacityOffset));
+ mov(t1, Operand(t1, ASR, kSmiTagSize)); // convert smi to int
+ sub(t1, t1, Operand(1));
+
+ // Generate an unrolled loop that performs a few probes before giving up.
+ static const int kProbes = 4;
+ for (int i = 0; i < kProbes; i++) {
+ // Use t2 for index calculations and keep the hash intact in t0.
+ mov(t2, t0);
+ // Compute the masked index: (hash + i + i * i) & mask.
+ if (i > 0) {
+ add(t2, t2, Operand(NumberDictionary::GetProbeOffset(i)));
+ }
+ and_(t2, t2, Operand(t1));
+
+ // Scale the index by multiplying by the element size.
+ ASSERT(NumberDictionary::kEntrySize == 3);
+ add(t2, t2, Operand(t2, LSL, 1)); // t2 = t2 * 3
+
+ // Check if the key is identical to the name.
+ add(t2, elements, Operand(t2, LSL, kPointerSizeLog2));
+ ldr(ip, FieldMemOperand(t2, NumberDictionary::kElementsStartOffset));
+ cmp(key, Operand(ip));
+ if (i != kProbes - 1) {
+ b(eq, &done);
+ } else {
+ b(ne, miss);
+ }
+ }
+
+ bind(&done);
+ // Check that the value is a normal property.
+ // t2: elements + (index * kPointerSize)
+ const int kDetailsOffset =
+ NumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+ ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+ tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::mask())));
+ b(ne, miss);
+
+ // Get the value at the masked, scaled index and return.
+ const int kValueOffset =
+ NumberDictionary::kElementsStartOffset + kPointerSize;
+ ldr(result, FieldMemOperand(t2, kValueOffset));
+}
+
+
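
For reference, the hash computed by the code above, transcribed from its own comments into C++; it must stay bit-for-bit in sync with ComputeIntegerHash in utils.h. The capacity in the probe loop is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ComputeIntegerHash(uint32_t key) {
      uint32_t hash = key;
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;
      hash = hash ^ (hash >> 16);
      return hash;
    }

    int main() {
      // Probe i inspects entry (hash + i + i * i) & mask, as in the
      // unrolled loop above.
      uint32_t hash = ComputeIntegerHash(42);
      uint32_t mask = 64 - 1;  // capacity 64, assumed to be a power of two
      for (uint32_t i = 0; i < 4; i++) {
        printf("probe %u -> entry %u\n", i, (hash + i + i * i) & mask);
      }
      return 0;
    }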
void MacroAssembler::AllocateInNewSpace(int object_size,
Register result,
Register scratch1,
@@ -1677,6 +1756,16 @@
}
+void MacroAssembler::CheckFastElements(Register map,
+ Register scratch,
+ Label* fail) {
+ STATIC_ASSERT(JSObject::FAST_ELEMENTS == 0);
+ ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+ cmp(scratch, Operand(Map::kMaximumBitField2FastElementValue));
+ b(hi, fail);
+}
+
+
void MacroAssembler::CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -1773,7 +1862,7 @@
void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond);
}
@@ -1783,7 +1872,8 @@
{ MaybeObject* maybe_result = stub->TryGetCode();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- Call(Handle<Code>(Code::cast(result)), RelocInfo::CODE_TARGET, cond);
+ Handle<Code> code(Code::cast(result));
+ Call(code, RelocInfo::CODE_TARGET, kNoASTId, cond);
return result;
}
@@ -2459,6 +2549,9 @@
LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
+ LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
+ cmp(elements, ip);
+ b(eq, &ok);
LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
cmp(elements, ip);
b(eq, &ok);
@@ -2521,12 +2614,9 @@
void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
if (context_chain_length > 0) {
// Move up the chain of contexts to the context containing the slot.
- ldr(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
for (int i = 1; i < context_chain_length; i++) {
- ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
- ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
}
} else {
// Slot is in the current function context. Move it into the
@@ -2534,17 +2624,6 @@
// cannot be allowed to destroy the context in esi).
mov(dst, cp);
}
-
- // We should not have found a 'with' context by walking the context chain
- // (i.e., the static scope chain and runtime context chain do not agree).
- // A variable occurring in such a scope should have slot type LOOKUP and
- // not CONTEXT.
- if (emit_debug_code()) {
- ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- cmp(dst, ip);
- Check(eq, "Yo dawg, I heard you liked function contexts "
- "so I put function contexts in all your contexts");
- }
}
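
After this change each context links directly to its enclosing context through a PREVIOUS slot, so the loop above is a plain pointer chase. A sketch (illustrative struct, not the V8 Context layout):

    struct Context {
      Context* previous;  // the PREVIOUS slot
    };

    static Context* WalkContextChain(Context* cp, int context_chain_length) {
      Context* ctx = cp;  // chain length 0: the current context (mov dst, cp)
      for (int i = 0; i < context_chain_length; i++) {
        ctx = ctx->previous;  // one ldr per level in the generated code
      }
      return ctx;
    }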
@@ -2692,8 +2771,7 @@
// Check that neither is a smi.
STATIC_ASSERT(kSmiTag == 0);
and_(scratch1, first, Operand(second));
- tst(scratch1, Operand(kSmiTagMask));
- b(eq, failure);
+ JumpIfSmi(scratch1, failure);
JumpIfNonSmisNotBothSequentialAsciiStrings(first,
second,
scratch1,
@@ -3085,7 +3163,7 @@
Label done;
Label in_bounds;
- vmov(temp_double_reg, 0.0);
+ Vmov(temp_double_reg, 0.0);
VFPCompareAndSetFlags(input_reg, temp_double_reg);
b(gt, &above_zero);
@@ -3095,7 +3173,7 @@
// Double value is >= 255, return 255.
bind(&above_zero);
- vmov(temp_double_reg, 255.0);
+ Vmov(temp_double_reg, 255.0);
VFPCompareAndSetFlags(input_reg, temp_double_reg);
b(le, &in_bounds);
mov(result_reg, Operand(255));
@@ -3103,7 +3181,7 @@
// In 0-255 range, round and truncate.
bind(&in_bounds);
- vmov(temp_double_reg, 0.5);
+ Vmov(temp_double_reg, 0.5);
vadd(temp_double_reg, input_reg, temp_double_reg);
vcvt_u32_f64(s0, temp_double_reg);
vmov(result_reg, s0);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 1e2c9f4..9c653ad 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -90,21 +90,21 @@
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
- void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
- static int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Handle<Code> code,
+ static int CallSize(Address target,
RelocInfo::Mode rmode,
Condition cond = al);
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ static int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
+ Condition cond = al);
void Call(Handle<Code> code,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void CallWithAstId(Handle<Code> code,
- RelocInfo::Mode rmode,
- unsigned ast_id,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ unsigned ast_id = kNoASTId,
Condition cond = al);
void Ret(Condition cond = al);
@@ -145,11 +145,9 @@
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
- void Move(Register dst, Register src);
+ void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
- // Jumps to the label at the index given by the Smi in "index".
- void SmiJumpTable(Register index, Vector<Label*> targets);
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index,
@@ -194,6 +192,9 @@
Register address,
Register scratch);
+ // Push a handle.
+ void Push(Handle<Object> handle);
+
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
@@ -313,6 +314,10 @@
const Register fpscr_flags,
const Condition cond = al);
+ void Vmov(const DwVfpRegister dst,
+ const double imm,
+ const Condition cond = al);
+
// ---------------------------------------------------------------------------
// Activation frames
@@ -430,6 +435,16 @@
Register scratch,
Label* miss);
+
+ void LoadFromNumberDictionary(Label* miss,
+ Register elements,
+ Register key,
+ Register result,
+ Register t0,
+ Register t1,
+ Register t2);
+
+
inline void MarkCode(NopMarkerTypes type) {
nop(type);
}
@@ -579,6 +594,12 @@
InstanceType type);
+ // Check if a map for a JSObject indicates that the object has fast elements.
+ // Jump to the specified label if it does not.
+ void CheckFastElements(Register map,
+ Register scratch,
+ Label* fail);
+
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@@ -1027,12 +1048,6 @@
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(intptr_t target,
- RelocInfo::Mode rmode,
- Condition cond = al);
- void Call(intptr_t target,
- RelocInfo::Mode rmode,
- Condition cond = al);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index 1c59823..983a528 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -899,13 +899,12 @@
constant_offset - offset_of_pc_register_read;
ASSERT(pc_offset_of_constant < 0);
if (is_valid_memory_offset(pc_offset_of_constant)) {
- masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ ldr(r0, MemOperand(pc, pc_offset_of_constant));
} else {
// Not a 12-bit offset, so it needs to be loaded from the constant
// pool.
- masm_->BlockConstPoolBefore(
- masm_->pc_offset() + 2 * Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
__ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
__ ldr(r0, MemOperand(pc, r0));
}
@@ -1185,8 +1184,7 @@
void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
__ CheckConstPool(false, false);
- __ BlockConstPoolBefore(
- masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
+ Assembler::BlockConstPoolScope block_const_pool(masm_);
backtrack_constant_pool_offset_ = masm_->pc_offset();
for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
__ emit(0);
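
All three sites in this file now use the RAII scope instead of computing a blocked region by hand, so the blocked range always matches the code actually emitted inside the braces. A simplified stand-in for the scope (the nesting counter mirrors const_pool_blocked_nesting_ in the assembler):

    // Toy assembler showing the shape of Assembler::BlockConstPoolScope.
    class Assembler {
     public:
      void StartBlockConstPool() { ++const_pool_blocked_nesting_; }
      void EndBlockConstPool() { --const_pool_blocked_nesting_; }

      class BlockConstPoolScope {
       public:
        explicit BlockConstPoolScope(Assembler* assem) : assem_(assem) {
          assem_->StartBlockConstPool();
        }
        ~BlockConstPoolScope() { assem_->EndBlockConstPool(); }
       private:
        Assembler* const assem_;
      };

     private:
      int const_pool_blocked_nesting_ = 0;
    };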
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index d771e40..0e65386 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -28,6 +28,9 @@
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
+#include "arm/assembler-arm.h"
+#include "arm/assembler-arm-inl.h"
+
namespace v8 {
namespace internal {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index be8b7d6..c2665f8 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -121,7 +121,7 @@
// Check that receiver is a JSObject.
__ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ cmp(scratch0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
__ b(lt, miss_label);
// Load properties array.
@@ -189,8 +189,7 @@
ASSERT(!extra2.is(no_reg));
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(receiver, &miss);
// Get the map of the receiver and compute the hash.
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
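
JumpIfSmi replaces the recurring tst/b pair throughout this file. What it tests, per the smi tagging asserted elsewhere in this patch (kSmiTag == 0, tag size 1):

    #include <stdint.h>

    // Smis carry a 0 tag bit, heap object pointers a 1, so
    // "tst reg, #kSmiTagMask; b eq, label" branches exactly for smis.
    static inline bool IsSmi(uint32_t value) { return (value & 1) == 0; }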
@@ -282,8 +281,7 @@
Register scratch,
Label* miss_label) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss_label);
+ __ JumpIfSmi(receiver, miss_label);
// Check that the object is a JS array.
__ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
@@ -305,8 +303,7 @@
Label* smi,
Label* non_string_object) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, smi);
+ __ JumpIfSmi(receiver, smi);
// Check that the object is a string.
__ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -381,8 +378,7 @@
Label exit;
// Check that the receiver isn't a smi.
- __ tst(receiver_reg, Operand(kSmiTagMask));
- __ b(eq, miss_label);
+ __ JumpIfSmi(receiver_reg, miss_label);
// Check that the map of the receiver hasn't changed.
__ ldr(scratch, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
@@ -431,8 +427,7 @@
__ str(r0, FieldMemOperand(receiver_reg, offset));
// Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ __ JumpIfSmi(r0, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
@@ -445,8 +440,7 @@
__ str(r0, FieldMemOperand(scratch, offset));
// Skip updating write barrier if storing a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &exit);
+ __ JumpIfSmi(r0, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
@@ -1165,8 +1159,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1187,8 +1180,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1212,8 +1204,7 @@
String* name,
Label* miss) {
// Check that the receiver isn't a smi.
- __ tst(receiver, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(receiver, miss);
// Check that the maps haven't changed.
Register reg =
@@ -1426,8 +1417,7 @@
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r0, miss);
}
// Check that the maps haven't changed.
@@ -1449,8 +1439,7 @@
// the nice side effect that multiple closures based on the same
// function can all use this call IC. Before we load through the
// function, we have to verify that it still is a function.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, miss);
+ __ JumpIfSmi(r1, miss);
__ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
__ b(ne, miss);
@@ -1495,8 +1484,7 @@
// Get the receiver of the function from the stack into r0.
__ ldr(r0, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
// Do the right check and compute the holder register.
Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
@@ -1967,8 +1955,7 @@
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -1985,8 +1972,7 @@
// Check the code is a smi.
Label slow;
STATIC_ASSERT(kSmiTag == 0);
- __ tst(code, Operand(kSmiTagMask));
- __ b(ne, &slow);
+ __ JumpIfNotSmi(code, &slow);
// Convert the smi code to uint16.
__ and_(code, code, Operand(Smi::FromInt(0xffff)));
@@ -2188,8 +2174,7 @@
__ ldr(r1, MemOperand(sp, 1 * kPointerSize));
STATIC_ASSERT(kSmiTag == 0);
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
&miss);
@@ -2292,8 +2277,7 @@
__ ldr(r1, MemOperand(sp, argc * kPointerSize));
// Check that the receiver isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss_before_stack_reserved);
+ __ JumpIfSmi(r1, &miss_before_stack_reserved);
__ IncrementCounter(counters->call_const(), 1, r0, r3);
__ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
@@ -2347,8 +2331,7 @@
// Check that the receiver isn't a smi.
if (check != NUMBER_CHECK) {
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
}
// Make sure that it's okay not to patch the on stack receiver
@@ -2381,7 +2364,7 @@
} else {
// Check that the object is a two-byte string or a symbol.
__ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(hs, &miss);
+ __ b(ge, &miss);
// Check that the maps starting from the prototype haven't changed.
GenerateDirectLoadGlobalFunctionPrototype(
masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
@@ -2398,8 +2381,7 @@
} else {
Label fast;
// Check that the object is a smi or a heap number.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &fast);
+ __ JumpIfSmi(r1, &fast);
__ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
__ b(ne, &miss);
__ bind(&fast);
@@ -2619,8 +2601,7 @@
Label miss;
// Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2667,8 +2648,7 @@
Label miss;
// Check that the object isn't a smi.
- __ tst(r1, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r1, &miss);
// Check that the map of the object hasn't changed.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
@@ -2759,8 +2739,7 @@
Label miss;
// Check that receiver is not a smi.
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
// Check the maps of the full prototype chain.
CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss);
@@ -2904,8 +2883,7 @@
// object which can only happen for contextual calls. In this case,
// the receiver cannot be a smi.
if (object != holder) {
- __ tst(r0, Operand(kSmiTagMask));
- __ b(eq, &miss);
+ __ JumpIfSmi(r0, &miss);
}
// Check that the map of the global has not changed.
@@ -3115,14 +3093,15 @@
}
-MaybeObject* KeyedLoadStubCompiler::CompileLoadFastElement(Map* receiver_map) {
+MaybeObject* KeyedLoadStubCompiler::CompileLoadElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- MaybeObject* maybe_stub = KeyedLoadFastElementStub().TryGetCode();
Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
+ MaybeObject* maybe_stub = KeyedLoadElementStub(elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r1,
r2,
@@ -3206,8 +3185,7 @@
}
-MaybeObject* KeyedStoreStubCompiler::CompileStoreFastElement(
- Map* receiver_map) {
+MaybeObject* KeyedStoreStubCompiler::CompileStoreElement(Map* receiver_map) {
// ----------- S t a t e -------------
// -- r0 : value
// -- r1 : key
@@ -3215,10 +3193,11 @@
// -- lr : return address
// -- r3 : scratch
// -----------------------------------
+ Code* stub;
+ JSObject::ElementsKind elements_kind = receiver_map->elements_kind();
bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
MaybeObject* maybe_stub =
- KeyedStoreFastElementStub(is_js_array).TryGetCode();
- Code* stub;
+ KeyedStoreElementStub(is_js_array, elements_kind).TryGetCode();
if (!maybe_stub->To(&stub)) return maybe_stub;
__ DispatchMap(r2,
r3,
@@ -3292,8 +3271,7 @@
// r1: constructor function
// r7: undefined
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
- __ tst(r2, Operand(kSmiTagMask));
- __ b(eq, &generic_stub_call);
+ __ JumpIfSmi(r2, &generic_stub_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &generic_stub_call);
@@ -3410,82 +3388,86 @@
}
-MaybeObject* ExternalArrayLoadStubCompiler::CompileLoad(
- JSObject*receiver, ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- lr : return address
- // -- r0 : key
- // -- r1 : receiver
- // -----------------------------------
- MaybeObject* maybe_stub =
- KeyedLoadExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r1,
- r2,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- // Return the generated code.
- return GetCode();
-}
-
-
-MaybeObject* ExternalArrayStoreStubCompiler::CompileStore(
- JSObject* receiver, ExternalArrayType array_type) {
- // ----------- S t a t e -------------
- // -- r0 : value
- // -- r1 : name
- // -- r2 : receiver
- // -- lr : return address
- // -----------------------------------
- MaybeObject* maybe_stub =
- KeyedStoreExternalArrayStub(array_type).TryGetCode();
- Code* stub;
- if (!maybe_stub->To(&stub)) return maybe_stub;
- __ DispatchMap(r2,
- r3,
- Handle<Map>(receiver->map()),
- Handle<Code>(stub),
- DO_SMI_CHECK);
-
- Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
- __ Jump(ic, RelocInfo::CODE_TARGET);
-
- return GetCode();
-}
-
-
#undef __
#define __ ACCESS_MASM(masm)
-static bool IsElementTypeSigned(ExternalArrayType array_type) {
- switch (array_type) {
- case kExternalByteArray:
- case kExternalShortArray:
- case kExternalIntArray:
+void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
+ MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label slow, miss_force_generic;
+
+ Register key = r0;
+ Register receiver = r1;
+
+ __ JumpIfNotSmi(key, &miss_force_generic);
+ __ mov(r2, Operand(key, ASR, kSmiTagSize));
+ __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
+ __ Ret();
+
+ __ bind(&slow);
+ __ IncrementCounter(
+ masm->isolate()->counters()->keyed_load_external_array_slow(),
+ 1, r2, r3);
+
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ // Miss case, call the runtime.
+ __ bind(&miss_force_generic);
+
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
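
The ASR by kSmiTagSize above untags the smi key before hashing. A sketch of the 32-bit smi encoding this assumes (kSmiTag == 0, kSmiTagSize == 1, per the STATIC_ASSERTs in this file):

    #include <assert.h>
    #include <stdint.h>

    static int32_t SmiTag(int32_t value) { return value * 2; }  // value << 1
    static int32_t SmiUntag(int32_t smi) { return smi >> 1; }   // ASR #1

    int main() {
      assert(SmiUntag(SmiTag(-7)) == -7);  // sign survives the round trip
      return 0;
    }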
+static bool IsElementTypeSigned(JSObject::ElementsKind elements_kind) {
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
return true;
- case kExternalUnsignedByteArray:
- case kExternalUnsignedShortArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
return false;
- default:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
return false;
}
+ return false;
}
void KeyedLoadStubCompiler::GenerateLoadExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- lr : return address
// -- r0 : key
@@ -3519,25 +3501,25 @@
ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
Register value = r2;
- switch (array_type) {
- case kExternalByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
__ ldrsb(value, MemOperand(r3, key, LSR, 1));
break;
- case kExternalPixelArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ ldrb(value, MemOperand(r3, key, LSR, 1));
break;
- case kExternalShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
__ ldrsh(value, MemOperand(r3, key, LSL, 0));
break;
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ ldrh(value, MemOperand(r3, key, LSL, 0));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ ldr(value, MemOperand(r3, key, LSL, 1));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 1));
@@ -3546,7 +3528,7 @@
__ ldr(value, MemOperand(r3, key, LSL, 1));
}
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
__ add(r2, r3, Operand(key, LSL, 2));
@@ -3558,7 +3540,10 @@
__ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
}
break;
- default:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3572,7 +3557,7 @@
// d0: value (if VFP3 is supported)
// r2/r3: value (if VFP3 is not supported)
- if (array_type == kExternalIntArray) {
+ if (elements_kind == JSObject::EXTERNAL_INT_ELEMENTS) {
// For the Int and UnsignedInt array types, we need to see whether
// the value can be represented in a Smi. If not, we need to convert
// it to a HeapNumber.
@@ -3616,7 +3601,7 @@
__ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ Ret();
}
- } else if (array_type == kExternalUnsignedIntArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS) {
// The test is different for unsigned int values. Since we need
// the value to be in the range of a positive smi, we can't
// handle either of the top two bits being set in the value.
@@ -3681,7 +3666,7 @@
__ mov(r0, r4);
__ Ret();
}
- } else if (array_type == kExternalFloatArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
// For the floating-point array type, we need to always allocate a
// HeapNumber.
if (CpuFeatures::IsSupported(VFP3)) {
@@ -3751,7 +3736,7 @@
__ mov(r0, r3);
__ Ret();
}
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Allocate a HeapNumber for the result. Don't use r0 and r1 as
@@ -3808,7 +3793,7 @@
void KeyedStoreStubCompiler::GenerateStoreExternalArray(
MacroAssembler* masm,
- ExternalArrayType array_type) {
+ JSObject::ElementsKind elements_kind) {
// ---------- S t a t e --------------
// -- r0 : value
// -- r1 : key
@@ -3842,7 +3827,7 @@
// runtime for all other kinds of values.
// r3: external array.
// r4: key (integer).
- if (array_type == kExternalPixelArray) {
+ if (elements_kind == JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// Double to pixel conversion is only implemented in the runtime for now.
__ JumpIfNotSmi(value, &slow);
} else {
@@ -3854,29 +3839,29 @@
// r3: base pointer of external storage.
// r4: key (integer).
// r5: value (integer).
- switch (array_type) {
- case kExternalPixelArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
// Clamp the value to [0..255].
__ Usat(r5, 8, Operand(r5));
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
- case kExternalFloatArray:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
break;
- case kExternalDoubleArray:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
__ add(r3, r3, Operand(r4, LSL, 3));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
@@ -3897,7 +3882,10 @@
__ str(r7, MemOperand(r3, Register::kSizeInBytes));
}
break;
- default:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
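
The Usat in the pixel case above saturates rather than truncates, so out-of-range values clamp to the nearest bound instead of wrapping. The equivalent scalar logic:

    #include <assert.h>
    #include <stdint.h>

    // What "usat r5, #8, r5" computes before the byte store.
    static uint8_t ClampToUint8(int32_t value) {
      if (value < 0) return 0;
      if (value > 255) return 255;
      return static_cast<uint8_t>(value);
    }

    int main() {
      assert(ClampToUint8(-5) == 0);
      assert(ClampToUint8(300) == 255);
      assert(ClampToUint8(42) == 42);
      return 0;
    }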
@@ -3905,7 +3893,7 @@
// Entry registers are intact, r0 holds the value which is the return value.
__ Ret();
- if (array_type != kExternalPixelArray) {
+ if (elements_kind != JSObject::EXTERNAL_PIXEL_ELEMENTS) {
// r3: external array.
// r4: index (integer).
__ bind(&check_heap_number);
@@ -3923,7 +3911,7 @@
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
@@ -3931,48 +3919,38 @@
__ add(r5, r3, Operand(r4, LSL, 2));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(r4, LSL, 3));
__ vstr(d0, r5, 0);
} else {
- // Need to perform float-to-int conversion.
- // Test for NaN or infinity (both give zero).
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
-
// Hoisted load. vldr requires offset to be a multiple of 4 so we can
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
- __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs and Infinities have all-one exponents so they sign extend to -1.
- __ cmp(r6, Operand(-1));
- __ mov(r5, Operand(0), LeaveCC, eq);
-
- // Not infinity or NaN simply convert to int.
- if (IsElementTypeSigned(array_type)) {
- __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
- } else {
- __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
- }
- __ vmov(r5, s0, ne);
-
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
- default:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -3986,7 +3964,7 @@
__ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
__ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
- if (array_type == kExternalFloatArray) {
+ if (elements_kind == JSObject::EXTERNAL_FLOAT_ELEMENTS) {
Label done, nan_or_infinity_or_zero;
static const int kMantissaInHiWordShift =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
@@ -4038,14 +4016,14 @@
__ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
__ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
__ b(&done);
- } else if (array_type == kExternalDoubleArray) {
+ } else if (elements_kind == JSObject::EXTERNAL_DOUBLE_ELEMENTS) {
__ add(r7, r3, Operand(r4, LSL, 3));
// r7: effective address of destination element.
__ str(r6, MemOperand(r7, 0));
__ str(r5, MemOperand(r7, Register::kSizeInBytes));
__ Ret();
} else {
- bool is_signed_type = IsElementTypeSigned(array_type);
+ bool is_signed_type = IsElementTypeSigned(elements_kind);
int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
@@ -4092,20 +4070,26 @@
__ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ bind(&done);
- switch (array_type) {
- case kExternalByteArray:
- case kExternalUnsignedByteArray:
+ switch (elements_kind) {
+ case JSObject::EXTERNAL_BYTE_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, r4, LSL, 0));
break;
- case kExternalShortArray:
- case kExternalUnsignedShortArray:
+ case JSObject::EXTERNAL_SHORT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, r4, LSL, 1));
break;
- case kExternalIntArray:
- case kExternalUnsignedIntArray:
+ case JSObject::EXTERNAL_INT_ELEMENTS:
+ case JSObject::EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, r4, LSL, 2));
break;
- default:
+ case JSObject::EXTERNAL_PIXEL_ELEMENTS:
+ case JSObject::EXTERNAL_FLOAT_ELEMENTS:
+ case JSObject::EXTERNAL_DOUBLE_ELEMENTS:
+ case JSObject::FAST_ELEMENTS:
+ case JSObject::FAST_DOUBLE_ELEMENTS:
+ case JSObject::DICTIONARY_ELEMENTS:
+ case JSObject::NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
@@ -4184,6 +4168,77 @@
}
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+ MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- lr : return address
+ // -- r0 : key
+ // -- r1 : receiver
+ // -----------------------------------
+ Label miss_force_generic, slow_allocate_heapnumber;
+
+ Register key_reg = r0;
+ Register receiver_reg = r1;
+ Register elements_reg = r2;
+ Register heap_number_reg = r2;
+ Register indexed_double_offset = r3;
+ Register scratch = r4;
+ Register scratch2 = r5;
+ Register scratch3 = r6;
+ Register heap_number_map = r7;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+
+ // Check that the key is a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ // Get the elements array.
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+ // Check that the key is within bounds.
+ __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ __ cmp(key_reg, Operand(scratch));
+ __ b(hs, &miss_force_generic);
+
+ // Load the upper word of the double in the fixed array and test for NaN.
+ __ add(indexed_double_offset, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ __ b(&miss_force_generic, eq);
+
+ // Non-NaN. Allocate a new heap number and copy the double value into it.
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+ heap_number_map, &slow_allocate_heapnumber);
+
+ // Don't need to reload the upper 32 bits of the double, it's already in
+ // scratch.
+ __ str(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kExponentOffset));
+ __ ldr(scratch, FieldMemOperand(indexed_double_offset,
+ FixedArray::kHeaderSize));
+ __ str(scratch, FieldMemOperand(heap_number_reg,
+ HeapNumber::kMantissaOffset));
+
+ __ mov(r0, heap_number_reg);
+ __ Ret();
+
+ __ bind(&slow_allocate_heapnumber);
+ Handle<Code> slow_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_Slow();
+ __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+ __ bind(&miss_force_generic);
+ Handle<Code> miss_ic =
+ masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+ __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
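
The hole check above works because FixedDoubleArray marks absent elements with one specific NaN bit pattern, so comparing the upper 32 bits of the element against kHoleNanUpper32 is sufficient. A host-side sketch (assumptions: little-endian word order as on these ARM targets, and an illustrative kHoleNanUpper32 value):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    int main() {
      const uint32_t kHoleNanUpper32 = 0x7fffffff;  // assumed; see objects.h
      double element = 1.5;
      uint32_t words[2];
      memcpy(words, &element, sizeof(element));  // words[1] is the upper word
      printf("upper word 0x%08x is%s the hole\n", words[1],
             words[1] == kHoleNanUpper32 ? "" : " not");
      return 0;
    }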
void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
bool is_js_array) {
// ----------- S t a t e -------------
@@ -4206,7 +4261,7 @@
// have been verified by the caller to not be a smi.
// Check that the key is a smi.
- __ JumpIfNotSmi(r0, &miss_force_generic);
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ ldr(elements_reg,
@@ -4247,6 +4302,125 @@
}
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+ MacroAssembler* masm,
+ bool is_js_array) {
+ // ----------- S t a t e -------------
+ // -- r0 : value
+ // -- r1 : key
+ // -- r2 : receiver
+ // -- lr : return address
+ // -- r3 : scratch
+ // -- r4 : scratch
+ // -- r5 : scratch
+ // -----------------------------------
+ Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+
+ Register value_reg = r0;
+ Register key_reg = r1;
+ Register receiver_reg = r2;
+ Register scratch = r3;
+ Register elements_reg = r4;
+ Register mantissa_reg = r5;
+ Register exponent_reg = r6;
+ Register scratch4 = r7;
+
+ // This stub is meant to be tail-jumped to; the receiver must already
+ // have been verified by the caller to not be a smi.
+ __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+ __ ldr(elements_reg,
+ FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+ // Check that the key is within bounds.
+ if (is_js_array) {
+ __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+ } else {
+ __ ldr(scratch,
+ FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+ }
+ // Compare smis; an unsigned compare catches both negative and
+ // out-of-bound indexes.
+ __ cmp(key_reg, scratch);
+ __ b(hs, &miss_force_generic);
+
+ // Handle smi values specially.
+ __ JumpIfSmi(value_reg, &smi_value);
+
+ // Ensure that the object is a heap number
+ __ CheckMap(value_reg,
+ scratch,
+ masm->isolate()->factory()->heap_number_map(),
+ &miss_force_generic,
+ DONT_DO_SMI_CHECK);
+
+ // Check for NaN: every NaN (and Infinity) has an upper word greater than
+ // or equal (signed) to 0x7ff00000, i.e. an all-ones exponent.
+ __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+ __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+ __ cmp(exponent_reg, scratch);
+ __ b(ge, &maybe_nan);
+
+ __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+ __ bind(&have_double_value);
+ __ add(scratch, elements_reg,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+ __ str(exponent_reg, FieldMemOperand(scratch, offset));
+ __ Ret();
+
+ __ bind(&maybe_nan);
+ // Could be NaN or Infinity. If the fraction is non-zero, it's NaN;
+ // otherwise it's an Infinity, and the non-NaN code path applies.
+ __ b(gt, &is_nan);
+ __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+ __ cmp(mantissa_reg, Operand(0));
+ __ b(eq, &have_double_value);
+ __ bind(&is_nan);
+ // Load canonical NaN for storing into the double array.
+ uint64_t nan_int64 = BitCast<uint64_t>(
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ __ mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+ __ mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
+ __ jmp(&have_double_value);
+
+ __ bind(&smi_value);
+ __ add(scratch, elements_reg,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ __ add(scratch, scratch,
+ Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+ // scratch now holds the effective address of the double element.
+
+ FloatingPointHelper::Destination destination;
+ if (CpuFeatures::IsSupported(VFP3)) {
+ destination = FloatingPointHelper::kVFPRegisters;
+ } else {
+ destination = FloatingPointHelper::kCoreRegisters;
+ }
+ __ SmiUntag(value_reg, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(
+ masm, value_reg, destination,
+ d0, mantissa_reg, exponent_reg, // These are: double_dst, dst1, dst2.
+ scratch4, s2); // These are: scratch2, single_scratch.
+ if (destination == FloatingPointHelper::kVFPRegisters) {
+ CpuFeatures::Scope scope(VFP3);
+ __ vstr(d0, scratch, 0);
+ } else {
+ __ str(mantissa_reg, MemOperand(scratch, 0));
+ __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+ }
+ __ Ret();
+
+ // Handle store cache miss, replacing the ic with the generic stub.
+ __ bind(&miss_force_generic);
+ Handle<Code> ic =
+ masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+ __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
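
The maybe_nan/is_nan logic above classifies the incoming heap number and canonicalizes NaNs so that no stored value can ever alias the hole pattern. A sketch of the same classification (assumption: little-endian word order; it mirrors the signed compare used above):

    #include <stdint.h>
    #include <string.h>
    #include <math.h>
    #include <stdio.h>

    static const int32_t kNaNOrInfinityLowerBoundUpper32 = 0x7ff00000;

    static const char* Classify(double d) {
      uint32_t words[2];
      memcpy(words, &d, sizeof(d));
      if (static_cast<int32_t>(words[1]) < kNaNOrInfinityLowerBoundUpper32)
        return "plain double";  // the non-NaN code path
      // Any set fraction bit (rest of the upper word or the whole lower
      // word) means NaN; an all-zero fraction means Infinity.
      uint32_t fraction_hi = words[1] & 0x000fffff;
      return (fraction_hi | words[0]) != 0 ? "NaN" : "Infinity";
    }

    int main() {
      printf("%s, %s, %s\n", Classify(1.0), Classify(INFINITY), Classify(NAN));
      return 0;
    }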
#undef __
} } // namespace v8::internal