Merge V8 3.9 at 3.9.24.9
http://v8.googlecode.com/svn/branches/3.9@11260
Bug: 5688872
Change-Id: Iddd944e82189d92df3fc427dc5f0d3f1b2f0c6c8
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 941c7fe..e93a417 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -574,12 +574,22 @@
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
- mul(rd, rs, rt.rm());
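+ // The Loongson 2E/2F cores do not implement the MIPS32 three-operand mul,
+ // so the product is formed with mult and read back from the LO register.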
+ if (kArchVariant == kLoongson) {
+ mult(rs, rt.rm());
+ mflo(rd);
+ } else {
+ mul(rd, rs, rt.rm());
+ }
} else {
// li handles the relocation.
ASSERT(!rs.is(at));
li(at, rt);
- mul(rd, rs, at);
+ if (kArchVariant == kLoongson) {
+ mult(rs, at);
+ mflo(rd);
+ } else {
+ mul(rd, rs, at);
+ }
}
}
@@ -734,7 +744,7 @@
void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
- if (mips32r2) {
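+ // rotr/rotrv were introduced in MIPS32R2; earlier architecture variants
+ // fall back to the shift-and-or sequence in the else branch.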
+ if (kArchVariant == kMips32r2) {
if (rt.is_reg()) {
rotrv(rd, rs, rt.rm());
} else {
@@ -758,31 +768,30 @@
}
}
-
//------------Pseudo-instructions-------------
-void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
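+ // With OPTIMIZE_SIZE the load may shrink to a single instruction, while
+ // CONSTANT_SIZE always emits the two-instruction lui/ori pair so the site
+ // can later be patched with a different value.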
ASSERT(!j.is_reg());
BlockTrampolinePoolScope block_trampoline_pool(this);
- if (!MustUseReg(j.rmode_) && !gen2instr) {
+ if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & kHiMask)) {
ori(rd, zero_reg, j.imm32_);
} else if (!(j.imm32_ & kImm16Mask)) {
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
} else {
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
ori(rd, rd, (j.imm32_ & kImm16Mask));
}
- } else if (MustUseReg(j.rmode_) || gen2instr) {
+ } else {
if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
- // We need always the same number of instructions as we may need to patch
+ // We always emit the same number of instructions, since this code may be
// patched later to load a value that needs two instructions.
- lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask);
ori(rd, rd, (j.imm32_ & kImm16Mask));
}
}
@@ -922,7 +931,7 @@
ASSERT(pos < 32);
ASSERT(pos + size < 33);
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
ext_(rt, rs, pos, size);
} else {
// Move rs to rt and shift it left then right to get the
@@ -946,7 +955,7 @@
ASSERT(pos + size <= 32);
ASSERT(size != 0);
- if (mips32r2) {
+ if (kArchVariant == kMips32r2) {
ins_(rt, rs, pos, size);
} else {
ASSERT(!rt.is(t8) && !rs.is(t8));
@@ -1016,6 +1025,48 @@
mtc1(t8, fd);
}
+void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
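+ // On Loongson the conversion clobbers the upper word of the double
+ // register pair when fd aliases fs, so that word is saved in t8 and
+ // restored afterwards. The same applies to the rounding variants below.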
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ trunc_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ trunc_w_d(fd, fs);
+ }
+}
+
+
+void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ round_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ round_w_d(fd, fs);
+ }
+}
+
+
+void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ floor_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ floor_w_d(fd, fs);
+ }
+}
+
+
+void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
+ if (kArchVariant == kLoongson && fd.is(fs)) {
+ mfc1(t8, FPURegister::from_code(fs.code() + 1));
+ ceil_w_d(fd, fs);
+ mtc1(t8, FPURegister::from_code(fs.code() + 1));
+ } else {
+ ceil_w_d(fd, fs);
+ }
+}
+
void MacroAssembler::Trunc_uw_d(FPURegister fd,
Register rs,
@@ -1146,6 +1197,104 @@
}
+void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+ if (kArchVariant == kLoongson) {
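+ // Loongson has no movz instruction; emulate the conditional move with a
+ // short forward branch. Movn below mirrors this with the inverted test.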
+ Label done;
+ Branch(&done, ne, rt, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movz(rd, rs, rt);
+ }
+}
+
+
+void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+ if (kArchVariant == kLoongson) {
+ Label done;
+ Branch(&done, eq, rt, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movn(rd, rs, rt);
+ }
+}
+
+
+void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+ if (kArchVariant == kLoongson) {
+ // Tests an FP condition code and conditionally moves rs to rd.
+ // We do not currently use any FPU cc bit other than bit 0.
+ ASSERT(cc == 0);
+ ASSERT(!(rs.is(t8) || rd.is(t8)));
+ Label done;
+ Register scratch = t8;
+ // To test the condition we fetch the contents of the FCSR register and
+ // then test its cc (floating point condition code) bit (for cc = 0 this
+ // is bit 23 of the FCSR).
+ cfc1(scratch, FCSR);
+ // On the MIPS I, II and III architectures, the contents of scratch are
+ // UNPREDICTABLE for the instruction immediately following CFC1.
+ nop();
+ srl(scratch, scratch, 16);
+ andi(scratch, scratch, 0x0080);
+ Branch(&done, eq, scratch, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movt(rd, rs, cc);
+ }
+}
+
+
+void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+ if (kArchVariant == kLoongson) {
+ // Tests an FP condition code and conditionally moves rs to rd.
+ // We do not currently use any FPU cc bit other than bit 0.
+ ASSERT(cc == 0);
+ ASSERT(!(rs.is(t8) || rd.is(t8)));
+ Label done;
+ Register scratch = t8;
+ // To test the condition we fetch the contents of the FCSR register and
+ // then test its cc (floating point condition code) bit (for cc = 0 this
+ // is bit 23 of the FCSR).
+ cfc1(scratch, FCSR);
+ // On the MIPS I, II and III architectures, the contents of scratch are
+ // UNPREDICTABLE for the instruction immediately following CFC1.
+ nop();
+ srl(scratch, scratch, 16);
+ andi(scratch, scratch, 0x0080);
+ Branch(&done, ne, scratch, Operand(zero_reg));
+ mov(rd, rs);
+ bind(&done);
+ } else {
+ movf(rd, rs, cc);
+ }
+}
+
+
+void MacroAssembler::Clz(Register rd, Register rs) {
+ if (kArchVariant == kLoongson) {
+ ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
+ Register mask = t8;
+ Register scratch = t9;
+ Label loop, end;
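+ // Software count-leading-zeros: walk a single-bit mask from the most
+ // significant bit downwards, incrementing rd until the first set bit of
+ // the input (copied to at) is found. The srl below executes in the branch
+ // delay slot, so the mask is tested before it is shifted.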
+ mov(at, rs);
+ mov(rd, zero_reg);
+ lui(mask, 0x8000);
+ bind(&loop);
+ and_(scratch, at, mask);
+ Branch(&end, ne, scratch, Operand(zero_reg));
+ addiu(rd, rd, 1);
+ Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
+ srl(mask, mask, 1);
+ bind(&end);
+ } else {
+ clz(rd, rs);
+ }
+}
+
+
// Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Branches to 'not_int32' if the double is out of
// the 32-bit signed integer range.
@@ -1236,8 +1385,8 @@
subu(scratch2, zero_reg, scratch);
// Trick to check the sign bit (msb) held in dest: count leading zeros.
// A zero count indicates negative; save the negated version with a conditional move.
- clz(dest, dest);
- movz(scratch, scratch2, dest);
+ Clz(dest, dest);
+ Movz(scratch, scratch2, dest);
mov(dest, scratch);
}
bind(&done);
@@ -1268,16 +1417,16 @@
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- round_w_d(result, double_input);
+ Round_w_d(result, double_input);
break;
case kRoundToZero:
- trunc_w_d(result, double_input);
+ Trunc_w_d(result, double_input);
break;
case kRoundToPlusInf:
- ceil_w_d(result, double_input);
+ Ceil_w_d(result, double_input);
break;
case kRoundToMinusInf:
- floor_w_d(result, double_input);
+ Floor_w_d(result, double_input);
break;
} // End of switch-statement.
@@ -1304,7 +1453,7 @@
// Check for Infinity and NaNs, which should return 0.
Subu(scratch, result, HeapNumber::kExponentMask);
- movz(result, zero_reg, scratch);
+ Movz(result, zero_reg, scratch);
Branch(&done, eq, scratch, Operand(zero_reg));
// Express exponent as delta to (number of mantissa bits + 31).
@@ -1368,7 +1517,7 @@
result = sign;
sign = no_reg;
Subu(result, zero_reg, input_high);
- movz(result, input_high, scratch);
+ Movz(result, input_high, scratch);
bind(&done);
}
@@ -1497,6 +1646,16 @@
}
+void MacroAssembler::Branch(Label* L,
+ Condition cond,
+ Register rs,
+ Heap::RootListIndex index,
+ BranchDelaySlot bdslot) {
+ LoadRoot(at, index);
+ Branch(L, cond, rs, Operand(at), bdslot);
+}
+
+
void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
b(offset);
@@ -2288,8 +2447,15 @@
Register rs,
const Operand& rt,
BranchDelaySlot bd) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+ }
+ // The first instruction of 'li' may be placed in the delay slot of the
+ // branch above. This is not an issue; t9 is expected to be clobbered anyway.
li(t9, Operand(target, rmode));
- Jump(t9, cond, rs, rt, bd);
+ Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+ bind(&skip);
}
@@ -2384,7 +2550,7 @@
// Must record previous source positions before the
// li() generates a new code target.
positions_recorder()->WriteRecordedPositions();
- li(t9, Operand(target_int, rmode), true);
+ li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
Call(t9, cond, rs, rt, bd);
ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
@@ -2419,7 +2585,7 @@
rmode = RelocInfo::CODE_TARGET_WITH_ID;
}
Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
- ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt),
+ ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
SizeOfCodeGeneratedSince(&start));
}
@@ -2489,14 +2655,16 @@
nop();
}
+void MacroAssembler::DropAndRet(int drop) {
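+ // The stack adjustment is placed in the branch delay slot of Ret, so the
+ // drop costs no extra instruction.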
+ Ret(USE_DELAY_SLOT);
+ addiu(sp, sp, drop * kPointerSize);
+}
void MacroAssembler::DropAndRet(int drop,
Condition cond,
Register r1,
const Operand& r2) {
- // This is a workaround to make sure only one branch instruction is
- // generated. It relies on Drop and Ret not creating branches if
- // cond == cc_always.
+ // Both Drop and Ret need to be conditional.
Label skip;
if (cond != cc_always) {
Branch(&skip, NegateCondition(cond), r1, r2);
@@ -2563,8 +2731,8 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
- mov(a0, zero_reg);
- li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ PrepareCEntryArgs(0);
+ PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(1);
ASSERT(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
@@ -2576,8 +2744,7 @@
// ---------------------------------------------------------------------------
// Exception handling.
-void MacroAssembler::PushTryHandler(CodeLocation try_location,
- HandlerType type,
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
@@ -2589,30 +2756,23 @@
// For the JSEntry handler, we must preserve a0-a3 and s0.
// t1-t3 are available. We will build up the handler from the bottom by
- // pushing on the stack. First compute the state.
- unsigned state = StackHandler::OffsetField::encode(handler_index);
- if (try_location == IN_JAVASCRIPT) {
- state |= (type == TRY_CATCH_HANDLER)
- ? StackHandler::KindField::encode(StackHandler::TRY_CATCH)
- : StackHandler::KindField::encode(StackHandler::TRY_FINALLY);
- } else {
- ASSERT(try_location == IN_JS_ENTRY);
- state |= StackHandler::KindField::encode(StackHandler::ENTRY);
- }
-
+ // pushing on the stack.
// Set up the code object (t1) and the state (t2) for pushing.
- li(t1, Operand(CodeObject()));
+ unsigned state =
+ StackHandler::IndexField::encode(handler_index) |
+ StackHandler::KindField::encode(kind);
+ li(t1, Operand(CodeObject()), CONSTANT_SIZE);
li(t2, Operand(state));
// Push the frame pointer, context, state, and code object.
- if (try_location == IN_JAVASCRIPT) {
- MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
- } else {
+ if (kind == StackHandler::JS_ENTRY) {
ASSERT_EQ(Smi::FromInt(0), 0);
// The second zero_reg indicates no context.
// The first zero_reg is the NULL frame pointer.
// The operands are reversed to match the order of MultiPush/Pop.
Push(zero_reg, zero_reg, t2, t1);
+ } else {
+ MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit());
}
// Link the current handler as the next handler.
@@ -2687,8 +2847,7 @@
}
-void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
- Register value) {
+void MacroAssembler::ThrowUncatchable(Register value) {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -2698,24 +2857,9 @@
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The exception is expected in v0.
- if (type == OUT_OF_MEMORY) {
- // Set external caught exception to false.
- ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
- isolate());
- li(a0, Operand(false, RelocInfo::NONE));
- li(a2, Operand(external_caught));
- sw(a0, MemOperand(a2));
-
- // Set pending exception and v0 to out of memory exception.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
- li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
- isolate())));
- sw(v0, MemOperand(a2));
- } else if (!value.is(v0)) {
+ if (!value.is(v0)) {
mov(v0, value);
}
-
// Drop the stack pointer to the top of the top stack handler.
li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
lw(sp, MemOperand(a3));
@@ -2727,7 +2871,7 @@
lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
bind(&check_kind);
- STATIC_ASSERT(StackHandler::ENTRY == 0);
+ STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
And(a2, a2, Operand(StackHandler::KindField::kMask));
Branch(&fetch_next, ne, a2, Operand(zero_reg));
@@ -3246,7 +3390,7 @@
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
- isolate()->factory()->heap_number_map(),
+ Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
@@ -3750,10 +3894,13 @@
// -----------------------------------------------------------------------------
// Runtime calls.
-void MacroAssembler::CallStub(CodeStub* stub, Condition cond,
- Register r1, const Operand& r2) {
+void MacroAssembler::CallStub(CodeStub* stub,
+ Condition cond,
+ Register r1,
+ const Operand& r2,
+ BranchDelaySlot bd) {
ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
- Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2);
+ Call(stub->GetCode(), RelocInfo::CODE_TARGET, kNoASTId, cond, r1, r2, bd);
}
@@ -3836,8 +3983,7 @@
lw(t1, MemOperand(at));
Branch(&promote_scheduled_exception, ne, t0, Operand(t1));
li(s0, Operand(stack_space));
- LeaveExitFrame(false, s0);
- Ret();
+ LeaveExitFrame(false, s0, true);
bind(&promote_scheduled_exception);
TailCallExternalReference(
@@ -4035,8 +4181,8 @@
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- li(a0, num_arguments);
- li(a1, Operand(ExternalReference(f, isolate())));
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference(f, isolate()));
CEntryStub stub(1);
CallStub(&stub);
}
@@ -4044,8 +4190,8 @@
void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
- li(a0, Operand(function->nargs));
- li(a1, Operand(ExternalReference(function, isolate())));
+ PrepareCEntryArgs(function->nargs);
+ PrepareCEntryFunction(ExternalReference(function, isolate()));
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -4057,12 +4203,13 @@
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
- int num_arguments) {
- li(a0, Operand(num_arguments));
- li(a1, Operand(ext));
+ int num_arguments,
+ BranchDelaySlot bd) {
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ext);
CEntryStub stub(1);
- CallStub(&stub);
+ CallStub(&stub, al, zero_reg, Operand(zero_reg), bd);
}
@@ -4073,7 +4220,7 @@
// arguments passed in because it is constant. At some point we
// should remove this need and make the runtime routine entry code
// smarter.
- li(a0, Operand(num_arguments));
+ PrepareCEntryArgs(num_arguments);
JumpToExternalReference(ext);
}
@@ -4087,10 +4234,16 @@
}
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- li(a1, Operand(builtin));
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ BranchDelaySlot bd) {
+ PrepareCEntryFunction(builtin);
CEntryStub stub(1);
- Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+ Jump(stub.GetCode(),
+ RelocInfo::CODE_TARGET,
+ al,
+ zero_reg,
+ Operand(zero_reg),
+ bd);
}
@@ -4279,6 +4432,46 @@
}
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ lw(at, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ Branch(no_map_match, ne, map_in_out, Operand(at));
+
+ // Use the transitioned cached map.
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ lw(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+ Register function_in, Register scratch, Register map_out) {
+ ASSERT(!function_in.is(map_out));
+ Label done;
+ lw(map_out, FieldMemOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ if (!FLAG_smi_only_arrays) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ map_out,
+ scratch,
+ &done);
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -4309,7 +4502,7 @@
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
li(t8, Operand(Smi::FromInt(type)));
- li(t9, Operand(CodeObject()));
+ li(t9, Operand(CodeObject()), CONSTANT_SIZE);
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
@@ -4353,7 +4546,8 @@
sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
- li(t8, Operand(CodeObject())); // Accessed from ExitFrame::code_slot.
+ // Accessed from ExitFrame::code_slot.
+ li(t8, Operand(CodeObject()), CONSTANT_SIZE);
sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
@@ -4397,7 +4591,8 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles,
- Register argument_count) {
+ Register argument_count,
+ bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
// Remember: we only need to restore every 2nd double FPU value.
@@ -4423,11 +4618,17 @@
mov(sp, fp); // Respect ABI stack constraint.
lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
- addiu(sp, sp, 8);
+
if (argument_count.is_valid()) {
sll(t8, argument_count, kPointerSizeLog2);
addu(sp, sp, t8);
}
+
+ if (do_return) {
+ Ret(USE_DELAY_SLOT);
+ // If returning, the instruction in the delay slot will be the addiu below.
+ }
+ addiu(sp, sp, 8);
}
@@ -4492,6 +4693,64 @@
}
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
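+ // SmiTag shifts the value left by one. The shift changes the sign bit
+ // exactly when the value does not fit in a smi, so the xor of the
+ // original and tagged values is negative precisely on overflow.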
+ ASSERT(!reg.is(overflow));
+ mov(overflow, reg); // Save original value.
+ SmiTag(reg);
+ xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst,
+ Register src,
+ Register overflow) {
+ if (dst.is(src)) {
+ // Fall back to slower case.
+ SmiTagCheckOverflow(dst, overflow);
+ } else {
+ ASSERT(!dst.is(src));
+ ASSERT(!dst.is(overflow));
+ ASSERT(!src.is(overflow));
+ SmiTag(dst, src);
+ xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
+ }
+}
+
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst,
+ Register src,
+ Label* smi_case) {
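+ // SmiUntag sits in the branch delay slot and thus executes on both paths;
+ // dst holds the untagged value when the branch to smi_case is taken.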
+ JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
+ SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
+ Register src,
+ Label* non_smi_case) {
+ JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
+ SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Register scratch,
+ BranchDelaySlot bd) {
+ ASSERT_EQ(0, kSmiTag);
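+ // With a zero smi tag the low bit of a smi is clear, so the andi result
+ // is zero exactly for smis.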
+ andi(scratch, value, kSmiTagMask);
+ Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Register scratch,
+ BranchDelaySlot bd) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
@@ -5014,11 +5273,53 @@
FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset));
Label not_smi;
JumpIfNotSmi(descriptors, &not_smi);
- li(descriptors, Operand(FACTORY->empty_descriptor_array()));
+ LoadRoot(descriptors, Heap::kEmptyDescriptorArrayRootIndex);
bind(&not_smi);
}
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+ Label next;
+ // Preload a couple of values used in the loop.
+ Register empty_fixed_array_value = t2;
+ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+ Register empty_descriptor_array_value = t3;
+ LoadRoot(empty_descriptor_array_value,
+ Heap::kEmptyDescriptorArrayRootIndex);
+ mov(a1, a0);
+ bind(&next);
+
+ // Check that there are no elements. Register a1 contains the
+ // current JS object we've reached through the prototype chain.
+ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
+ Branch(call_runtime, ne, a2, Operand(empty_fixed_array_value));
+
+ // Check that instance descriptors are not empty so that we can
+ // check for an enum cache. Leave the map in a2 for the subsequent
+ // prototype load.
+ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ lw(a3, FieldMemOperand(a2, Map::kInstanceDescriptorsOrBitField3Offset));
+ JumpIfSmi(a3, call_runtime);
+
+ // Check that there is an enum cache in the non-empty instance
+ // descriptors (a3). This is the case if the next enumeration
+ // index field does not contain a smi.
+ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumerationIndexOffset));
+ JumpIfSmi(a3, call_runtime);
+
+ // For all objects but the receiver, check that the cache is empty.
+ Label check_prototype;
+ Branch(&check_prototype, eq, a1, Operand(a0));
+ lw(a3, FieldMemOperand(a3, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ Branch(call_runtime, ne, a3, Operand(empty_fixed_array_value));
+
+ // Load the prototype from the map and loop if non-null.
+ bind(&check_prototype);
+ lw(a1, FieldMemOperand(a2, Map::kPrototypeOffset));
+ Branch(&next, ne, a1, Operand(null_value));
+}
+
+
void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
ASSERT(!output_reg.is(input_reg));
Label done;