Fixed a bug in deletion of lookup slots that could cause global variables to be accidentally deleted (http://crbug.com/70066).
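
An illustrative sketch of the affected pattern (hypothetical; the actual
reproduction is in the bug report above), assuming an eval-introduced
variable that shadows a global:

  var x = "global";
  function f() {
    eval("var x = 'shadow';");  // x becomes a lookup slot in f's context
    delete x;                   // should delete only the eval-introduced x
  }
  f();
  // Before this fix, the global x could be deleted as well.
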
Added support for strict mode octal literal verification.
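
For example, strict mode code containing an octal literal is now rejected at
parse time with the message added to src/messages.js below:

  "use strict";
  var n = 010;   // SyntaxError: Octal literals are not allowed in strict mode.
  var ok = 0x10; // hexadecimal literals are unaffected
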
Fixed a couple of crash bugs (issues 1070 and 1071).
git-svn-id: http://v8.googlecode.com/svn/trunk@6469 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/AUTHORS b/AUTHORS
index ea5b93e..eedf840 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -34,3 +34,4 @@
Ryan Dahl <coldredlemur@gmail.com>
Subrato K De <subratokde@codeaurora.org>
Vlad Burlik <vladbph@gmail.com>
+Mike Gilbert <floppymaster@gmail.com>
diff --git a/ChangeLog b/ChangeLog
index e2f87c5..35ea957 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,8 +1,18 @@
+2011-01-25: Version 3.0.11
+
+ Fixed a bug in deletion of lookup slots that could cause global
+ variables to be accidentally deleted (http://crbug.com/70066).
+
+ Added support for strict mode octal literal verification.
+
+ Fixed a couple of crash bugs (issues 1070 and 1071).
+
+
2011-01-24: Version 3.0.10
Fixed External::Wrap for 64-bit addresses (issue 1037).
- Fixed incorrect .arguments variable proxy handling in the full
+ Fixed incorrect .arguments variable proxy handling in the full
code generator (issue 1060).
Introduced partial strict mode support.
@@ -11,7 +21,7 @@
(issue http://crbug.com/70334).
Fixed incorrect rounding for float-to-integer conversions for external
- array types, which implement the Typed Array spec
+ array types, which implement the Typed Array spec
(issue http://crbug.com/50972).
Performance improvements on the IA32 platform.
diff --git a/SConstruct b/SConstruct
index 985e239..28bb713 100644
--- a/SConstruct
+++ b/SConstruct
@@ -32,7 +32,7 @@
from os.path import join, dirname, abspath
from types import DictType, StringTypes
root_dir = dirname(File('SConstruct').rfile().abspath)
-sys.path.append(join(root_dir, 'tools'))
+sys.path.insert(0, join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
@@ -330,7 +330,7 @@
},
'msvc': {
'all': {
- 'WARNINGFLAGS': ['/W3', '/WX', '/wd4355', '/wd4800']
+ 'WARNINGFLAGS': ['/W3', '/WX', '/wd4351', '/wd4355', '/wd4800']
},
'library:shared': {
'CPPDEFINES': ['BUILDING_V8_SHARED'],
@@ -1023,6 +1023,8 @@
# Print a warning that liveobjectlist will implicitly enable the debugger
print "Warning: forcing debuggersupport on for liveobjectlist"
options['debuggersupport'] = 'on'
+ options['inspector'] = 'on'
+ options['objectprint'] = 'on'
def ParseEnvOverrides(arg, imports):
diff --git a/src/SConscript b/src/SConscript
index f3b9d43..76ca2dc 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -95,6 +95,7 @@
mark-compact.cc
messages.cc
objects.cc
+ objects-printer.cc
objects-visiting.cc
oprofile-agent.cc
parser.cc
@@ -216,8 +217,9 @@
x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
- x64/lithium-x64.cc
x64/lithium-codegen-x64.cc
+ x64/lithium-gap-resolver-x64.cc
+ x64/lithium-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
@@ -236,10 +238,8 @@
'os:win32': ['platform-win32.cc'],
'mode:release': [],
'mode:debug': [
- 'objects-debug.cc', 'objects-printer.cc', 'prettyprinter.cc',
- 'regexp-macro-assembler-tracer.cc'
- ],
- 'objectprint:on': ['objects-printer.cc']
+ 'objects-debug.cc', 'prettyprinter.cc', 'regexp-macro-assembler-tracer.cc'
+ ]
}
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index a446007..f0fd09a 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -112,9 +112,10 @@
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
// Attempt to allocate the context in new space.
- __ AllocateInNewSpace(FixedArray::SizeFor(slots_),
+ __ AllocateInNewSpace(FixedArray::SizeFor(length),
r0,
r1,
r2,
@@ -127,7 +128,7 @@
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(Smi::FromInt(slots_)));
+ __ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// Setup the fixed slots.
@@ -143,7 +144,7 @@
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ str(r1, MemOperand(r0, Context::SlotOffset(i)));
}
@@ -2557,8 +2558,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate,
- int frame_alignment_skew) {
+ bool always_allocate) {
// r0: result parameter for PerformGC, if any
// r4: number of arguments including receiver (C callee-saved)
// r5: pointer to builtin function (C callee-saved)
@@ -2584,14 +2584,13 @@
__ mov(r0, Operand(r4));
__ mov(r1, Operand(r6));
+#if defined(V8_HOST_ARCH_ARM)
int frame_alignment = MacroAssembler::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
-#if defined(V8_HOST_ARCH_ARM)
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
ASSERT(IsPowerOf2(frame_alignment));
- __ sub(r2, sp, Operand(frame_alignment_skew));
__ tst(r2, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -2601,35 +2600,20 @@
}
#endif
- // Just before the call (jump) below lr is pushed, so the actual alignment is
- // adding one to the current skew.
- int alignment_before_call =
- (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
- if (alignment_before_call > 0) {
- // Push until the alignment before the call is met.
- __ mov(r2, Operand(0, RelocInfo::NONE));
- for (int i = alignment_before_call;
- (i & frame_alignment_mask) != 0;
- i += kPointerSize) {
- __ push(r2);
- }
- }
-
// TODO(1242173): To let the GC traverse the return address of the exit
// frames, we need to know where the return address is. Right now,
- // we push it on the stack to be able to find it again, but we never
+ // we store it on the stack to be able to find it again, but we never
// restore from it in case of changes, which makes it impossible to
// support moving the C entry code stub. This should be fixed, but currently
// this is OK because the CEntryStub gets generated so early in the V8 boot
// sequence that it is not moving ever.
- masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4
- masm->push(lr);
- masm->Jump(r5);
- // Restore sp back to before aligning the stack.
- if (alignment_before_call > 0) {
- __ add(sp, sp, Operand(alignment_before_call));
- }
+ // Compute the return address in lr to return to after the jump below. Pc is
+ // already at '+ 8' from the current instruction but return is after three
+ // instructions so add another 4 to pc to get the return address.
+ masm->add(lr, pc, Operand(4));
+ __ str(lr, MemOperand(sp, 0));
+ masm->Jump(r5);
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
@@ -2717,8 +2701,7 @@
&throw_termination_exception,
&throw_out_of_memory_exception,
false,
- false,
- -kPointerSize);
+ false);
// Do space-specific GC and retry runtime call.
GenerateCore(masm,
@@ -2726,8 +2709,7 @@
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
- false,
- 0);
+ false);
// Do full GC and retry runtime call one final time.
Failure* failure = Failure::InternalError();
@@ -2737,8 +2719,7 @@
&throw_termination_exception,
&throw_out_of_memory_exception,
true,
- true,
- kPointerSize);
+ true);
__ bind(&throw_out_of_memory_exception);
GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 2fa0711..f76bd35 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -209,7 +209,7 @@
frame_->AllocateStackSlots();
frame_->AssertIsSpilled();
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
@@ -5849,14 +5849,10 @@
frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // lookup the context holding the named variable
+ // Delete from the context holding the named variable.
frame_->EmitPush(cp);
frame_->EmitPush(Operand(variable->name()));
- frame_->CallRuntime(Runtime::kLookupContext, 2);
- // r0: context
- frame_->EmitPush(r0);
- frame_->EmitPush(Operand(variable->name()));
- frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->EmitPush(r0);
} else {
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 8a53d1c..3fd1e39 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -112,13 +112,16 @@
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
@@ -367,7 +370,7 @@
// Copy core registers into FrameDescription::registers_[kNumRegisters].
ASSERT(Register::kNumRegisters == kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
__ str(r2, MemOperand(r1, offset));
}
@@ -456,7 +459,7 @@
// Push the registers from the last output frame.
for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r6, MemOperand(r2, offset));
__ push(r6);
}
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index d2726cf..a805d28 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -30,20 +30,13 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "frames-inl.h"
-#include "arm/assembler-arm-inl.h"
-
namespace v8 {
namespace internal {
Address ExitFrame::ComputeStackPointer(Address fp) {
- Address marker = Memory::Address_at(fp + ExitFrameConstants::kMarkerOffset);
- Address sp = fp + ExitFrameConstants::kSPOffset;
- if (marker == NULL) {
- sp -= DwVfpRegister::kNumRegisters * kDoubleSize + 2 * kPointerSize;
- }
- return sp;
+ return Memory::Address_at(fp + ExitFrameConstants::kSPOffset);
}
diff --git a/src/arm/frames-arm.h b/src/arm/frames-arm.h
index f1be27f..4aa8d6a 100644
--- a/src/arm/frames-arm.h
+++ b/src/arm/frames-arm.h
@@ -107,21 +107,17 @@
class ExitFrameConstants : public AllStatic {
public:
- static const int kCodeOffset = -1 * kPointerSize;
+ static const int kCodeOffset = -2 * kPointerSize;
static const int kSPOffset = -1 * kPointerSize;
- // TODO(regis): Use a patched sp value on the stack instead.
- // A marker of 0 indicates that double registers are saved.
- static const int kMarkerOffset = -2 * kPointerSize;
-
// The caller fields are below the frame pointer on the stack.
- static const int kCallerFPOffset = +0 * kPointerSize;
- // The calling JS function is between FP and PC.
- static const int kCallerPCOffset = +2 * kPointerSize;
+ static const int kCallerFPOffset = 0 * kPointerSize;
+ // The calling JS function is below FP.
+ static const int kCallerPCOffset = 1 * kPointerSize;
// FP-relative displacement of the caller's SP. It points just
// below the saved PC.
- static const int kCallerSPDisplacement = +3 * kPointerSize;
+ static const int kCallerSPDisplacement = 2 * kPointerSize;
};
@@ -131,8 +127,8 @@
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
static const int kCallerFPOffset = 0 * kPointerSize;
- static const int kCallerPCOffset = +1 * kPointerSize;
- static const int kCallerSPOffset = +2 * kPointerSize;
+ static const int kCallerPCOffset = 1 * kPointerSize;
+ static const int kCallerSPOffset = 2 * kPointerSize;
};
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index ddc74e2..9483b73 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -92,7 +92,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is in r1.
@@ -2992,22 +2992,20 @@
if (prop != NULL) {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else if (var->is_global()) {
__ ldr(r1, GlobalObjectOperand());
__ mov(r0, Operand(var->name()));
__ Push(r1, r0);
+ __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
} else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
+ // Non-global variable. Call the runtime to delete from the
+ // context where the variable was introduced.
__ push(context_register());
__ mov(r2, Operand(var->name()));
__ push(r2);
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(r0);
- __ mov(r2, Operand(var->name()));
- __ push(r2);
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
}
- __ InvokeBuiltin(Builtins::DELETE, CALL_JS);
context()->Plug(r0);
}
break;
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index c484e39..402eb20 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -653,22 +653,6 @@
}
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id) {
- ASSERT(instructions_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instructions_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = ast_id;
- return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instructions_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
@@ -678,8 +662,8 @@
if (hinstr->HasSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
- instr = SetInstructionPendingDeoptimizationEnvironment(
- instr, sim->ast_id());
+ ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ pending_deoptimization_ast_id_ = sim->ast_id();
}
// If instruction does not have side-effects lazy deoptimization
@@ -1018,11 +1002,8 @@
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
- temp1,
- temp2);
+ LOperand* temp = TempRegister();
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()), temp);
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1313,8 +1294,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1404,7 +1385,7 @@
ASSERT(instr->value()->representation().IsTagged());
LOperand* value = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LIsObject(value, TempRegister()));
+ return DefineAsRegister(new LIsObject(value));
}
@@ -1829,12 +1810,11 @@
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
- LInstruction* result = new LLazyBailout;
- result = AssignEnvironment(result);
- instructions_pending_deoptimization_environment_->
- set_deoptimization_environment(result->environment());
- ClearInstructionPendingDeoptimizationEnvironment();
+ if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
+ ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
+ LInstruction* lazy_bailout = new LLazyBailout;
+ LInstruction* result = AssignEnvironment(lazy_bailout);
+ pending_deoptimization_ast_id_ = AstNode::kNoNumber;
return result;
}
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index 81a0266..dae3ead 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -320,21 +320,10 @@
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- void set_deoptimization_environment(LEnvironment* env) {
- deoptimization_environment_.set(env);
- }
- LEnvironment* deoptimization_environment() const {
- return deoptimization_environment_.get();
- }
- bool HasDeoptimizationEnvironment() const {
- return deoptimization_environment_.is_set();
- }
-
private:
SetOncePointer<LEnvironment> environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- SetOncePointer<LEnvironment> deoptimization_environment_;
};
@@ -734,9 +723,8 @@
class LIsObject: public LTemplateInstruction<1, 1, 1> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ explicit LIsObject(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
@@ -745,10 +733,9 @@
class LIsObjectAndBranch: public LControlInstruction<1, 2> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
@@ -1965,10 +1952,6 @@
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LInstruction* MarkAsSaveDoubles(LInstruction* instr);
- LInstruction* SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id);
- void ClearInstructionPendingDeoptimizationEnvironment();
-
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
void VisitInstruction(HInstruction* current);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 6abb830..ea88cb3 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -601,16 +601,20 @@
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
+ LEnvironment* lazy_deoptimization_environment;
+ ASSERT(!instr->IsControl());
+ ASSERT(instructions_->at(current_instruction_ + 1)->IsGap());
+ LInstruction* next_instr = instructions_->at(current_instruction_ + 2);
+ if (next_instr->IsLazyBailout()) {
+ ASSERT(next_instr->HasEnvironment());
+ lazy_deoptimization_environment = next_instr->environment();
} else {
- deoptimization_environment = instr->environment();
+ lazy_deoptimization_environment = instr->environment();
}
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
+ RegisterEnvironmentForDeoptimization(lazy_deoptimization_environment);
RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ lazy_deoptimization_environment->deoptimization_index());
}
@@ -1730,18 +1734,62 @@
Register temp2,
Label* is_not_object,
Label* is_object) {
- Abort("EmitIsObject unimplemented.");
- return ne;
+ __ BranchOnSmi(input, is_not_object);
+
+ __ LoadRoot(temp1, Heap::kNullValueRootIndex);
+ __ cmp(input, temp1);
+ __ b(eq, is_object);
+
+ // Load map.
+ __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+ // Undetectable objects behave like undefined.
+ __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+ __ tst(temp2, Operand(1 << Map::kIsUndetectable));
+ __ b(ne, is_not_object);
+
+ // Load instance type and check that it is in object type range.
+ __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+ __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(lt, is_not_object);
+ __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
+ return le;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
- Abort("DoIsObject unimplemented.");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Register temp = scratch0();
+ Label is_false, is_true, done;
+
+ Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
+ __ b(true_cond, &is_true);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ b(&done);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
- Abort("DoIsObjectAndBranch unimplemented.");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register temp1 = ToRegister(instr->TempAt(0));
+ Register temp2 = scratch0();
+
+ int true_block = chunk_->LookupDestination(instr->true_block_id());
+ int false_block = chunk_->LookupDestination(instr->false_block_id());
+ Label* true_label = chunk_->GetAssemblyLabel(true_block);
+ Label* false_label = chunk_->GetAssemblyLabel(false_block);
+
+ Condition true_cond =
+ EmitIsObject(reg, temp1, temp2, false_label, true_label);
+
+ EmitBranch(true_block, false_block, true_cond);
}
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index a78de98..7431f3b 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -615,37 +615,24 @@
void MacroAssembler::EnterExitFrame(bool save_doubles) {
- // r0 is argc.
- // Compute callee's stack pointer before making changes and save it as
- // ip register so that it is restored as sp register on exit, thereby
- // popping the args.
+ // Compute the argv pointer in a callee-saved register.
+ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+ sub(r6, r6, Operand(kPointerSize));
- // ip = sp + kPointerSize * #args;
- add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
-
- // Compute the argv pointer and keep it in a callee-saved register.
- sub(r6, ip, Operand(kPointerSize));
-
- // Prepare the stack to be aligned when calling into C. After this point there
- // are 5 pushes before the call into C, so the stack needs to be aligned after
- // 5 pushes.
- int frame_alignment = ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment != kPointerSize) {
- // The following code needs to be more general if this assert does not hold.
- ASSERT(frame_alignment == 2 * kPointerSize);
- // With 5 pushes left the frame must be unaligned at this point.
- mov(r7, Operand(Smi::FromInt(0)));
- tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
- push(r7, eq); // Push if aligned to make it unaligned.
- }
-
- // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
- stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
+ // Setup the frame structure on the stack.
+ ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+ ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+ ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+ Push(lr, fp);
mov(fp, Operand(sp)); // Setup new frame pointer.
-
+ // Reserve room for saved entry sp and code object.
+ sub(sp, sp, Operand(2 * kPointerSize));
+ if (FLAG_debug_code) {
+ mov(ip, Operand(0));
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
mov(ip, Operand(CodeObject()));
- push(ip); // Accessed from ExitFrame::code_slot.
+ str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
// Save the frame pointer and the context in top.
mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -659,25 +646,30 @@
// Optionally save all double registers.
if (save_doubles) {
- // TODO(regis): Use vstrm instruction.
- // The stack alignment code above made sp unaligned, so add space for one
- // more double register and use aligned addresses.
- ASSERT(kDoubleSize == frame_alignment);
- // Mark the frame as containing doubles by pushing a non-valid return
- // address, i.e. 0.
- ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
- mov(ip, Operand(0)); // Marker and alignment word.
- push(ip);
- int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
- sub(sp, sp, Operand(space));
+ sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
+ const int offset = -2 * kPointerSize;
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- vstr(reg, sp, i * kDoubleSize + kPointerSize);
+ vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
- // Note that d0 will be accessible at fp - 2*kPointerSize -
- // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
- // alignment word were pushed after the fp.
+ // Note that d0 will be accessible at
+ // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+ // since the sp slot and code slot were pushed after the fp.
}
+
+  // Reserve room for the return address and align the frame in preparation
+  // for calling the runtime function.
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ sub(sp, sp, Operand(kPointerSize));
+ if (frame_alignment > 0) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ and_(sp, sp, Operand(-frame_alignment));
+ }
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ add(ip, sp, Operand(kPointerSize));
+ str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@@ -715,12 +707,10 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Optionally restore all double registers.
if (save_doubles) {
- // TODO(regis): Use vldrm instruction.
for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
DwVfpRegister reg = DwVfpRegister::from_code(i);
- // Register d15 is just below the marker.
- const int offset = ExitFrameConstants::kMarkerOffset;
- vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
+ const int offset = -2 * kPointerSize;
+ vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
}
}
@@ -736,9 +726,12 @@
str(r3, MemOperand(ip));
#endif
- // Pop the arguments, restore registers, and return.
- mov(sp, Operand(fp)); // respect ABI stack constraint
- ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+ // Tear down the exit frame, pop the arguments, and return. Callee-saved
+ // register r4 still holds argc.
+ mov(sp, Operand(fp));
+ ldm(ia_w, sp, fp.bit() | lr.bit());
+ add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+ mov(pc, lr);
}
diff --git a/src/ast.cc b/src/ast.cc
index fa01be0..e8b3e03 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -239,12 +239,19 @@
HashMap* table;
void* key;
uint32_t index;
+ Smi* smi_key_location;
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
- ASSERT(!name->AsArrayIndex(&index));
- key = name.location();
- hash = name->Hash();
- table = &properties;
+ if (name->AsArrayIndex(&index)) {
+ smi_key_location = Smi::FromInt(index);
+ key = &smi_key_location;
+ hash = index;
+ table = &elements;
+ } else {
+ key = name.location();
+ hash = name->Hash();
+ table = &properties;
+ }
} else if (handle->ToArrayIndex(&index)) {
key = handle.location();
hash = index;
@@ -570,7 +577,14 @@
bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
- holder_ = Handle<JSObject>::null();
+ if (check_type_ == RECEIVER_MAP_CHECK) {
+ // For primitive checks the holder is set up to point to the
+ // corresponding prototype object, i.e. one step of the algorithm
+    // below has already been performed.
+ // For non-primitive checks we clear it to allow computing targets
+ // for polymorphic calls.
+ holder_ = Handle<JSObject>::null();
+ }
while (true) {
LookupResult lookup;
type->LookupInDescriptors(NULL, *name, &lookup);
@@ -640,8 +654,9 @@
map = receiver_types_->at(0);
} else {
ASSERT(check_type_ != RECEIVER_MAP_CHECK);
- map = Handle<Map>(
- oracle->GetPrototypeForPrimitiveCheck(check_type_)->map());
+ holder_ = Handle<JSObject>(
+ oracle->GetPrototypeForPrimitiveCheck(check_type_));
+ map = Handle<Map>(holder_->map());
}
is_monomorphic_ = ComputeTarget(map, name);
}
diff --git a/src/code-stubs.h b/src/code-stubs.h
index bc7516a..06cffed 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -273,21 +273,20 @@
class FastNewContextStub : public CodeStub {
public:
- // We want no more than 64 different stubs.
- static const int kMaximumSlots = Context::MIN_CONTEXT_SLOTS + 63;
+ static const int kMaximumSlots = 64;
explicit FastNewContextStub(int slots) : slots_(slots) {
- ASSERT(slots_ >= Context::MIN_CONTEXT_SLOTS && slots_ <= kMaximumSlots);
+    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
}
void Generate(MacroAssembler* masm);
private:
- virtual const char* GetName() { return "FastNewContextStub"; }
- virtual Major MajorKey() { return FastNewContext; }
- virtual int MinorKey() { return slots_; }
-
int slots_;
+
+ const char* GetName() { return "FastNewContextStub"; }
+ Major MajorKey() { return FastNewContext; }
+ int MinorKey() { return slots_; }
};
@@ -600,8 +599,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate_scope,
- int alignment_skew = 0);
+ bool always_allocate_scope);
void GenerateThrowTOS(MacroAssembler* masm);
void GenerateThrowUncatchable(MacroAssembler* masm,
UncatchableExceptionType type);
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index f9bf280..69aa497 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -128,14 +128,17 @@
static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor);
- // Given the relocation info of a call to the stack check stub, patch the
- // code so as to go unconditionally to the on-stack replacement builtin
- // instead.
- static void PatchStackCheckCode(RelocInfo* rinfo, Code* replacement_code);
+ // Patch all stack guard checks in the unoptimized code to
+ // unconditionally call replacement_code.
+ static void PatchStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code);
- // Given the relocation info of a call to the on-stack replacement
- // builtin, patch the code back to the original stack check code.
- static void RevertStackCheckCode(RelocInfo* rinfo, Code* check_code);
+ // Change all patched stack guard checks in the unoptimized code
+ // back to a normal stack guard check.
+ static void RevertStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code);
~Deoptimizer();
diff --git a/src/heap.cc b/src/heap.cc
index 32d751a..7c7f3d2 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -35,6 +35,7 @@
#include "debug.h"
#include "heap-profiler.h"
#include "global-handles.h"
+#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "natives.h"
#include "objects-visiting.h"
@@ -400,6 +401,8 @@
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
ReportStatisticsBeforeGC();
#endif
+
+ LiveObjectList::GCPrologue();
}
intptr_t Heap::SizeOfObjects() {
@@ -412,6 +415,7 @@
}
void Heap::GarbageCollectionEpilogue() {
+ LiveObjectList::GCEpilogue();
#ifdef DEBUG
allow_allocation(true);
ZapFromSpace();
@@ -1066,6 +1070,8 @@
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
+ LiveObjectList::UpdateReferencesForScavengeGC();
+
ASSERT(new_space_front == new_space_.top());
// Set age mark.
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 2c83662..5f3400f 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -91,7 +91,8 @@
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
- __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
eax, ebx, ecx, &gc, TAG_OBJECT);
// Get the function from the stack.
@@ -100,7 +101,7 @@
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
__ mov(FieldOperand(eax, Context::kLengthOffset),
- Immediate(Smi::FromInt(slots_)));
+ Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ Set(ebx, Immediate(0)); // Set to NULL.
@@ -118,7 +119,7 @@
// Initialize the rest of the slots to undefined.
__ mov(ebx, Factory::undefined_value());
- for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ mov(Operand(eax, Context::SlotOffset(i)), ebx);
}
@@ -1772,40 +1773,11 @@
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- Label call_runtime;
ASSERT(operands_type_ == TRBinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
- // If one of the arguments is a string, call the string add stub.
- // Otherwise, transition to the generic TRBinaryOpIC type.
-
- // Registers containing left and right operands respectively.
- Register left = edx;
- Register right = eax;
-
- // Test if left operand is a string.
- NearLabel left_not_string;
- __ test(left, Immediate(kSmiTagMask));
- __ j(zero, &left_not_string);
- __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &left_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ test(right, Immediate(kSmiTagMask));
- __ j(zero, &call_runtime);
- __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_runtime);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // TRBinaryOpIC type.
+ GenerateAddStrings(masm);
GenerateTypeTransition(masm);
}
@@ -2346,36 +2318,8 @@
__ bind(&call_runtime);
switch (op_) {
case Token::ADD: {
+ GenerateAddStrings(masm);
GenerateRegisterArgsPush(masm);
- // Test for string arguments before calling runtime.
- // Registers containing left and right operands respectively.
- Register lhs, rhs;
- lhs = edx;
- rhs = eax;
-
- // Test if left operand is a string.
- NearLabel lhs_not_string;
- __ test(lhs, Immediate(kSmiTagMask));
- __ j(zero, &lhs_not_string);
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &lhs_not_string);
-
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
- __ TailCallStub(&string_add_left_stub);
-
- NearLabel call_add_runtime;
- // Left operand is not a string, test right.
- __ bind(&lhs_not_string);
- __ test(rhs, Immediate(kSmiTagMask));
- __ j(zero, &call_add_runtime);
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
- __ j(above_equal, &call_add_runtime);
-
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
- __ TailCallStub(&string_add_right_stub);
-
- // Neither argument is a string.
- __ bind(&call_add_runtime);
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break;
}
@@ -2418,6 +2362,40 @@
}
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ NearLabel call_runtime;
+
+ // Registers containing left and right operands respectively.
+ Register left = edx;
+ Register right = eax;
+
+ // Test if left operand is a string.
+ NearLabel left_not_string;
+ __ test(left, Immediate(kSmiTagMask));
+ __ j(zero, &left_not_string);
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &left_not_string);
+
+ StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_left_stub);
+
+ // Left operand is not a string, test right.
+ __ bind(&left_not_string);
+ __ test(right, Immediate(kSmiTagMask));
+ __ j(zero, &call_runtime);
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
+ __ j(above_equal, &call_runtime);
+
+ StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
+ GenerateRegisterArgsPush(masm);
+ __ TailCallStub(&string_add_right_stub);
+
+ // Neither argument is a string.
+ __ bind(&call_runtime);
+}
+
+
void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
MacroAssembler* masm,
Label* alloc_failure) {
@@ -4660,8 +4638,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
+ bool always_allocate_scope) {
// eax: result parameter for PerformGC, if any
// ebx: pointer to C function (C callee-saved)
// ebp: frame pointer (restored after C call)
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 4a56d0d..cd18749 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -308,6 +308,7 @@
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
void GenerateRegisterArgsPush(MacroAssembler* masm);
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 29c8c0e..2b48b0b 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -209,7 +209,7 @@
frame_->AllocateStackSlots();
// Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@@ -8230,19 +8230,13 @@
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to look up the context holding the named
+ // Call the runtime to delete from the context holding the named
// variable. Sync the virtual frame eagerly so we can push the
// arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(variable->name()));
- Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- ASSERT(context.is_register());
- frame_->EmitPush(context.reg());
- context.Unuse();
- frame_->EmitPush(Immediate(variable->name()));
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
}
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 3050c56..e9e3fcf 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -106,44 +106,71 @@
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
Code* replacement_code) {
- // The stack check code matches the pattern:
- //
- // cmp esp, <limit>
- // jae ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // cmp esp, <limit> ;; Not changed
- // nop
- // nop
-  //   call <on-stack replacement>
- // test eax, <loop nesting depth>
- // ok:
- Address call_target_address = rinfo->pc();
- ASSERT(*(call_target_address - 3) == 0x73 && // jae
- *(call_target_address - 2) == 0x07 && // offset
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
- rinfo->set_target_address(replacement_code->entry());
+ // Iterate the unoptimized code and patch every stack check except at
+ // the function entry. This code assumes the function entry stack
+  // check appears first, i.e., is not deferred or otherwise reordered.
+ ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ bool first = true;
+ for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+ !it.done();
+ it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ if (rinfo->target_address() == Code::cast(check_code)->entry()) {
+ if (first) {
+ first = false;
+ } else {
+ // The stack check code matches the pattern:
+ //
+ // cmp esp, <limit>
+ // jae ok
+ // call <stack guard>
+ // test eax, <loop nesting depth>
+ // ok: ...
+ //
+ // We will patch away the branch so the code is:
+ //
+ // cmp esp, <limit> ;; Not changed
+ // nop
+ // nop
+        //   call <on-stack replacement>
+ // test eax, <loop nesting depth>
+ // ok:
+ Address call_target_address = rinfo->pc();
+ ASSERT(*(call_target_address - 3) == 0x73 && // jae
+ *(call_target_address - 2) == 0x07 && // offset
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x90; // nop
+ *(call_target_address - 2) = 0x90; // nop
+ rinfo->set_target_address(replacement_code->entry());
+ }
+ }
+ }
}
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- Address call_target_address = rinfo->pc();
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
- *(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x73; // jae
- *(call_target_address - 2) = 0x07; // offset
- rinfo->set_target_address(check_code->entry());
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code) {
+ // Iterate the unoptimized code and revert all the patched stack checks.
+ for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
+ !it.done();
+ it.next()) {
+ RelocInfo* rinfo = it.rinfo();
+ if (rinfo->target_address() == replacement_code->entry()) {
+ // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
+ // restore the conditional branch.
+ Address call_target_address = rinfo->pc();
+ ASSERT(*(call_target_address - 3) == 0x90 && // nop
+ *(call_target_address - 2) == 0x90 && // nop
+ *(call_target_address - 1) == 0xe8); // call
+ *(call_target_address - 3) = 0x73; // jae
+ *(call_target_address - 2) = 0x07; // offset
+ rinfo->set_target_address(check_code->entry());
+ }
+ }
}
@@ -507,26 +534,25 @@
__ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
// Fill in the input registers.
- for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
- __ mov(ecx, Operand(esp, (kNumberOfRegisters - 1 - i) * kPointerSize));
- __ mov(Operand(ebx, offset), ecx);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(ebx, offset));
}
// Fill in the double input registers.
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ int src_offset = i * kDoubleSize;
__ movdbl(xmm0, Operand(esp, src_offset));
__ movdbl(Operand(ebx, dst_offset), xmm0);
}
- // Remove the bailout id and the general purpose registers from the stack.
+ // Remove the bailout id and the double registers from the stack.
if (type() == EAGER) {
- __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + kPointerSize));
+ __ add(Operand(esp), Immediate(kDoubleRegsSize + kPointerSize));
} else {
- __ add(Operand(esp), Immediate(kSavedRegistersAreaSize + 2 * kPointerSize));
+ __ add(Operand(esp), Immediate(kDoubleRegsSize + 2 * kPointerSize));
}
// Compute a pointer to the unwinding limit in register ecx; that is
@@ -591,7 +617,7 @@
// Push the registers from the last output frame.
for (int i = 0; i < kNumberOfRegisters; i++) {
- int offset = (i * kIntSize) + FrameDescription::registers_offset();
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ push(Operand(ebx, offset));
}
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 772eb8f..aa0dafb 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -142,7 +142,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in edi.
@@ -3689,19 +3689,18 @@
if (prop != NULL) {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else if (var->is_global()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
+ // Non-global variable. Call the runtime to delete from the
+ // context where the variable was introduced.
__ push(context_register());
__ push(Immediate(var->name()));
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(eax);
- __ push(Immediate(var->name()));
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
}
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(eax);
}
break;
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 3bfb10f..ce7b73b 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -414,16 +414,20 @@
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
+ LEnvironment* lazy_deoptimization_environment;
+ ASSERT(!instr->IsControl());
+ ASSERT(instructions_->at(current_instruction_ + 1)->IsGap());
+ LInstruction* next_instr = instructions_->at(current_instruction_ + 2);
+ if (next_instr->IsLazyBailout()) {
+ ASSERT(next_instr->HasEnvironment());
+ lazy_deoptimization_environment = next_instr->environment();
} else {
- deoptimization_environment = instr->environment();
+ lazy_deoptimization_environment = instr->environment();
}
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
+ RegisterEnvironmentForDeoptimization(lazy_deoptimization_environment);
RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ lazy_deoptimization_environment->deoptimization_index());
}
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 8886959..45c790f 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -32,12 +32,11 @@
namespace internal {
LGapResolver::LGapResolver(LCodeGen* owner)
- : cgen_(owner), moves_(32), spilled_register_(-1) {
- for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) {
- source_uses_[i] = 0;
- destination_uses_[i] = 0;
- }
-}
+ : cgen_(owner),
+ moves_(32),
+ source_uses_(),
+ destination_uses_(),
+ spilled_register_(-1) {}
void LGapResolver::Resolve(LParallelMove* parallel_move) {
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index f422514..e6e3c3f 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -656,22 +656,6 @@
}
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id) {
- ASSERT(instructions_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instructions_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = ast_id;
- return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instructions_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
@@ -681,8 +665,8 @@
if (hinstr->HasSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
- instr = SetInstructionPendingDeoptimizationEnvironment(
- instr, sim->ast_id());
+ ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ pending_deoptimization_ast_id_ = sim->ast_id();
}
// If instruction does not have side-effects lazy deoptimization
@@ -1343,8 +1327,8 @@
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
- LOperand* left = UseRegisterAtStart(instr->LeastConstantOperand());
- LOperand* right = UseOrConstantAtStart(instr->MostConstantOperand());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new LSubI(left, right);
LInstruction* result = DefineSameAsFirst(sub);
if (instr->CheckFlag(HValue::kCanOverflow)) {
@@ -1874,12 +1858,11 @@
// If there is an instruction pending deoptimization environment create a
// lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+ if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
+ ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
LLazyBailout* lazy_bailout = new LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
- instructions_pending_deoptimization_environment_->
- set_deoptimization_environment(result->environment());
- ClearInstructionPendingDeoptimizationEnvironment();
+ pending_deoptimization_ast_id_ = AstNode::kNoNumber;
return result;
}
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 1cdd31e..3c233c9 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -321,21 +321,10 @@
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- void set_deoptimization_environment(LEnvironment* env) {
- deoptimization_environment_.set(env);
- }
- LEnvironment* deoptimization_environment() const {
- return deoptimization_environment_.get();
- }
- bool HasDeoptimizationEnvironment() const {
- return deoptimization_environment_.is_set();
- }
-
private:
SetOncePointer<LEnvironment> environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- SetOncePointer<LEnvironment> deoptimization_environment_;
};
@@ -1881,7 +1870,6 @@
argument_count_(0),
allocator_(allocator),
position_(RelocInfo::kNoPosition),
- instructions_pending_deoptimization_environment_(NULL),
pending_deoptimization_ast_id_(AstNode::kNoNumber) { }
// Build the sequence for the graph.
@@ -1990,10 +1978,6 @@
CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
LInstruction* MarkAsSaveDoubles(LInstruction* instr);
- LInstruction* SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id);
- void ClearInstructionPendingDeoptimizationEnvironment();
-
LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env);
void VisitInstruction(HInstruction* current);
@@ -2015,7 +1999,6 @@
int argument_count_;
LAllocator* allocator_;
int position_;
- LInstruction* instructions_pending_deoptimization_environment_;
int pending_deoptimization_ast_id_;
DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 10c942a..929008f 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -339,7 +339,7 @@
CpuFeatures::Scope scope(SSE2);
int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize;
sub(Operand(esp), Immediate(space));
- int offset = -2 * kPointerSize;
+ const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
@@ -382,7 +382,7 @@
// Optionally restore all XMM registers.
if (save_doubles) {
CpuFeatures::Scope scope(SSE2);
- int offset = -2 * kPointerSize;
+ const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movdbl(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index a946ffa..5c649d1 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -33,6 +33,7 @@
#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
+#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"
@@ -1660,6 +1661,7 @@
free_start = current;
is_prev_alive = false;
}
+ LiveObjectList::ProcessNonLive(object);
}
}
@@ -1880,6 +1882,9 @@
size,
false);
} else {
+ // Process the dead object before we write a NULL into its header.
+ LiveObjectList::ProcessNonLive(object);
+
size = object->Size();
Memory::Address_at(current) = NULL;
}
@@ -1899,6 +1904,7 @@
// Update roots.
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
+ LiveObjectList::IterateElements(&updating_visitor);
// Update pointers in old spaces.
Heap::IterateDirtyRegions(Heap::old_pointer_space(),
@@ -1986,6 +1992,7 @@
free_start = current;
is_previous_alive = false;
}
+ LiveObjectList::ProcessNonLive(object);
}
// The object is now unmarked for the call to Size() at the top of the
// loop.
@@ -2164,6 +2171,7 @@
void UpdateMapPointersInRoots() {
Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
+ LiveObjectList::IterateElements(&map_updating_visitor_);
}
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
@@ -2533,6 +2541,8 @@
// Update the pointer to the head of the weak list of global contexts.
updating_visitor.VisitPointer(&Heap::global_contexts_list_);
+ LiveObjectList::IterateElements(&updating_visitor);
+
int live_maps_size = IterateLiveObjects(Heap::map_space(),
&UpdatePointersInOldObject);
int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
diff --git a/src/messages.js b/src/messages.js
index 932d64d..24b642f 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -90,12 +90,28 @@
}
+// To check if something is a native error we need to check the
+// concrete native error types. It is not enough to check "obj
+// instanceof $Error" because user code can replace
+// NativeError.prototype.__proto__. User code cannot replace
+// NativeError.prototype though and therefore this is a safe test.
+function IsNativeErrorObject(obj) {
+ return (obj instanceof $Error) ||
+ (obj instanceof $EvalError) ||
+ (obj instanceof $RangeError) ||
+ (obj instanceof $ReferenceError) ||
+ (obj instanceof $SyntaxError) ||
+ (obj instanceof $TypeError) ||
+ (obj instanceof $URIError);
+}
+
+
// When formatting internally created error messages, do not
// invoke overwritten error toString methods but explicitly use
// the error to string method. This is to avoid leaking error
// objects between script tags in a browser setting.
function ToStringCheckErrorObject(obj) {
- if (obj instanceof $Error) {
+ if (IsNativeErrorObject(obj)) {
return %_CallFunction(obj, errorToString);
} else {
return ToString(obj);
@@ -216,6 +232,7 @@
strict_param_dupe: "Strict mode function may not have duplicate parameter names",
strict_var_name: "Variable name may not be eval or arguments in strict mode",
strict_function_name: "Function name may not be eval or arguments in strict mode",
+ strict_octal_literal: "Octal literals are not allowed in strict mode.",
};
}
var format = kMessages[message.type];
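
The comment above IsNativeErrorObject describes a prototype-chain subtlety: user code can re-point NativeError.prototype.__proto__, cutting the path to Error.prototype, but cannot replace NativeError.prototype itself. A toy C++ model of that chain walk (not full JS semantics; names are illustrative):

#include <cstdio>

struct Proto { const Proto* parent; };

// A simplified "instanceof": walk the chain looking for target.
bool InstanceOf(const Proto* p, const Proto* target) {
  for (; p != 0; p = p->parent)
    if (p == target) return true;
  return false;
}

int main() {
  Proto error_proto = { 0 };
  Proto type_error_proto = { &error_proto };  // normal chain
  Proto detached = { 0 };
  type_error_proto.parent = &detached;        // user replaced __proto__
  const Proto* obj = &type_error_proto;       // a TypeError instance's chain
  std::printf("instanceof Error: %d, instanceof TypeError: %d\n",
              InstanceOf(obj, &error_proto),        // 0 -- chain was cut
              InstanceOf(obj, &type_error_proto));  // 1 -- still reachable
  return 0;
}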
diff --git a/src/parser.cc b/src/parser.cc
index 2637281..a1ba9ca 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -664,7 +664,11 @@
TemporaryScope temp_scope(&this->temp_scope_);
ZoneList<Statement*>* body = new ZoneList<Statement*>(16);
bool ok = true;
+ int beg_loc = scanner().location().beg_pos;
ParseSourceElements(body, Token::EOS, &ok);
+ if (ok && temp_scope_->StrictMode()) {
+ CheckOctalLiteral(beg_loc, scanner().location().end_pos, &ok);
+ }
if (ok) {
result = new FunctionLiteral(
no_name,
@@ -3384,7 +3388,7 @@
*ok = false;
return NULL;
}
- // TODO(mmaly): Check for octal escape sequence here.
+ CheckOctalLiteral(start_pos, end_pos, CHECK_OK);
}
FunctionLiteral* function_literal =
@@ -3530,6 +3534,18 @@
return GetSymbol(ok);
}
+// Checks whether the octal literal last seen is between beg_pos and end_pos.
+// If so, reports an error.
+void Parser::CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+ int octal = scanner().octal_position();
+ if (beg_pos <= octal && octal <= end_pos) {
+ ReportMessageAt(Scanner::Location(octal, octal + 1), "strict_octal_literal",
+ Vector<const char*>::empty());
+ scanner().clear_octal_position();
+ *ok = false;
+ }
+}
+
// This function reads an identifier and determines whether or not it
// is 'get' or 'set'. The reason for not using ParseIdentifier and
diff --git a/src/parser.h b/src/parser.h
index 0613a8d..916f9ed 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -613,6 +613,9 @@
bool* is_set,
bool* ok);
+ // Strict mode octal literal validation.
+ void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok);
+
// Parser support
VariableProxy* Declare(Handle<String> name, Variable::Mode mode,
FunctionLiteral* fun,
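
Taken together, the scanner records only the start position of the most recent octal literal, and the parser decides after the fact whether that position fell inside a region that turned out to be strict. A minimal sketch of the handshake, with a hypothetical MiniScanner standing in for V8's class:

#include <cstdio>

struct MiniScanner {
  static const int kNoOctalLocation = -1;
  int octal_pos;
  MiniScanner() : octal_pos(kNoOctalLocation) {}
  void ScannedOctalAt(int pos) { octal_pos = pos; }
  void ClearOctal() { octal_pos = kNoOctalLocation; }
};

// Mirrors Parser::CheckOctalLiteral: fail iff the last octal literal
// falls inside the strict-mode region [beg_pos, end_pos].
bool CheckOctalLiteral(MiniScanner* s, int beg_pos, int end_pos) {
  if (beg_pos <= s->octal_pos && s->octal_pos <= end_pos) {
    std::printf("strict_octal_literal at position %d\n", s->octal_pos);
    s->ClearOctal();  // avoid reporting the same literal twice
    return false;     // corresponds to *ok = false
  }
  return true;
}

int main() {
  MiniScanner scanner;
  scanner.ScannedOctalAt(42);           // scanner saw 010 at offset 42
  CheckOctalLiteral(&scanner, 0, 100);  // strict body covers it: error
  CheckOctalLiteral(&scanner, 0, 100);  // cleared: no duplicate report
  return 0;
}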
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 7174c7f..c18049f 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -430,7 +430,7 @@
Thread::Thread(const char* name) : ThreadHandle(ThreadHandle::INVALID) {
- set_names(name);
+ set_name(name);
}
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index d7792ac..3406cdc 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -193,22 +193,9 @@
if (maybe_check_code->ToObject(&check_code)) {
Code* replacement_code = Builtins::builtin(Builtins::OnStackReplacement);
Code* unoptimized_code = shared->code();
- // Iterate the unoptimized code and patch every stack check except at
- // the function entry. This code assumes the function entry stack
- // check appears first i.e., is not deferred or otherwise reordered.
- bool first = true;
- for (RelocIterator it(unoptimized_code, RelocInfo::kCodeTargetMask);
- !it.done();
- it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->target_address() == Code::cast(check_code)->entry()) {
- if (first) {
- first = false;
- } else {
- Deoptimizer::PatchStackCheckCode(rinfo, replacement_code);
- }
- }
- }
+ Deoptimizer::PatchStackCheckCode(unoptimized_code,
+ Code::cast(check_code),
+ replacement_code);
}
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 2f1f54c..b8133ae 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -6944,15 +6944,9 @@
Handle<Code> check_code = check_stub.GetCode();
Handle<Code> replacement_code(
Builtins::builtin(Builtins::OnStackReplacement));
- // Iterate the unoptimized code and revert all the patched stack checks.
- for (RelocIterator it(*unoptimized, RelocInfo::kCodeTargetMask);
- !it.done();
- it.next()) {
- RelocInfo* rinfo = it.rinfo();
- if (rinfo->target_address() == replacement_code->entry()) {
- Deoptimizer::RevertStackCheckCode(rinfo, *check_code);
- }
- }
+ Deoptimizer::RevertStackCheckCode(*unoptimized,
+ *check_code,
+ *replacement_code);
// Allow OSR only at nesting level zero again.
unoptimized->set_allow_osr_at_loop_nesting_level(0);
@@ -7049,7 +7043,7 @@
}
-static MaybeObject* Runtime_LookupContext(Arguments args) {
+static MaybeObject* Runtime_DeleteContextSlot(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 2);
@@ -7059,16 +7053,31 @@
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
+ Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
- if (index < 0 && !holder.is_null()) {
- ASSERT(holder->IsJSObject());
- return *holder;
+ // If the slot was not found the result is true.
+ if (holder.is_null()) {
+ return Heap::true_value();
}
- // No intermediate context found. Use global object by default.
- return Top::context()->global();
+ // If the slot was found in a context, it should be DONT_DELETE.
+ if (holder->IsContext()) {
+ return Heap::false_value();
+ }
+
+ // The slot was found in a JSObject, either a context extension object,
+ // the global object, or an arguments object. Try to delete it
+ // (respecting DONT_DELETE). For consistency with V8's usual behavior,
+ // which allows deleting all parameters in functions that mention
+ // 'arguments', we do this even for the case of slots found on an
+ // arguments object. The slot was found on an arguments object if the
+ // index is non-negative.
+ Handle<JSObject> object = Handle<JSObject>::cast(holder);
+ if (index >= 0) {
+ return object->DeleteElement(index, JSObject::NORMAL_DELETION);
+ } else {
+ return object->DeleteProperty(*name, JSObject::NORMAL_DELETION);
+ }
}
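
The fix for the accidental-global-deletion bug is visible in the shape of this function: the old Runtime_LookupContext fell back to the global object whenever no intermediate context was found, so the subsequent DELETE builtin could remove an unrelated global of the same name. The new logic answers directly per holder kind; a distilled sketch (hypothetical enum, not V8 types):

#include <cstdio>

enum HolderKind { kNotFound, kContextHolder, kJSObjectHolder };

// Returns what Runtime_DeleteContextSlot answers for each case.
bool DeleteContextSlotResult(HolderKind holder, bool object_delete_ok) {
  if (holder == kNotFound) return true;        // nothing to delete
  if (holder == kContextHolder) return false;  // context slots are DONT_DELETE
  return object_delete_ok;  // JSObject: defer to DeleteProperty/DeleteElement
}

int main() {
  std::printf("%d %d %d\n",
              DeleteContextSlotResult(kNotFound, false),        // 1
              DeleteContextSlotResult(kContextHolder, false),   // 0
              DeleteContextSlotResult(kJSObjectHolder, true));  // 1
  return 0;
}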
@@ -7141,8 +7150,7 @@
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
+ Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
// If the index is non-negative, the slot has been found in a local
// variable or a parameter. Read it from the context object or the
@@ -7209,8 +7217,7 @@
int index;
PropertyAttributes attributes;
ContextLookupFlags flags = FOLLOW_CHAINS;
- Handle<Object> holder =
- context->Lookup(name, flags, &index, &attributes);
+ Handle<Object> holder = context->Lookup(name, flags, &index, &attributes);
if (index >= 0) {
if (holder->IsContext()) {
diff --git a/src/runtime.h b/src/runtime.h
index dbd8d64..d8b3457 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -284,7 +284,7 @@
F(NewContext, 1, 1) \
F(PushContext, 1, 1) \
F(PushCatchContext, 1, 1) \
- F(LookupContext, 2, 1) \
+ F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
F(StoreContextSlot, 3, 1) \
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index 997fb31..e141d0e 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -64,7 +64,8 @@
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner() { }
+Scanner::Scanner()
+ : octal_pos_(kNoOctalLocation) { }
uc32 Scanner::ScanHexEscape(uc32 c, int length) {
@@ -98,6 +99,7 @@
// Octal escapes of the forms '\0xx' and '\xxx' are not a part of
// ECMA-262. Other JS VMs support them.
uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
+ octal_pos_ = source_pos() - 1; // Already advanced
uc32 x = c - '0';
for (int i = 0; i < length; i++) {
int d = c0_ - '0';
@@ -601,7 +603,11 @@
kind = DECIMAL;
break;
}
- if (c0_ < '0' || '7' < c0_) break;
+ if (c0_ < '0' || '7' < c0_) {
+ // Octal literal finished.
+ octal_pos_ = next_.location.beg_pos;
+ break;
+ }
AddLiteralCharAdvance();
}
}
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 1024ad1..a3e07d3 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -247,6 +247,9 @@
// Generic functionality used by both JSON and JavaScript scanners.
class Scanner {
public:
+ // -1 is outside the range of any real source code.
+ static const int kNoOctalLocation = -1;
+
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
class LiteralScope {
@@ -280,6 +283,10 @@
Location location() const { return current_.location; }
Location peek_location() const { return next_.location; }
+ // Returns the position of the last octal literal seen.
+ int octal_position() const { return octal_pos_; }
+ void clear_octal_position() { octal_pos_ = kNoOctalLocation; }
+
// Returns the literal string, if any, for the current token (the
// token returned by Next()). The string is 0-terminated and in
// UTF-8 format; they may contain 0-characters. Literal strings are
@@ -410,6 +417,8 @@
// Input stream. Must be initialized to an UC16CharacterStream.
UC16CharacterStream* source_;
+ // Start position of the octal literal last scanned.
+ int octal_pos_;
// One Unicode character look-ahead; c0_ < 0 at the end of the input.
uc32 c0_;
diff --git a/src/scopes.cc b/src/scopes.cc
index 50da1fa..d3f54ad 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -726,7 +726,6 @@
// Note that we must do a lookup anyway, because if we find one,
// we must mark that variable as potentially accessed from this
// inner scope (the property may not be in the 'with' object).
- if (var != NULL) var->set_is_used(true);
var = NonLocal(proxy->name(), Variable::DYNAMIC);
} else {
@@ -834,8 +833,8 @@
// visible name.
if ((var->is_this() || var->name()->length() > 0) &&
(var->is_accessed_from_inner_scope() ||
- scope_calls_eval_ ||
- inner_scope_calls_eval_)) {
+ scope_calls_eval_ || inner_scope_calls_eval_ ||
+ scope_contains_with_)) {
var->set_is_used(true);
}
// Global variables do not need to be allocated.
diff --git a/src/spaces.cc b/src/spaces.cc
index fca1032..a586fbf 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -27,6 +27,7 @@
#include "v8.h"
+#include "liveobjectlist-inl.h"
#include "macro-assembler.h"
#include "mark-compact.h"
#include "platform.h"
@@ -3125,6 +3126,8 @@
// Free the chunk.
MarkCompactCollector::ReportDeleteIfNeeded(object);
+ LiveObjectList::ProcessNonLive(object);
+
size_ -= static_cast<int>(chunk_size);
objects_size_ -= object->Size();
page_count_--;
diff --git a/src/version.cc b/src/version.cc
index a2659a1..07f43f0 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 0
-#define BUILD_NUMBER 10
+#define BUILD_NUMBER 11
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 70b40e0..4406452 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -425,7 +425,7 @@
// Use SIB with no index register only for base rsp or r12. Otherwise we
// would skip the SIB byte entirely.
ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
- buf_[1] = scale << 6 | index.low_bits() << 3 | base.low_bits();
+ buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
rex_ |= index.high_bit() << 1 | base.high_bit();
len_ = 2;
}
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 0f866a4..6a5ec61 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -300,6 +300,34 @@
}
}
+
+bool Operand::AddressUsesRegister(Register reg) const {
+ int code = reg.code();
+ ASSERT((buf_[0] & 0xC0) != 0xC0); // Always a memory operand.
+ // Start with only low three bits of base register. Initial decoding doesn't
+ // distinguish on the REX.B bit.
+ int base_code = buf_[0] & 0x07;
+ if (base_code == rsp.code()) {
+ // SIB byte present in buf_[1].
+ // Check the index register from the SIB byte + REX.X prefix.
+ int index_code = ((buf_[1] >> 3) & 0x07) | ((rex_ & 0x02) << 2);
+ // Index code (including REX.X) of 0x04 (rsp) means no index register.
+ if (index_code != rsp.code() && index_code == code) return true;
+ // Add REX.B to get the full base register code.
+ base_code = (buf_[1] & 0x07) | ((rex_ & 0x01) << 3);
+ // A base register of 0x05 (rbp) with mod = 0 means no base register.
+ if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ return code == base_code;
+ } else {
+ // A base register with low bits of 0x05 (rbp or r13) and mod = 0 means
+ // no base register.
+ if (base_code == rbp.code() && ((buf_[0] & 0xC0) == 0)) return false;
+ base_code |= ((rex_ & 0x01) << 3);
+ return code == base_code;
+ }
+}
+
+
// -----------------------------------------------------------------------------
// Implementation of Assembler.
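
As a hand-check of the decoding in AddressUsesRegister (standard x64 encoding; values chosen for illustration): with a SIB byte present, the low three bits of the index and base come from the SIB byte, and the REX.X/REX.B bits supply the fourth bit, exactly the masks and shifts used above.

#include <cstdint>
#include <cstdio>

int main() {
  // Worked example: the operand [r13 + rcx*4 + disp8].
  uint8_t modrm = 0x44;  // mod=01 (disp8), reg=000, rm=100 -> SIB follows
  uint8_t sib   = 0x8D;  // scale=10, index=001 (rcx), base=101
  uint8_t rex   = 0x01;  // REX.B set: base 101 becomes 1101 = r13
  (void)modrm;
  int index_code = ((sib >> 3) & 0x07) | ((rex & 0x02) << 2);  // REX.X clear
  int base_code  = (sib & 0x07) | ((rex & 0x01) << 3);
  std::printf("index=%d base=%d\n", index_code, base_code);  // index=1 base=13
  return 0;
}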
@@ -1278,6 +1306,18 @@
}
+void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
+ EnsureSpace ensure_space(this);
+ RecordRelocInfo(rmode);
+ last_pc_ = pc_;
+ ASSERT((0 <= cc) && (cc < 16));
+ // 0000 1111 1000 tttn #32-bit disp.
+ emit(0x0F);
+ emit(0x80 | cc);
+ emit(entry - (pc_ + sizeof(intptr_t)));
+}
+
+
void Assembler::jmp(Label* L) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1949,6 +1989,14 @@
}
+void Assembler::push_imm32(int32_t imm32) {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x68);
+ emitl(imm32);
+}
+
+
void Assembler::pushfq() {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
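
push_imm32 matters because instruction size must be predictable: the deoptimization entry table built later asserts that every entry occupies exactly table_entry_size_ bytes, and the generic push(Immediate) is free to pick the sign-extended 8-bit form when the value fits. A hand-check of the two encodings (standard x64; illustrative):

#include <cstdio>

int main() {
  // Opcode 0x6A takes one sign-extended immediate byte; opcode 0x68
  // always carries four immediate bytes -- a fixed 5-byte instruction.
  unsigned char short_form[] = { 0x6A, 0x07 };                    // push 7
  unsigned char long_form[]  = { 0x68, 0x07, 0x00, 0x00, 0x00 };  // push 7
  std::printf("%u vs %u bytes\n",
              (unsigned)sizeof(short_form), (unsigned)sizeof(long_form));
  return 0;
}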
@@ -2641,6 +2689,30 @@
}
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ emit(0x66);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(is_uint2(imm8));
EnsureSpace ensure_space(this);
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 890cd8a..9d4694b 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -153,6 +153,7 @@
// Unfortunately we can't make this private in a struct when initializing
// by assignment.
int code_;
+
private:
static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
static const int allocationIndexByRegisterCode[kNumRegisters];
@@ -390,11 +391,15 @@
// this must not overflow.
Operand(const Operand& base, int32_t offset);
+ // Checks whether either base or index register is the given register.
+ // Does not check the "reg" part of the Operand.
+ bool AddressUsesRegister(Register reg) const;
+
private:
byte rex_;
byte buf_[6];
- // The number of bytes in buf_.
- unsigned int len_;
+ // The number of bytes of buf_ in use.
+ byte len_;
// Set the ModR/M byte without an encoded 'reg' register. The
// register is encoded later as part of the emit_operand operation.
@@ -590,6 +595,9 @@
void popfq();
void push(Immediate value);
+ // Push a 32 bit integer, and guarantee that it is actually pushed as a
+ // 32 bit value; the normal push will optimize the 8 bit case.
+ void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
@@ -821,6 +829,10 @@
arithmetic_op_32(0x23, dst, src);
}
+ void andl(Register dst, const Operand& src) {
+ arithmetic_op_32(0x23, dst, src);
+ }
+
void andb(Register dst, Immediate src) {
immediate_arithmetic_op_8(0x4, dst, src);
}
@@ -1122,6 +1134,7 @@
// Conditional jumps
void j(Condition cc, Label* L);
+ void j(Condition cc, byte* entry, RelocInfo::Mode rmode);
void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
// Conditional short jump
@@ -1205,6 +1218,9 @@
void movsd(XMMRegister dst, XMMRegister src);
void movsd(XMMRegister dst, const Operand& src);
+ void movdqa(const Operand& dst, XMMRegister src);
+ void movdqa(XMMRegister dst, const Operand& src);
+
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
@@ -1245,10 +1261,6 @@
void emit_sse_operand(XMMRegister dst, Register src);
void emit_sse_operand(Register dst, XMMRegister src);
- // Use either movsd or movlpd.
- // void movdbl(XMMRegister dst, const Operand& src);
- // void movdbl(const Operand& dst, XMMRegister src);
-
// Debugging
void Print();
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index d738261..079dc8a 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -561,7 +561,33 @@
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
- __ int3();
+ // Enter an internal frame.
+ __ EnterInternalFrame();
+
+ // Pass the deoptimization type to the runtime system.
+ __ Push(Smi::FromInt(static_cast<int>(type)));
+
+ __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+ // Tear down temporary frame.
+ __ LeaveInternalFrame();
+
+ // Get the full codegen state from the stack and untag it.
+ __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Switch on the state.
+ NearLabel not_no_registers, not_tos_rax;
+ __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS));
+ __ j(not_equal, &not_no_registers);
+ __ ret(1 * kPointerSize); // Remove state.
+
+ __ bind(&not_no_registers);
+ __ movq(rax, Operand(rsp, 2 * kPointerSize));
+ __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG));
+ __ j(not_equal, &not_tos_rax);
+ __ ret(2 * kPointerSize); // Remove state, rax.
+
+ __ bind(&not_tos_rax);
+ __ Abort("no cases left");
}
void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
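
The two ret sizes in the helper encode the stack layout left by the deoptimizer's continuation. A sketch of the arithmetic (kPointerSize assumed to be 8 for x64):

#include <cassert>

int main() {
  const int kPointerSize = 8;
  // NO_REGISTERS: only the state word sits above the return address,
  // so ret drops one slot.
  int drop_no_registers = 1 * kPointerSize;
  // TOS_REG: rax is reloaded from the stack first, so ret drops the
  // state word plus the saved top-of-stack value.
  int drop_tos_rax = 2 * kPointerSize;
  assert(drop_tos_rax - drop_no_registers == kPointerSize);
  return 0;
}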
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index a261b9d..e385915 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -91,7 +91,8 @@
void FastNewContextStub::Generate(MacroAssembler* masm) {
// Try to allocate the context in new space.
Label gc;
- __ AllocateInNewSpace((slots_ * kPointerSize) + FixedArray::kHeaderSize,
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
rax, rbx, rcx, &gc, TAG_OBJECT);
// Get the function from the stack.
@@ -100,7 +101,7 @@
// Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(slots_));
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ Set(rbx, 0); // Set to NULL.
@@ -115,7 +116,7 @@
// Initialize the rest of the slots to undefined.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
- for (int i = Context::MIN_CONTEXT_SLOTS; i < slots_; i++) {
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
__ movq(Operand(rax, Context::SlotOffset(i)), rbx);
}
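
This pairs with the num_heap_slots() - Context::MIN_CONTEXT_SLOTS change in codegen-x64.cc and full-codegen-x64.cc below: the stub's slots_ now counts only user-visible slots, and the fixed header slots are added back once, at allocation. The round-trip, as a sketch (the value 5 is an assumption for illustration, not V8's actual constant):

#include <cassert>

int main() {
  const int kMinContextSlots = 5;  // illustrative value
  int num_heap_slots = 8;          // what the scope reports
  // The code generators now pass only the user slots to the stub...
  int stub_slots = num_heap_slots - kMinContextSlots;
  // ...and the stub adds the fixed header slots back when allocating.
  int length = stub_slots + kMinContextSlots;
  assert(length == num_heap_slots);  // the two adjustments cancel
  return 0;
}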
@@ -2773,8 +2774,7 @@
Label* throw_termination_exception,
Label* throw_out_of_memory_exception,
bool do_gc,
- bool always_allocate_scope,
- int /* alignment_skew */) {
+ bool always_allocate_scope) {
// rax: result parameter for PerformGC, if any.
// rbx: pointer to C function (C callee-saved).
// rbp: frame pointer (restored after C call).
@@ -2867,7 +2867,7 @@
__ j(zero, &failure_returned);
// Exit the JavaScript to C++ exit frame.
- __ LeaveExitFrame();
+ __ LeaveExitFrame(save_doubles_);
__ ret(0);
// Handling of failure.
@@ -2976,7 +2976,7 @@
#else
int arg_stack_space = 0;
#endif
- __ EnterExitFrame(arg_stack_space);
+ __ EnterExitFrame(arg_stack_space, save_doubles_);
// rax: Holds the context at this point, but should not be used.
// On entry to code generated by GenerateCore, it must hold
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 57720a8..bebe468 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -206,7 +206,7 @@
frame_->AllocateStackSlots();
// Allocate the local context if needed.
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
@@ -7235,19 +7235,13 @@
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
- // Call the runtime to look up the context holding the named
+ // Call the runtime to delete from the context holding the named
// variable. Sync the virtual frame eagerly so we can push the
// arguments directly into place.
frame_->SyncRange(0, frame_->element_count() - 1);
frame_->EmitPush(rsi);
frame_->EmitPush(variable->name());
- Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
- ASSERT(context.is_register());
- frame_->EmitPush(context.reg());
- context.Unuse();
- frame_->EmitPush(variable->name());
- Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
- CALL_FUNCTION, 2);
+ Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
}
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index 6b19d3f..d5ba567 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -41,18 +41,81 @@
int Deoptimizer::table_entry_size_ = 10;
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
- // UNIMPLEMENTED, for now just return.
- return;
+ AssertNoAllocation no_allocation;
+
+ if (!function->IsOptimized()) return;
+
+ // Get the optimized code.
+ Code* code = function->code();
+
+ // Invalidate the relocation information, as the code patching below
+ // will make it invalid, and it is not needed any more.
+ code->InvalidateRelocation();
+
+ // For each return after a safepoint insert an absolute call to the
+ // corresponding deoptimization entry.
+ unsigned last_pc_offset = 0;
+ SafepointTable table(function->code());
+ for (unsigned i = 0; i < table.length(); i++) {
+ unsigned pc_offset = table.GetPcOffset(i);
+ SafepointEntry safepoint_entry = table.GetEntry(i);
+ int deoptimization_index = safepoint_entry.deoptimization_index();
+ int gap_code_size = safepoint_entry.gap_code_size();
+#ifdef DEBUG
+ // Destroy the code which is not supposed to run again.
+ unsigned instructions = pc_offset - last_pc_offset;
+ CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+ instructions);
+ for (unsigned i = 0; i < instructions; i++) {
+ destroyer.masm()->int3();
+ }
+#endif
+ last_pc_offset = pc_offset;
+ if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
+ CodePatcher patcher(
+ code->instruction_start() + pc_offset + gap_code_size,
+ Assembler::kCallInstructionLength);
+ patcher.masm()->Call(GetDeoptimizationEntry(deoptimization_index, LAZY),
+ RelocInfo::NONE);
+ last_pc_offset += gap_code_size + Assembler::kCallInstructionLength;
+ }
+ }
+#ifdef DEBUG
+ // Destroy the code which is not supposed to run again.
+ unsigned instructions = code->safepoint_table_start() - last_pc_offset;
+ CodePatcher destroyer(code->instruction_start() + last_pc_offset,
+ instructions);
+ for (unsigned i = 0; i < instructions; i++) {
+ destroyer.masm()->int3();
+ }
+#endif
+
+ // Add the deoptimizing code to the list.
+ DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
+ node->set_next(deoptimizing_code_list_);
+ deoptimizing_code_list_ = node;
+
+ // Set the code for the function to non-optimized version.
+ function->ReplaceCode(function->shared()->code());
+
+ if (FLAG_trace_deopt) {
+ PrintF("[forced deoptimization: ");
+ function->PrintName();
+ PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+ }
}
-void Deoptimizer::PatchStackCheckCode(RelocInfo* rinfo,
+void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
Code* replacement_code) {
UNIMPLEMENTED();
}
-void Deoptimizer::RevertStackCheckCode(RelocInfo* rinfo, Code* check_code) {
+void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
+ Code* check_code,
+ Code* replacement_code) {
UNIMPLEMENTED();
}
@@ -64,20 +127,382 @@
void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
int frame_index) {
- UNIMPLEMENTED();
+ // Read the ast node id, function, and frame height for this output frame.
+ Translation::Opcode opcode =
+ static_cast<Translation::Opcode>(iterator->Next());
+ USE(opcode);
+ ASSERT(Translation::FRAME == opcode);
+ int node_id = iterator->Next();
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating ");
+ function->PrintName();
+ PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
+ }
+
+ // The 'fixed' part of the frame consists of the incoming parameters and
+ // the part described by JavaScriptFrameConstants.
+ unsigned fixed_frame_size = ComputeFixedSize(function);
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+
+ bool is_bottommost = (0 == frame_index);
+ bool is_topmost = (output_count_ - 1 == frame_index);
+ ASSERT(frame_index >= 0 && frame_index < output_count_);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address for the bottommost output frame can be computed from
+ // the input frame pointer and the output frame's height. For all
+ // subsequent output frames, it can be computed from the previous one's
+ // top address and the current frame's size.
+ intptr_t top_address;
+ if (is_bottommost) {
+ // 2 = context and function in the frame.
+ top_address =
+ input_->GetRegister(rbp.code()) - (2 * kPointerSize) - height_in_bytes;
+ } else {
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ }
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = function->shared()->formal_parameter_count() + 1;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // There are no translation commands for the caller's pc and fp, the
+ // context, and the function. Synthesize their values and set them up
+ // explicitly.
+ //
+ // The caller's pc for the bottommost output frame is the same as in the
+ // input frame. For all subsequent output frames, it can be read from the
+ // previous one. This frame's pc can be computed from the non-optimized
+ // function code and AST id of the bailout.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetPc();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's pc\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The caller's frame pointer for the bottommost output frame is the same
+ // as in the input frame. For all subsequent output frames, it can be
+ // read from the previous one. Also compute and set this frame's frame
+ // pointer.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ if (is_bottommost) {
+ value = input_->GetFrameSlot(input_offset);
+ } else {
+ value = output_[frame_index - 1]->GetFp();
+ }
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value);
+ output_frame->SetFp(fp_value);
+ if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR " ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // The context can be gotten from the function so long as we don't
+ // optimize functions that need local contexts.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function->context());
+ // The context for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (is_topmost) output_frame->SetRegister(rsi.code(), value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; context\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // The function was mentioned explicitly in the BEGIN_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ // The function for the bottommost output frame should also agree with the
+ // input frame.
+ ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08"
+ V8PRIxPTR "; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Translate the rest of the frame.
+ for (unsigned i = 0; i < height; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ ASSERT(0 == output_offset);
+
+ // Compute this frame's PC, state, and continuation.
+ Code* non_optimized_code = function->shared()->code();
+ FixedArray* raw_data = non_optimized_code->deoptimization_data();
+ DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
+ Address start = non_optimized_code->instruction_start();
+ unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
+ unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
+ intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
+ output_frame->SetPc(pc_value);
+
+ FullCodeGenerator::State state =
+ FullCodeGenerator::StateField::decode(pc_and_state);
+ output_frame->SetState(Smi::FromInt(state));
+
+ // Set the continuation for the topmost frame.
+ if (is_topmost) {
+ Code* continuation = (bailout_type_ == EAGER)
+ ? Builtins::builtin(Builtins::NotifyDeoptimized)
+ : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
+ output_frame->SetContinuation(
+ reinterpret_cast<intptr_t>(continuation->entry()));
+ }
+
+ if (output_count_ - 1 == frame_index) iterator->Done();
}
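
The slot bookkeeping in DoComputeFrame reduces to simple arithmetic: each output frame holds the translated parameters, four synthesized words (caller's pc, caller's fp, context, function), then height translated expression-stack slots. A sketch with example counts (not V8 constants):

#include <cassert>

int main() {
  const int kPointerSize = 8;
  int parameter_count = 2 + 1;  // formals + receiver
  int height = 3;               // expression-stack slots from the Translation
  // Parameters, then caller pc, caller fp, context, function:
  int fixed_slots = parameter_count + 4;
  int output_frame_size = (fixed_slots + height) * kPointerSize;
  assert(output_frame_size == 10 * kPointerSize);
  // For the bottommost frame, the top mirrors the code above:
  // top_address = rbp - 2 * kPointerSize - height_in_bytes.
  return 0;
}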
+#define __ masm()->
+
void Deoptimizer::EntryGenerator::Generate() {
- // UNIMPLEMENTED, for now just return.
- return;
+ GeneratePrologue();
+ CpuFeatures::Scope scope(SSE2);
+
+ // Save all general purpose registers before messing with them.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ const int kDoubleRegsSize = kDoubleSize *
+ XMMRegister::kNumAllocatableRegisters;
+ __ subq(rsp, Immediate(kDoubleRegsSize));
+
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ movsd(Operand(rsp, offset), xmm_reg);
+ }
+
+ // We push all registers onto the stack, even though we do not need
+ // to restore all of them later.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ Register r = Register::toRegister(i);
+ __ push(r);
+ }
+
+ const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize +
+ kDoubleRegsSize;
+
+ // When calling new_deoptimizer_function we need to pass the last argument
+ // on the stack on windows and in r8 on linux. The remaining arguments are
+ // all passed in registers (different ones on linux and windows though).
+
+#ifdef _WIN64
+ Register arg4 = r9;
+ Register arg3 = r8;
+ Register arg2 = rdx;
+ Register arg1 = rcx;
+#else
+ Register arg4 = rcx;
+ Register arg3 = rdx;
+ Register arg2 = rsi;
+ Register arg1 = rdi;
+#endif
+
+ // We use this to keep the value of the fifth argument temporarily.
+ // Unfortunately we can't store it directly in r8 (used for passing
+ // this on linux), since it is another parameter passing register on windows.
+ Register arg5 = r11;
+
+ // Get the bailout id from the stack.
+ __ movq(arg3, Operand(rsp, kSavedRegistersAreaSize));
+
+ // Get the address of the location in the code object if possible
+ // and compute the fp-to-sp delta in register arg5.
+ if (type() == EAGER) {
+ __ Set(arg4, 0);
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ } else {
+ __ movq(arg4, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
+ }
+
+ __ subq(arg5, rbp);
+ __ neg(arg5);
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(5);
+ __ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(arg1, rax);
+ __ movq(arg2, Immediate(type()));
+ // Args 3 and 4 are already in the right registers.
+
+ // On windows put the argument on the stack (PrepareCallCFunction has
+ // created space for this). On linux pass the argument in r8.
+#ifdef _WIN64
+ __ movq(Operand(rsp, 0 * kPointerSize), arg5);
+#else
+ __ movq(r8, arg5);
+#endif
+
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
+ // Preserve deoptimizer object in register rax and get the input
+ // frame descriptor pointer.
+ __ movq(rbx, Operand(rax, Deoptimizer::input_offset()));
+
+ // Fill in the input registers.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ pop(Operand(rbx, offset));
+ }
+
+ // Fill in the double input registers.
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ __ pop(Operand(rbx, dst_offset));
+ }
+
+ // Remove the bailout id from the stack.
+ if (type() == EAGER) {
+ __ addq(rsp, Immediate(kPointerSize));
+ } else {
+ __ addq(rsp, Immediate(2 * kPointerSize));
+ }
+
+ // Compute a pointer to the unwinding limit in register rcx; that is
+ // the first stack slot not part of the input frame.
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ addq(rcx, rsp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ __ bind(&pop_loop);
+ __ pop(Operand(rdx, 0));
+ __ addq(rdx, Immediate(sizeof(intptr_t)));
+ __ cmpq(rcx, rsp);
+ __ j(not_equal, &pop_loop);
+
+ // Compute the output frame in the deoptimizer.
+ __ push(rax);
+ __ PrepareCallCFunction(1);
+ __ movq(arg1, rax);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ __ pop(rax);
+
+ // Replace the current frame with the output frames.
+ Label outer_push_loop, inner_push_loop;
+ // Outer loop state: rax = current FrameDescription**, rdx = one past the
+ // last FrameDescription**.
+ __ movl(rdx, Operand(rax, Deoptimizer::output_count_offset()));
+ __ movq(rax, Operand(rax, Deoptimizer::output_offset()));
+ __ lea(rdx, Operand(rax, rdx, times_8, 0));
+ __ bind(&outer_push_loop);
+ // Inner loop state: rbx = current FrameDescription*, rcx = loop index.
+ __ movq(rbx, Operand(rax, 0));
+ __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset()));
+ __ bind(&inner_push_loop);
+ __ subq(rcx, Immediate(sizeof(intptr_t)));
+ __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset()));
+ __ testq(rcx, rcx);
+ __ j(not_zero, &inner_push_loop);
+ __ addq(rax, Immediate(kPointerSize));
+ __ cmpq(rax, rdx);
+ __ j(below, &outer_push_loop);
+
+ // In case of OSR, we have to restore the XMM registers.
+ if (type() == OSR) {
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) {
+ XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ movsd(xmm_reg, Operand(rbx, src_offset));
+ }
+ }
+
+ // Push state, pc, and continuation from the last output frame.
+ if (type() != OSR) {
+ __ push(Operand(rbx, FrameDescription::state_offset()));
+ }
+ __ push(Operand(rbx, FrameDescription::pc_offset()));
+ __ push(Operand(rbx, FrameDescription::continuation_offset()));
+
+ // Push the registers from the last output frame.
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ __ push(Operand(rbx, offset));
+ }
+
+ // Restore the registers from the stack.
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ Register r = Register::toRegister(i);
+ // Do not restore rsp, simply pop the value into the next register
+ // and overwrite it afterwards.
+ if (r.is(rsp)) {
+ ASSERT(i > 0);
+ r = Register::toRegister(i - 1);
+ }
+ __ pop(r);
+ }
+
+ // Set up the roots register.
+ ExternalReference roots_address = ExternalReference::roots_address();
+ __ movq(r13, roots_address);
+
+ __ movq(kSmiConstantRegister,
+ reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+ RelocInfo::NONE);
+
+ // Return to the continuation point.
+ __ ret(0);
}
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- UNIMPLEMENTED();
+ // Create a sequence of deoptimization entries.
+ Label done;
+ for (int i = 0; i < count(); i++) {
+ int start = masm()->pc_offset();
+ USE(start);
+ __ push_imm32(i);
+ __ jmp(&done);
+ ASSERT(masm()->pc_offset() - start == table_entry_size_);
+ }
+ __ bind(&done);
}
+#undef __
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
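
GeneratePrologue's ASSERT ties the pieces together: each table entry is a push_imm32 of the bailout id followed by a rel32 jump to the shared tail, and both are fixed-size so entry i can be located by multiplication. The arithmetic, as a hedged sketch (sizes assume the standard 0x68 imm32 and 0xE9 rel32 encodings):

#include <cassert>

int main() {
  const int kPushImm32Size = 1 + 4;  // opcode + 32-bit immediate
  const int kJmpRel32Size  = 1 + 4;  // opcode + 32-bit displacement
  const int kTableEntrySize = kPushImm32Size + kJmpRel32Size;
  assert(kTableEntrySize == 10);  // matches table_entry_size_ above
  int id = 7;
  int entry_offset = id * kTableEntrySize;  // entry for bailout id 7
  assert(entry_offset == 70);
  return 0;
}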
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index 8fdf20b..92a8a7d 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -1025,11 +1025,19 @@
rex_w() ? 'q' : 'd',
NameOfXMMRegister(regop));
current += PrintRightOperand(current);
+ } else if (opcode == 0x6F) {
+ AppendToBuffer("movdqa %s,",
+ NameOfXMMRegister(regop));
+ current += PrintRightOperand(current);
} else if (opcode == 0x7E) {
AppendToBuffer("mov%c ",
rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
AppendToBuffer(", %s", NameOfXMMRegister(regop));
+ } else if (opcode == 0x7F) {
+ AppendToBuffer("movdqa ");
+ current += PrintRightOperand(current);
+ AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else {
const char* mnemonic = "?";
if (opcode == 0x57) {
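
The two new disassembler cases mirror the movdqa emitters added in assembler-x64.cc: 0x6F is the register-load direction and 0x7F the store direction, both under the 0x66 prefix. A hand-check of the encodings (standard SSE2, with the REX.W byte that emit_rex_64 inserts between 0x66 and 0x0F; bytes illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  uint8_t load[]  = { 0x66, 0x48, 0x0F, 0x6F, 0x08 };  // movdqa xmm1,[rax]
  uint8_t store[] = { 0x66, 0x48, 0x0F, 0x7F, 0x08 };  // movdqa [rax],xmm1
  std::printf("opcodes: 0x%02X (load) vs 0x%02X (store)\n",
              load[3], store[3]);
  return 0;
}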
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 896e53d..60efe03 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -88,7 +88,7 @@
bool function_in_register = true;
// Possibly allocate a local context.
- int heap_slots = scope()->num_heap_slots();
+ int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment cmnt(masm_, "[ Allocate local context");
// Argument to NewContext is the function, which is still in rdi.
@@ -3006,19 +3006,18 @@
if (prop != NULL) {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else if (var->is_global()) {
__ push(GlobalObjectOperand());
__ Push(var->name());
+ __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
} else {
- // Non-global variable. Call the runtime to look up the context
- // where the variable was introduced.
+ // Non-global variable. Call the runtime to delete from the
+ // context where the variable was introduced.
__ push(context_register());
__ Push(var->name());
- __ CallRuntime(Runtime::kLookupContext, 2);
- __ push(rax);
- __ Push(var->name());
+ __ CallRuntime(Runtime::kDeleteContextSlot, 2);
}
- __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
context()->Plug(rax);
}
break;
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index dc988b1..0a04f03 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -37,157 +37,6 @@
namespace internal {
-class LGapNode: public ZoneObject {
- public:
- explicit LGapNode(LOperand* operand)
- : operand_(operand), resolved_(false), visited_id_(-1) { }
-
- LOperand* operand() const { return operand_; }
- bool IsResolved() const { return !IsAssigned() || resolved_; }
- void MarkResolved() {
- ASSERT(!IsResolved());
- resolved_ = true;
- }
- int visited_id() const { return visited_id_; }
- void set_visited_id(int id) {
- ASSERT(id > visited_id_);
- visited_id_ = id;
- }
-
- bool IsAssigned() const { return assigned_from_.is_set(); }
- LGapNode* assigned_from() const { return assigned_from_.get(); }
- void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
-
- private:
- LOperand* operand_;
- SetOncePointer<LGapNode> assigned_from_;
- bool resolved_;
- int visited_id_;
-};
-
-
-LGapResolver::LGapResolver()
- : nodes_(32),
- identified_cycles_(4),
- result_(16),
- next_visited_id_(0) {
-}
-
-
-const ZoneList<LMoveOperands>* LGapResolver::Resolve(
- const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand) {
- nodes_.Rewind(0);
- identified_cycles_.Rewind(0);
- result_.Rewind(0);
- next_visited_id_ = 0;
-
- for (int i = 0; i < moves->length(); ++i) {
- LMoveOperands move = moves->at(i);
- if (!move.IsRedundant()) RegisterMove(move);
- }
-
- for (int i = 0; i < identified_cycles_.length(); ++i) {
- ResolveCycle(identified_cycles_[i], marker_operand);
- }
-
- int unresolved_nodes;
- do {
- unresolved_nodes = 0;
- for (int j = 0; j < nodes_.length(); j++) {
- LGapNode* node = nodes_[j];
- if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
- AddResultMove(node->assigned_from(), node);
- node->MarkResolved();
- }
- if (!node->IsResolved()) ++unresolved_nodes;
- }
- } while (unresolved_nodes > 0);
- return &result_;
-}
-
-
-void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
- AddResultMove(from->operand(), to->operand());
-}
-
-
-void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
- result_.Add(LMoveOperands(from, to));
-}
-
-
-void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
- ZoneList<LOperand*> cycle_operands(8);
- cycle_operands.Add(marker_operand);
- LGapNode* cur = start;
- do {
- cur->MarkResolved();
- cycle_operands.Add(cur->operand());
- cur = cur->assigned_from();
- } while (cur != start);
- cycle_operands.Add(marker_operand);
-
- for (int i = cycle_operands.length() - 1; i > 0; --i) {
- LOperand* from = cycle_operands[i];
- LOperand* to = cycle_operands[i - 1];
- AddResultMove(from, to);
- }
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
- ASSERT(a != b);
- LGapNode* cur = a;
- while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
- cur->set_visited_id(visited_id);
- cur = cur->assigned_from();
- }
-
- return cur == b;
-}
-
-
-bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
- ASSERT(a != b);
- return CanReach(a, b, next_visited_id_++);
-}
-
-
-void LGapResolver::RegisterMove(LMoveOperands move) {
- if (move.source()->IsConstantOperand()) {
- // Constant moves should be last in the machine code. Therefore add them
- // first to the result set.
- AddResultMove(move.source(), move.destination());
- } else {
- LGapNode* from = LookupNode(move.source());
- LGapNode* to = LookupNode(move.destination());
- if (to->IsAssigned() && to->assigned_from() == from) {
- move.Eliminate();
- return;
- }
- ASSERT(!to->IsAssigned());
- if (CanReach(from, to)) {
- // This introduces a cycle. Save.
- identified_cycles_.Add(from);
- }
- to->set_assigned_from(from);
- }
-}
-
-
-LGapNode* LGapResolver::LookupNode(LOperand* operand) {
- for (int i = 0; i < nodes_.length(); ++i) {
- if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
- }
-
- // No node found => create a new one.
- LGapNode* result = new LGapNode(operand);
- nodes_.Add(result);
- return result;
-}
-
-
#define __ masm()->
bool LCodeGen::GenerateCode() {
@@ -525,16 +374,20 @@
// Create the environment to bailout to. If the call has side effects
// execution has to continue after the call otherwise execution can continue
// from a previous bailout point repeating the call.
- LEnvironment* deoptimization_environment;
- if (instr->HasDeoptimizationEnvironment()) {
- deoptimization_environment = instr->deoptimization_environment();
+ LEnvironment* lazy_deoptimization_environment;
+ ASSERT(!instr->IsControl());
+ ASSERT(instructions_->at(current_instruction_ + 1)->IsGap());
+ LInstruction* next_instr = instructions_->at(current_instruction_ + 2);
+ if (next_instr->IsLazyBailout()) {
+ ASSERT(next_instr->HasEnvironment());
+ lazy_deoptimization_environment = next_instr->environment();
} else {
- deoptimization_environment = instr->environment();
+ lazy_deoptimization_environment = instr->environment();
}
- RegisterEnvironmentForDeoptimization(deoptimization_environment);
+ RegisterEnvironmentForDeoptimization(lazy_deoptimization_environment);
RecordSafepoint(instr->pointer_map(),
- deoptimization_environment->deoptimization_index());
+ lazy_deoptimization_environment->deoptimization_index());
}
@@ -567,7 +420,21 @@
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
- Abort("Unimplemented: %s", "Deoptimiz");
+ RegisterEnvironmentForDeoptimization(environment);
+ ASSERT(environment->HasBeenRegistered());
+ int id = environment->deoptimization_index();
+ Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
+ ASSERT(entry != NULL);
+ if (entry == NULL) {
+ Abort("bailout was not prepared");
+ return;
+ }
+
+ if (cc == no_condition) {
+ __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
+ } else {
+ __ j(cc, entry, RelocInfo::RUNTIME_ENTRY);
+ }
}
@@ -682,86 +549,7 @@
void LCodeGen::DoParallelMove(LParallelMove* move) {
- // xmm0 must always be a scratch register.
- XMMRegister xmm_scratch = xmm0;
- LUnallocated marker_operand(LUnallocated::NONE);
-
- Register cpu_scratch = kScratchRegister;
-
- const ZoneList<LMoveOperands>* moves =
- resolver_.Resolve(move->move_operands(), &marker_operand);
- for (int i = moves->length() - 1; i >= 0; --i) {
- LMoveOperands move = moves->at(i);
- LOperand* from = move.source();
- LOperand* to = move.destination();
- ASSERT(!from->IsDoubleRegister() ||
- !ToDoubleRegister(from).is(xmm_scratch));
- ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
- ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
- ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
- if (from->IsConstantOperand()) {
- LConstantOperand* constant_from = LConstantOperand::cast(from);
- if (to->IsRegister()) {
- if (IsInteger32Constant(constant_from)) {
- __ movl(ToRegister(to), Immediate(ToInteger32(constant_from)));
- } else {
- __ Move(ToRegister(to), ToHandle(constant_from));
- }
- } else {
- if (IsInteger32Constant(constant_from)) {
- __ movl(ToOperand(to), Immediate(ToInteger32(constant_from)));
- } else {
- __ Move(ToOperand(to), ToHandle(constant_from));
- }
- }
- } else if (from == &marker_operand) {
- if (to->IsRegister()) {
- __ movq(ToRegister(to), cpu_scratch);
- } else if (to->IsStackSlot()) {
- __ movq(ToOperand(to), cpu_scratch);
- } else if (to->IsDoubleRegister()) {
- __ movsd(ToDoubleRegister(to), xmm_scratch);
- } else {
- ASSERT(to->IsDoubleStackSlot());
- __ movsd(ToOperand(to), xmm_scratch);
- }
- } else if (to == &marker_operand) {
- if (from->IsRegister()) {
- __ movq(cpu_scratch, ToRegister(from));
- } else if (from->IsStackSlot()) {
- __ movq(cpu_scratch, ToOperand(from));
- } else if (from->IsDoubleRegister()) {
- __ movsd(xmm_scratch, ToDoubleRegister(from));
- } else {
- ASSERT(from->IsDoubleStackSlot());
- __ movsd(xmm_scratch, ToOperand(from));
- }
- } else if (from->IsRegister()) {
- if (to->IsRegister()) {
- __ movq(ToRegister(to), ToRegister(from));
- } else {
- __ movq(ToOperand(to), ToRegister(from));
- }
- } else if (to->IsRegister()) {
- __ movq(ToRegister(to), ToOperand(from));
- } else if (from->IsStackSlot()) {
- ASSERT(to->IsStackSlot());
- __ push(rax);
- __ movq(rax, ToOperand(from));
- __ movq(ToOperand(to), rax);
- __ pop(rax);
- } else if (from->IsDoubleRegister()) {
- ASSERT(to->IsDoubleStackSlot());
- __ movsd(ToOperand(to), ToDoubleRegister(from));
- } else if (to->IsDoubleRegister()) {
- ASSERT(from->IsDoubleStackSlot());
- __ movsd(ToDoubleRegister(to), ToOperand(from));
- } else {
- ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
- __ movsd(xmm_scratch, ToOperand(from));
- __ movsd(ToOperand(to), xmm_scratch);
- }
- }
+ resolver_.Resolve(move);
}
@@ -820,7 +608,22 @@
void LCodeGen::DoSubI(LSubI* instr) {
- Abort("Unimplemented: %s", "DoSubI");
+ LOperand* left = instr->InputAt(0);
+ LOperand* right = instr->InputAt(1);
+ ASSERT(left->Equals(instr->result()));
+
+ if (right->IsConstantOperand()) {
+ __ subl(ToRegister(left),
+ Immediate(ToInteger32(LConstantOperand::cast(right))));
+ } else if (right->IsRegister()) {
+ __ subl(ToRegister(left), ToRegister(right));
+ } else {
+ __ subl(ToRegister(left), ToOperand(right));
+ }
+
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+ DeoptimizeIf(overflow, instr->environment());
+ }
}
@@ -1146,7 +949,18 @@
void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
- Abort("Unimplemented: %s", "DoCmpJSObjectEq");
+ Register left = ToRegister(instr->InputAt(0));
+ Register right = ToRegister(instr->InputAt(1));
+ Register result = ToRegister(instr->result());
+
+ NearLabel different, done;
+ __ cmpq(left, right);
+ __ j(not_equal, &different);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+ __ bind(&different);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1162,7 +976,45 @@
void LCodeGen::DoIsNull(LIsNull* instr) {
- Abort("Unimplemented: %s", "DoIsNull");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+
+ // If the expression is known to be a smi, then it's
+ // definitely not null. Materialize false.
+ // Consider adding other type and representation tests too.
+ if (instr->hydrogen()->value()->type().IsSmi()) {
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ return;
+ }
+
+ __ CompareRoot(reg, Heap::kNullValueRootIndex);
+ if (instr->is_strict()) {
+ __ movl(result, Immediate(Heap::kTrueValueRootIndex));
+ NearLabel load;
+ __ j(equal, &load);
+ __ movl(result, Immediate(Heap::kFalseValueRootIndex));
+ __ bind(&load);
+ __ movq(result, Operand(kRootRegister, result, times_pointer_size, 0));
+ } else {
+ NearLabel true_value, false_value, done;
+ __ j(equal, &true_value);
+ __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &true_value);
+ __ JumpIfSmi(reg, &false_value);
+ // Check for undetectable objects by looking in the bit field in
+ // the map. The object has already been smi checked.
+ Register scratch = result;
+ __ movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
+ __ testb(FieldOperand(scratch, Map::kBitFieldOffset),
+ Immediate(1 << Map::kIsUndetectable));
+ __ j(not_zero, &true_value);
+ __ bind(&false_value);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+ __ bind(&true_value);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ bind(&done);
+ }
}
@@ -1204,56 +1056,77 @@
Condition LCodeGen::EmitIsObject(Register input,
- Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object) {
- ASSERT(!input.is(temp1));
- ASSERT(!input.is(temp2));
- ASSERT(!temp1.is(temp2));
+ ASSERT(!input.is(kScratchRegister));
__ JumpIfSmi(input, is_not_object);
- __ Cmp(input, Factory::null_value());
+ __ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
- __ movq(temp1, FieldOperand(input, HeapObject::kMapOffset));
+ __ movq(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
- __ testb(FieldOperand(temp1, Map::kBitFieldOffset),
+ __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, is_not_object);
- __ movzxbl(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
- __ cmpb(temp2, Immediate(FIRST_JS_OBJECT_TYPE));
+ __ movzxbl(kScratchRegister,
+ FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+ __ cmpb(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
__ j(below, is_not_object);
- __ cmpb(temp2, Immediate(LAST_JS_OBJECT_TYPE));
+ __ cmpb(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
return below_equal;
}
void LCodeGen::DoIsObject(LIsObject* instr) {
- Abort("Unimplemented: %s", "DoIsObject");
+ Register reg = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ Label is_false, is_true, done;
+
+ Condition true_cond = EmitIsObject(reg, &is_false, &is_true);
+ __ j(true_cond, &is_true);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+
+ __ bind(&done);
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->InputAt(0));
- Register temp = ToRegister(instr->TempAt(0));
- Register temp2 = ToRegister(instr->TempAt(1));
int true_block = chunk_->LookupDestination(instr->true_block_id());
int false_block = chunk_->LookupDestination(instr->false_block_id());
Label* true_label = chunk_->GetAssemblyLabel(true_block);
Label* false_label = chunk_->GetAssemblyLabel(false_block);
- Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
+ Condition true_cond = EmitIsObject(reg, false_label, true_label);
EmitBranch(true_block, false_block, true_cond);
}
void LCodeGen::DoIsSmi(LIsSmi* instr) {
- Abort("Unimplemented: %s", "DoIsSmi");
+ LOperand* input_operand = instr->InputAt(0);
+ Register result = ToRegister(instr->result());
+ if (input_operand->IsRegister()) {
+ Register input = ToRegister(input_operand);
+ __ CheckSmiToIndicator(result, input);
+ } else {
+ Operand input = ToOperand(instr->InputAt(0));
+ __ CheckSmiToIndicator(result, input);
+ }
+ // The result register is zero if the input is a smi, and one otherwise.
+ ASSERT(Heap::kFalseValueRootIndex == Heap::kTrueValueRootIndex + 1);
+ __ movq(result, Operand(kRootRegister, result, times_pointer_size,
+ Heap::kTrueValueRootIndex * kPointerSize));
}
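
DoIsSmi avoids control flow entirely: CheckSmiToIndicator leaves 0 or 1 in the result register, which then indexes the roots array, relying on the false root sitting immediately after the true root (the ASSERT above). The trick in miniature (indices illustrative):

#include <cassert>

int main() {
  // Two adjacent "roots", as kTrueValueRootIndex / kFalseValueRootIndex.
  const char* roots[] = { "true_value", "false_value" };
  int indicator = 1;  // 0 if the input was a smi, 1 otherwise
  const char* result = roots[indicator];
  assert(result == roots[1]);  // non-smi selects false_value, branch-free
  return 0;
}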
@@ -1386,7 +1259,25 @@
void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
- Abort("Unimplemented: %s", "DoClassOfTest");
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
+ ASSERT(input.is(result));
+ Register temp = ToRegister(instr->TempAt(0));
+ Handle<String> class_name = instr->hydrogen()->class_name();
+ NearLabel done;
+ Label is_true, is_false;
+
+ EmitClassOfTest(&is_true, &is_false, class_name, input, temp);
+
+ __ j(not_equal, &is_false);
+
+ __ bind(&is_true);
+ __ LoadRoot(result, Heap::kTrueValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&is_false);
+ __ LoadRoot(result, Heap::kFalseValueRootIndex);
+ __ bind(&done);
}
@@ -1408,7 +1299,12 @@
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
- Abort("Unimplemented: %s", "DoCmpMapAndBranch");
+ Register reg = ToRegister(instr->InputAt(0));
+ int true_block = instr->true_block_id();
+ int false_block = instr->false_block_id();
+
+ __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+ EmitBranch(true_block, false_block, equal);
}
@@ -1563,7 +1459,8 @@
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
- Abort("Unimplemented: %s", "DoGlobalObject");
+ Register result = ToRegister(instr->result());
+ __ movq(result, GlobalObjectOperand());
}
@@ -1700,27 +1597,63 @@
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- Abort("Unimplemented: %s", "DoInteger32ToDouble");
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() || input->IsStackSlot());
+ LOperand* output = instr->result();
+ ASSERT(output->IsDoubleRegister());
+ __ cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
- Abort("Unimplemented: %s", "DoNumberTagI");
-}
+ LOperand* input = instr->InputAt(0);
+ ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register reg = ToRegister(input);
-
-void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
- Abort("Unimplemented: %s", "DoDeferredNumberTagI");
+ __ Integer32ToSmi(reg, reg);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
- Abort("Unimplemented: %s", "DoNumberTagD");
+ class DeferredNumberTagD: public LDeferredCode {
+ public:
+ DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+ private:
+ LNumberTagD* instr_;
+ };
+
+ XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+ Register reg = ToRegister(instr->result());
+ Register tmp = ToRegister(instr->TempAt(0));
+
+ DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
+ if (FLAG_inline_new) {
+ __ AllocateHeapNumber(reg, tmp, deferred->entry());
+ } else {
+ __ jmp(deferred->entry());
+ }
+ __ bind(deferred->exit());
+ __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
- Abort("Unimplemented: %s", "DoDeferredNumberTagD");
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ Register reg = ToRegister(instr->result());
+ __ Move(reg, Smi::FromInt(0));
+
+ __ PushSafepointRegisters();
+ __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ // Ensure that value in rax survives popping registers.
+ __ movq(kScratchRegister, rax);
+ __ PopSafepointRegisters();
+ __ movq(reg, kScratchRegister);
}
@@ -1737,7 +1670,34 @@
void LCodeGen::EmitNumberUntagD(Register input_reg,
XMMRegister result_reg,
LEnvironment* env) {
- Abort("Unimplemented: %s", "EmitNumberUntagD");
+ NearLabel load_smi, heap_number, done;
+
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi);
+
+ // Heap number map check.
+ __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(equal, &heap_number);
+
+ __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+ DeoptimizeIf(not_equal, env);
+
+ // Convert undefined to NaN. Compute NaN as 0/0.
+ __ xorpd(result_reg, result_reg);
+ __ divsd(result_reg, result_reg);
+ __ jmp(&done);
+
+ // Heap number to XMM conversion.
+ __ bind(&heap_number);
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ // Smi to XMM conversion.
+ __ bind(&load_smi);
+ __ SmiToInteger32(kScratchRegister, input_reg); // Untag smi first.
+ __ cvtlsi2sd(result_reg, kScratchRegister);
+ __ bind(&done);
}
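
EmitNumberUntagD above encodes a three-way decision: a smi is untagged and converted, a heap number has its double payload loaded, undefined becomes NaN (computed as 0/0, which is what the xorpd/divsd pair does), and anything else deoptimizes. A rough scalar model of that decision tree, using an invented tagged representation in place of real heap objects:

#include <cstdio>

enum Kind { kSmi, kHeapNumber, kUndefined, kOther };
struct Tagged { Kind kind; int smi; double number; };  // invented stand-in

// Returns false where the generated code would call DeoptimizeIf.
bool NumberUntagD(const Tagged& value, double* result) {
  double zero = 0.0;
  switch (value.kind) {
    case kSmi:        *result = static_cast<double>(value.smi); return true;
    case kHeapNumber: *result = value.number;                   return true;
    case kUndefined:  *result = zero / zero;                    return true;  // NaN
    default:          return false;
  }
}

int main() {
  Tagged cases[] = {{kSmi, 7, 0.0}, {kHeapNumber, 0, 3.5}, {kUndefined, 0, 0.0}};
  for (int i = 0; i < 3; i++) {
    double d;
    if (NumberUntagD(cases[i], &d)) printf("%g\n", d);  // 7, 3.5, nan
  }
  return 0;
}
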
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 4ce7004..7da4047 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -34,37 +34,15 @@
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
+#include "x64/lithium-gap-resolver-x64.h"
namespace v8 {
namespace internal {
// Forward declarations.
class LDeferredCode;
-class LGapNode;
class SafepointGenerator;
-class LGapResolver BASE_EMBEDDED {
- public:
- LGapResolver();
- const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
- LOperand* marker_operand);
-
- private:
- LGapNode* LookupNode(LOperand* operand);
- bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
- bool CanReach(LGapNode* a, LGapNode* b);
- void RegisterMove(LMoveOperands move);
- void AddResultMove(LOperand* from, LOperand* to);
- void AddResultMove(LGapNode* from, LGapNode* to);
- void ResolveCycle(LGapNode* start, LOperand* marker_operand);
-
- ZoneList<LGapNode*> nodes_;
- ZoneList<LGapNode*> identified_cycles_;
- ZoneList<LMoveOperands> result_;
- int next_visited_id_;
-};
-
-
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@@ -80,10 +58,24 @@
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
- osr_pc_offset_(-1) {
+ osr_pc_offset_(-1),
+ resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
+ // Simple accessors.
+ MacroAssembler* masm() const { return masm_; }
+
+ // Support for converting LOperands to assembler types.
+ Register ToRegister(LOperand* op) const;
+ XMMRegister ToDoubleRegister(LOperand* op) const;
+ bool IsInteger32Constant(LConstantOperand* op) const;
+ int ToInteger32(LConstantOperand* op) const;
+ bool IsTaggedConstant(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
+ Operand ToOperand(LOperand* op) const;
+
+
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@@ -95,7 +87,6 @@
// Deferred code support.
void DoDeferredNumberTagD(LNumberTagD* instr);
- void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
@@ -129,7 +120,6 @@
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
- MacroAssembler* masm() const { return masm_; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@@ -190,13 +180,6 @@
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
- Register ToRegister(LOperand* op) const;
- XMMRegister ToDoubleRegister(LOperand* op) const;
- bool IsInteger32Constant(LConstantOperand* op) const;
- int ToInteger32(LConstantOperand* op) const;
- bool IsTaggedConstant(LConstantOperand* op) const;
- Handle<Object> ToHandle(LConstantOperand* op) const;
- Operand ToOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
void DoMathAbs(LUnaryMathOperation* instr);
@@ -231,8 +214,6 @@
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
- Register temp1,
- Register temp2,
Label* is_not_object,
Label* is_object);
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
new file mode 100644
index 0000000..cedd025
--- /dev/null
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -0,0 +1,320 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_X64)
+
+#include "x64/lithium-gap-resolver-x64.h"
+#include "x64/lithium-codegen-x64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+ : cgen_(owner), moves_(32) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+ ASSERT(moves_.is_empty());
+ // Build up a worklist of moves.
+ BuildInitialMoveList(parallel_move);
+
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands move = moves_[i];
+ // Skip constants to perform them last. They don't block other moves
+ // and skipping such moves with register destinations keeps those
+ // registers free for the whole algorithm.
+ if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+ PerformMove(i);
+ }
+ }
+
+ // Perform the moves with constant sources.
+ for (int i = 0; i < moves_.length(); ++i) {
+ if (!moves_[i].IsEliminated()) {
+ ASSERT(moves_[i].source()->IsConstantOperand());
+ EmitMove(i);
+ }
+ }
+
+ moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+ // Perform a linear sweep of the moves to add them to the initial list of
+ // moves to perform, ignoring any move that is redundant (the source is
+ // the same as the destination, the destination is ignored and
+ // unallocated, or the move was already eliminated).
+ const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+ for (int i = 0; i < moves->length(); ++i) {
+ LMoveOperands move = moves->at(i);
+ if (!move.IsRedundant()) moves_.Add(move);
+ }
+ Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+ // Each call to this function performs a move and deletes it from the move
+ // graph. We first recursively perform any move blocking this one. We
+ // mark a move as "pending" on entry to PerformMove in order to detect
+ // cycles in the move graph. We use operand swaps to resolve cycles,
+ // which means that a call to PerformMove could change any source operand
+ // in the move graph.
+
+ ASSERT(!moves_[index].IsPending());
+ ASSERT(!moves_[index].IsRedundant());
+
+ // Clear this move's destination to indicate a pending move. The actual
+ // destination is saved in a stack-allocated local. Recursion may allow
+ // multiple moves to be pending.
+ ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
+ LOperand* destination = moves_[index].destination();
+ moves_[index].set_destination(NULL);
+
+ // Perform a depth-first traversal of the move graph to resolve
+ // dependencies. Any unperformed, unpending move with a source the same
+ // as this one's destination blocks this one so recursively perform all
+ // such moves.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination) && !other_move.IsPending()) {
+ // Though PerformMove can change any source operand in the move graph,
+ // this call cannot create a blocking move via a swap (this loop does
+ // not miss any). Assume there is a non-blocking move with source A
+ // and this move is blocked on source B and there is a swap of A and
+ // B. Then A and B must be involved in the same cycle (or they would
+ // not be swapped). Since this move's destination is B and there is
+ // only a single incoming edge to an operand, this move must also be
+ // involved in the same cycle. In that case, the blocking move will
+ // be created but will be "pending" when we return from PerformMove.
+ PerformMove(i);
+ }
+ }
+
+ // We are about to resolve this move and don't need it marked as
+ // pending, so restore its destination.
+ moves_[index].set_destination(destination);
+
+ // This move's source may have changed due to swaps to resolve cycles and
+ // so it may now be the last move in the cycle. If so, remove it.
+ if (moves_[index].source()->Equals(destination)) {
+ moves_[index].Eliminate();
+ return;
+ }
+
+ // The move may be blocked on at most one pending move, in which case we
+ // have a cycle. Search for such a blocking move and perform a swap to
+ // resolve it.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(destination)) {
+ ASSERT(other_move.IsPending());
+ EmitSwap(index);
+ return;
+ }
+ }
+
+ // This move is not blocked.
+ EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_ASSERTS
+ // No operand should be the destination for more than one move.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LOperand* destination = moves_[i].destination();
+ for (int j = i + 1; j < moves_.length(); ++j) {
+ SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+ }
+ }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+
+void LGapResolver::EmitMove(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ Register src = cgen_->ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ movq(dst, src);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movq(dst, src);
+ }
+
+ } else if (source->IsStackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ __ movq(dst, src);
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movq(kScratchRegister, src);
+ __ movq(dst, kScratchRegister);
+ }
+
+ } else if (source->IsConstantOperand()) {
+ LConstantOperand* constant_source = LConstantOperand::cast(source);
+ if (destination->IsRegister()) {
+ Register dst = cgen_->ToRegister(destination);
+ if (cgen_->IsInteger32Constant(constant_source)) {
+ __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ } else {
+ ASSERT(destination->IsStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ if (cgen_->IsInteger32Constant(constant_source)) {
+ // Allow top 32 bits of an untagged Integer32 to be arbitrary.
+ __ movl(dst, Immediate(cgen_->ToInteger32(constant_source)));
+ } else {
+ __ Move(dst, cgen_->ToHandle(constant_source));
+ }
+ }
+
+ } else if (source->IsDoubleRegister()) {
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ __ movsd(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ movsd(cgen_->ToOperand(destination), src);
+ }
+ } else if (source->IsDoubleStackSlot()) {
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ __ movsd(cgen_->ToDoubleRegister(destination), src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ __ movsd(xmm0, src);
+ __ movsd(cgen_->ToOperand(destination), xmm0);
+ }
+ } else {
+ UNREACHABLE();
+ }
+
+ moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+ LOperand* source = moves_[index].source();
+ LOperand* destination = moves_[index].destination();
+
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister() && destination->IsRegister()) {
+ // Swap two general-purpose registers.
+ Register src = cgen_->ToRegister(source);
+ Register dst = cgen_->ToRegister(destination);
+ __ xchg(dst, src);
+
+ } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+ (source->IsStackSlot() && destination->IsRegister())) {
+ // Swap a general-purpose register and a stack slot.
+ Register reg =
+ cgen_->ToRegister(source->IsRegister() ? source : destination);
+ Operand mem =
+ cgen_->ToOperand(source->IsRegister() ? destination : source);
+ __ movq(kScratchRegister, mem);
+ __ movq(mem, reg);
+ __ movq(reg, kScratchRegister);
+
+ } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+ (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
+ // Swap two stack slots or two double stack slots.
+ Operand src = cgen_->ToOperand(source);
+ Operand dst = cgen_->ToOperand(destination);
+ __ movsd(xmm0, src);
+ __ movq(kScratchRegister, dst);
+ __ movsd(dst, xmm0);
+ __ movq(src, kScratchRegister);
+
+ } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+ // Swap two double registers.
+ XMMRegister source_reg = cgen_->ToDoubleRegister(source);
+ XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
+ __ movsd(xmm0, source_reg);
+ __ movsd(source_reg, destination_reg);
+ __ movsd(destination_reg, xmm0);
+
+ } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+ // Swap a double register and a double stack slot.
+ ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
+ (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
+ XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+ ? source
+ : destination);
+ LOperand* other = source->IsDoubleRegister() ? destination : source;
+ ASSERT(other->IsDoubleStackSlot());
+ Operand other_operand = cgen_->ToOperand(other);
+ __ movsd(xmm0, other_operand);
+ __ movsd(other_operand, reg);
+ __ movsd(reg, xmm0);
+
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+
+ // The swap of source and destination has executed a move from source to
+ // destination.
+ moves_[index].Eliminate();
+
+ // Any unperformed (including pending) move with a source of either
+ // this move's source or destination needs to have its source
+ // changed to reflect the state of affairs after the swap.
+ for (int i = 0; i < moves_.length(); ++i) {
+ LMoveOperands other_move = moves_[i];
+ if (other_move.Blocks(source)) {
+ moves_[i].set_source(destination);
+ } else if (other_move.Blocks(destination)) {
+ moves_[i].set_source(source);
+ }
+ }
+}
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_X64
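
The new resolver lowers a parallel move (semantically, all sources are read before any destination is written) to a sequence of ordinary moves plus swaps that break cycles. Below is a self-contained sketch of the same recursion on plain integer slots: non-pending blockers are performed first, any blocker left after that is pending and therefore part of a cycle, and a swap resolves it while redirecting every move that read either swapped slot. Constant-source handling and the operand kinds are omitted, and all names are invented for the example:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

struct Move { int src, dst; bool pending, done; };

class SlotResolver {
 public:
  explicit SlotResolver(std::vector<int>* slots) : slots_(slots) {}

  void Resolve(std::vector<Move> moves) {
    moves_ = std::move(moves);
    for (size_t i = 0; i < moves_.size(); ++i) {
      if (!moves_[i].done) Perform(i);
    }
  }

 private:
  bool Blocks(const Move& m, int slot) const { return !m.done && m.src == slot; }

  void Perform(size_t index) {
    const int dst = moves_[index].dst;
    moves_[index].pending = true;  // marks this move to detect cycles
    // Recursively perform every non-pending move that reads our destination.
    for (size_t i = 0; i < moves_.size(); ++i) {
      if (i != index && Blocks(moves_[i], dst) && !moves_[i].pending) Perform(i);
    }
    moves_[index].pending = false;
    // Any remaining blocker is pending: we are in a cycle, so swap.
    for (size_t i = 0; i < moves_.size(); ++i) {
      if (i != index && Blocks(moves_[i], dst)) {
        std::swap((*slots_)[moves_[index].src], (*slots_)[dst]);
        // Redirect moves that read either swapped slot.
        for (size_t j = 0; j < moves_.size(); ++j) {
          if (j == index || moves_[j].done) continue;
          if (moves_[j].src == moves_[index].src) moves_[j].src = dst;
          else if (moves_[j].src == dst) moves_[j].src = moves_[index].src;
        }
        moves_[index].done = true;
        return;
      }
    }
    (*slots_)[dst] = (*slots_)[moves_[index].src];  // unblocked: plain move
    moves_[index].done = true;
  }

  std::vector<int>* slots_;
  std::vector<Move> moves_;
};

int main() {
  std::vector<int> slots = {10, 20, 30};
  SlotResolver resolver(&slots);
  // A cyclic parallel move: slot0 -> slot1 -> slot2 -> slot0.
  resolver.Resolve({{0, 1, false, false}, {1, 2, false, false},
                    {2, 0, false, false}});
  printf("%d %d %d\n", slots[0], slots[1], slots[2]);  // prints: 30 10 20
  return 0;
}
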
diff --git a/src/x64/lithium-gap-resolver-x64.h b/src/x64/lithium-gap-resolver-x64.h
new file mode 100644
index 0000000..d828455
--- /dev/null
+++ b/src/x64/lithium-gap-resolver-x64.h
@@ -0,0 +1,74 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#define V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
+
+#include "v8.h"
+
+#include "lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+ explicit LGapResolver(LCodeGen* owner);
+
+ // Resolve a set of parallel moves, emitting assembler instructions.
+ void Resolve(LParallelMove* parallel_move);
+
+ private:
+ // Build the initial list of moves.
+ void BuildInitialMoveList(LParallelMove* parallel_move);
+
+ // Perform the move at the moves_ index in question (possibly requiring
+ // other moves to satisfy dependencies).
+ void PerformMove(int index);
+
+ // Emit a move and remove it from the move graph.
+ void EmitMove(int index);
+
+ // Execute a move by emitting a swap of two operands. The move from
+ // source to destination is removed from the move graph.
+ void EmitSwap(int index);
+
+ // Verify the move list before performing moves.
+ void Verify();
+
+ LCodeGen* cgen_;
+
+ // List of moves not yet resolved.
+ ZoneList<LMoveOperands> moves_;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index eff6d51..5b7169b 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -651,22 +651,6 @@
}
-LInstruction* LChunkBuilder::SetInstructionPendingDeoptimizationEnvironment(
- LInstruction* instr, int ast_id) {
- ASSERT(instructions_pending_deoptimization_environment_ == NULL);
- ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
- instructions_pending_deoptimization_environment_ = instr;
- pending_deoptimization_ast_id_ = ast_id;
- return instr;
-}
-
-
-void LChunkBuilder::ClearInstructionPendingDeoptimizationEnvironment() {
- instructions_pending_deoptimization_environment_ = NULL;
- pending_deoptimization_ast_id_ = AstNode::kNoNumber;
-}
-
-
LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
HInstruction* hinstr,
CanDeoptimize can_deoptimize) {
@@ -676,8 +660,8 @@
if (hinstr->HasSideEffects()) {
ASSERT(hinstr->next()->IsSimulate());
HSimulate* sim = HSimulate::cast(hinstr->next());
- instr = SetInstructionPendingDeoptimizationEnvironment(
- instr, sim->ast_id());
+ ASSERT(pending_deoptimization_ast_id_ == AstNode::kNoNumber);
+ pending_deoptimization_ast_id_ = sim->ast_id();
}
// If instruction does not have side-effects lazy deoptimization
@@ -974,12 +958,7 @@
} else if (v->IsIsObject()) {
HIsObject* compare = HIsObject::cast(v);
ASSERT(compare->value()->representation().IsTagged());
-
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()),
- temp1,
- temp2);
+ return new LIsObjectAndBranch(UseRegisterAtStart(compare->value()));
} else if (v->IsCompareJSObjectEq()) {
HCompareJSObjectEq* compare = HCompareJSObjectEq::cast(v);
return new LCmpJSObjectEqAndBranch(UseRegisterAtStart(compare->left()),
@@ -1054,8 +1033,7 @@
LInstruction* LChunkBuilder::DoGlobalObject(HGlobalObject* instr) {
- Abort("Unimplemented: %s", "DoGlobalObject");
- return NULL;
+ return DefineAsRegister(new LGlobalObject);
}
@@ -1181,8 +1159,23 @@
LInstruction* LChunkBuilder::DoSub(HSub* instr) {
- Abort("Unimplemented: %s", "DoSub");
- return NULL;
+ if (instr->representation().IsInteger32()) {
+ ASSERT(instr->left()->representation().IsInteger32());
+ ASSERT(instr->right()->representation().IsInteger32());
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseOrConstantAtStart(instr->right());
+ LSubI* sub = new LSubI(left, right);
+ LInstruction* result = DefineSameAsFirst(sub);
+ if (instr->CheckFlag(HValue::kCanOverflow)) {
+ result = AssignEnvironment(result);
+ }
+ return result;
+ } else if (instr->representation().IsDouble()) {
+ return DoArithmeticD(Token::SUB, instr);
+ } else {
+ ASSERT(instr->representation().IsTagged());
+ return DoArithmeticT(Token::SUB, instr);
+ }
}
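
Note how DoSub above only calls AssignEnvironment when HValue::kCanOverflow is set, so the emitted sub can deoptimize on signed overflow rather than silently wrapping. A hedged scalar model of that contract; it detects overflow by widening to 64 bits, whereas the generated code simply tests the CPU overflow flag:

#include <cstdint>
#include <cstdio>

// Returns false where the optimized code would take the deopt path.
bool CheckedSubInt32(int32_t left, int32_t right, int32_t* result) {
  int64_t wide = static_cast<int64_t>(left) - static_cast<int64_t>(right);
  if (wide < INT32_MIN || wide > INT32_MAX) return false;  // overflow: deopt
  *result = static_cast<int32_t>(wide);
  return true;
}

int main() {
  int32_t r = 0;
  printf("ok=%d r=%d\n", CheckedSubInt32(5, 7, &r), r);   // ok=1 r=-2
  printf("ok=%d\n", CheckedSubInt32(INT32_MIN, 1, &r));   // ok=0 -> deopt
  return 0;
}
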
@@ -1243,26 +1236,34 @@
LInstruction* LChunkBuilder::DoCompareJSObjectEq(
HCompareJSObjectEq* instr) {
- Abort("Unimplemented: %s", "DoCompareJSObjectEq");
- return NULL;
+ LOperand* left = UseRegisterAtStart(instr->left());
+ LOperand* right = UseRegisterAtStart(instr->right());
+ LCmpJSObjectEq* result = new LCmpJSObjectEq(left, right);
+ return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoIsNull(HIsNull* instr) {
- Abort("Unimplemented: %s", "DoIsNull");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegisterAtStart(instr->value());
+
+ return DefineAsRegister(new LIsNull(value));
}
LInstruction* LChunkBuilder::DoIsObject(HIsObject* instr) {
- Abort("Unimplemented: %s", "DoIsObject");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseRegister(instr->value());
+
+ return DefineAsRegister(new LIsObject(value));
}
LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
- Abort("Unimplemented: %s", "DoIsSmi");
- return NULL;
+ ASSERT(instr->value()->representation().IsTagged());
+ LOperand* value = UseAtStart(instr->value());
+
+ return DefineAsRegister(new LIsSmi(value));
}
@@ -1316,7 +1317,62 @@
LInstruction* LChunkBuilder::DoChange(HChange* instr) {
- Abort("Unimplemented: %s", "DoChange");
+ Representation from = instr->from();
+ Representation to = instr->to();
+ if (from.IsTagged()) {
+ if (to.IsDouble()) {
+ LOperand* value = UseRegister(instr->value());
+ LNumberUntagD* res = new LNumberUntagD(value);
+ return AssignEnvironment(DefineAsRegister(res));
+ } else {
+ ASSERT(to.IsInteger32());
+ LOperand* value = UseRegister(instr->value());
+ bool needs_check = !instr->value()->type().IsSmi();
+ if (needs_check) {
+ LOperand* xmm_temp =
+ (instr->CanTruncateToInt32() && CpuFeatures::IsSupported(SSE3))
+ ? NULL
+ : FixedTemp(xmm1);
+ LTaggedToI* res = new LTaggedToI(value, xmm_temp);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ } else {
+ return DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ }
+ }
+ } else if (from.IsDouble()) {
+ if (to.IsTagged()) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+
+ // Make sure that temp and result_temp are different registers.
+ LUnallocated* result_temp = TempRegister();
+ LNumberTagD* result = new LNumberTagD(value, temp);
+ return AssignPointerMap(Define(result, result_temp));
+ } else {
+ ASSERT(to.IsInteger32());
+ bool needs_temp = instr->CanTruncateToInt32() &&
+ !CpuFeatures::IsSupported(SSE3);
+ LOperand* value = needs_temp ?
+ UseTempRegister(instr->value()) : UseRegister(instr->value());
+ LOperand* temp = needs_temp ? TempRegister() : NULL;
+ return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
+ }
+ } else if (from.IsInteger32()) {
+ if (to.IsTagged()) {
+ HValue* val = instr->value();
+ LOperand* value = UseRegister(val);
+ if (val->HasRange() && val->range()->IsInSmiRange()) {
+ return DefineSameAsFirst(new LSmiTag(value));
+ } else {
+ LNumberTagI* result = new LNumberTagI(value);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ }
+ } else {
+ ASSERT(to.IsDouble());
+ return DefineAsRegister(new LInteger32ToDouble(Use(instr->value())));
+ }
+ }
+ UNREACHABLE();
return NULL;
}
@@ -1569,12 +1625,11 @@
// If there is an instruction pending a deoptimization environment, create a
// lazy bailout instruction to capture the environment.
- if (pending_deoptimization_ast_id_ == instr->ast_id()) {
+ if (pending_deoptimization_ast_id_ != AstNode::kNoNumber) {
+ ASSERT(pending_deoptimization_ast_id_ == instr->ast_id());
LLazyBailout* lazy_bailout = new LLazyBailout;
LInstruction* result = AssignEnvironment(lazy_bailout);
- instructions_pending_deoptimization_environment_->
- set_deoptimization_environment(result->environment());
- ClearInstructionPendingDeoptimizationEnvironment();
+ pending_deoptimization_ast_id_ = AstNode::kNoNumber;
return result;
}
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 9b7d568..227e98b 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -317,21 +317,10 @@
void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
HValue* hydrogen_value() const { return hydrogen_value_; }
- void set_deoptimization_environment(LEnvironment* env) {
- deoptimization_environment_.set(env);
- }
- LEnvironment* deoptimization_environment() const {
- return deoptimization_environment_.get();
- }
- bool HasDeoptimizationEnvironment() const {
- return deoptimization_environment_.is_set();
- }
-
private:
SetOncePointer<LEnvironment> environment_;
SetOncePointer<LPointerMap> pointer_map_;
HValue* hydrogen_value_;
- SetOncePointer<LEnvironment> deoptimization_environment_;
};
@@ -732,23 +721,20 @@
};
-class LIsObject: public LTemplateInstruction<1, 1, 1> {
+class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
- LIsObject(LOperand* value, LOperand* temp) {
+ explicit LIsObject(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsObject, "is-object")
};
-class LIsObjectAndBranch: public LControlInstruction<1, 2> {
+class LIsObjectAndBranch: public LControlInstruction<1, 0> {
public:
- LIsObjectAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+ explicit LIsObjectAndBranch(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
- temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index a690bd5..2fc825f 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -375,6 +375,16 @@
}
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ Runtime::Function* function = Runtime::FunctionForId(id);
+ Set(rax, function->nargs);
+ movq(rbx, ExternalReference(function));
+ CEntryStub ces(1);
+ ces.SaveDoubles();
+ CallStub(&ces);
+}
+
+
MaybeObject* MacroAssembler::TryCallRuntime(Runtime::FunctionId id,
int num_arguments) {
return TryCallRuntime(Runtime::FunctionForId(id), num_arguments);
@@ -967,6 +977,27 @@
}
+void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
+ if (dst.is(src)) {
+ andl(dst, Immediate(kSmiTagMask));
+ } else {
+ movl(dst, Immediate(kSmiTagMask));
+ andl(dst, src);
+ }
+}
+
+
+void MacroAssembler::CheckSmiToIndicator(Register dst, const Operand& src) {
+ if (!src.AddressUsesRegister(dst)) {
+ movl(dst, Immediate(kSmiTagMask));
+ andl(dst, src);
+ } else {
+ movl(dst, src);
+ andl(dst, Immediate(kSmiTagMask));
+ }
+}
+
+
void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
if (constant->value() == 0) {
if (!dst.is(src)) {
@@ -1427,6 +1458,34 @@
}
+void MacroAssembler::Dropad() {
+ const int kRegistersPushedByPushad = 11;
+ addq(rsp, Immediate(kRegistersPushedByPushad * kPointerSize));
+}
+
+
+// Order in which general registers are pushed by Pushad:
+// rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
+ 0,
+ 1,
+ 2,
+ 3,
+ -1,
+ -1,
+ 4,
+ 5,
+ 6,
+ 7,
+ -1,
+ 8,
+ 9,
+ -1,
+ 10,
+ -1
+};
+
+
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
@@ -1775,12 +1834,24 @@
}
-void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space) {
+void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
+ bool save_doubles) {
#ifdef _WIN64
- const int kShaddowSpace = 4;
- arg_stack_space += kShaddowSpace;
+ const int kShadowSpace = 4;
+ arg_stack_space += kShadowSpace;
#endif
- if (arg_stack_space > 0) {
+ // Optionally save all XMM registers.
+ if (save_doubles) {
+ CpuFeatures::Scope scope(SSE2);
+ int space = XMMRegister::kNumRegisters * kDoubleSize +
+ arg_stack_space * kPointerSize;
+ subq(rsp, Immediate(space));
+ int offset = -2 * kPointerSize;
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ XMMRegister reg = XMMRegister::FromAllocationIndex(i);
+ movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+ }
+ } else if (arg_stack_space > 0) {
subq(rsp, Immediate(arg_stack_space * kPointerSize));
}
@@ -1797,7 +1868,7 @@
}
-void MacroAssembler::EnterExitFrame(int arg_stack_space) {
+void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) {
EnterExitFramePrologue(true);
// Setup argv in callee-saved register r12. It is reused in LeaveExitFrame,
@@ -1805,25 +1876,31 @@
int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
lea(r12, Operand(rbp, r14, times_pointer_size, offset));
- EnterExitFrameEpilogue(arg_stack_space);
+ EnterExitFrameEpilogue(arg_stack_space, save_doubles);
}
void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
EnterExitFramePrologue(false);
- EnterExitFrameEpilogue(arg_stack_space);
+ EnterExitFrameEpilogue(arg_stack_space, false);
}
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
// Registers:
// r12 : argv
-
+ if (save_doubles) {
+ int offset = -2 * kPointerSize;
+ for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) {
+ XMMRegister reg = XMMRegister::FromAllocationIndex(i);
+ movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+ }
+ }
// Get the return address from the stack and restore the frame pointer.
movq(rcx, Operand(rbp, 1 * kPointerSize));
movq(rbp, Operand(rbp, 0 * kPointerSize));
- // Pop everything up to and including the arguments and the receiver
+ // Drop everything up to and including the arguments and the receiver
// from the caller stack.
lea(rsp, Operand(r12, 1 * kPointerSize));
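
In the save_doubles path above, each allocatable XMM register is spilled to a fixed rbp-relative slot at offset - (i + 1) * kDoubleSize with offset = -2 * kPointerSize, and LeaveExitFrame restores from the same slots. A small sketch that just prints that layout; the register count here is illustrative rather than taken from XMMRegister::kNumAllocatableRegisters:

#include <cstdio>

int main() {
  const int kPointerSize = 8, kDoubleSize = 8;   // x64 assumptions
  const int kNumAllocatableXMM = 15;             // illustrative count
  const int base = -2 * kPointerSize;            // below saved rbp slots
  for (int i = 0; i < kNumAllocatableXMM; i++) {
    // Mirrors Operand(rbp, offset - ((i + 1) * kDoubleSize)) above.
    printf("allocation index %2d saved at rbp%+d\n",
           i, base - (i + 1) * kDoubleSize);     // -24, -32, -40, ...
  }
  return 0;
}
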
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index b2d085f..3536911 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -152,7 +152,7 @@
//
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
- void EnterExitFrame(int arg_stack_space = 0);
+ void EnterExitFrame(int arg_stack_space = 0, bool save_doubles = false);
// Enter specific kind of exit frame. Allocates arg_stack_space * kPointerSize
// memory (not GCed) on the stack accessible via StackSpaceOperand.
@@ -161,20 +161,20 @@
// Leave the current exit frame. Expects/provides the return value in
// register rax:rdx (untouched) and the pointer to the first
// argument in register rsi.
- void LeaveExitFrame();
+ void LeaveExitFrame(bool save_doubles = false);
// Leave the current exit frame. Expects/provides the return value in
// register rax (untouched).
void LeaveApiExitFrame();
// Push and pop the registers that can hold pointers.
- void PushSafepointRegisters() { UNIMPLEMENTED(); }
- void PopSafepointRegisters() { UNIMPLEMENTED(); }
+ void PushSafepointRegisters() { Pushad(); }
+ void PopSafepointRegisters() { Popad(); }
static int SafepointRegisterStackIndex(int reg_code) {
- UNIMPLEMENTED();
- return 0;
+ return kSafepointPushRegisterIndices[reg_code];
}
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -301,6 +301,11 @@
// conversion to a smi.
Condition CheckUInteger32ValidSmiValue(Register src);
+ // Check whether src is a smi, and set dst to zero if it is a smi
+ // and to one if it isn't.
+ void CheckSmiToIndicator(Register dst, Register src);
+ void CheckSmiToIndicator(Register dst, const Operand& src);
+
// Test-and-jump functions. Typically combines a check function
// above with a conditional jump.
@@ -597,6 +602,9 @@
// (kScratchRegister, kSmiConstantRegister, kRootRegister).
void Pushad();
void Popad();
+ // Sets the stack pointer as if Popad had been performed, without
+ // actually loading the registers.
+ void Dropad();
// Compare object type for heap object.
// Always use unsigned comparisons: above and below, not less and greater.
@@ -812,6 +820,9 @@
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
+ // Call a runtime function, saving the values of the XMM registers.
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id);
+
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
@@ -931,6 +942,9 @@
bool allow_stub_calls() { return allow_stub_calls_; }
private:
+ // Order in which general registers are pushed by Pushad.
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
+ static int kSafepointPushRegisterIndices[Register::kNumRegisters];
bool generating_stub_;
bool allow_stub_calls_;
@@ -961,7 +975,7 @@
// Allocates arg_stack_space * kPointerSize memory (not GCed) on the stack
// accessible via StackSpaceOperand.
- void EnterExitFrameEpilogue(int arg_stack_space);
+ void EnterExitFrameEpilogue(int arg_stack_space, bool save_doubles);
void LeaveExitFrameEpilogue();
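
SafepointRegisterStackIndex above is now a plain table lookup: kSafepointPushRegisterIndices maps an x64 register code to its position among the eleven registers Pushad pushes, with -1 marking the registers Pushad skips. A standalone sketch reproducing the table from this patch; the register-name strings are added only for printing:

#include <cstdio>

static const int kIndices[16] = {0, 1, 2, 3, -1, -1, 4, 5,
                                 6, 7, -1, 8, 9, -1, 10, -1};
static const char* kNames[16] = {"rax", "rcx", "rdx", "rbx", "rsp", "rbp",
                                 "rsi", "rdi", "r8",  "r9",  "r10", "r11",
                                 "r12", "r13", "r14", "r15"};

int main() {
  for (int code = 0; code < 16; code++) {
    if (kIndices[code] >= 0)
      printf("%-3s -> pushed slot %d\n", kNames[code], kIndices[code]);
    else
      printf("%-3s -> not pushed by Pushad\n", kNames[code]);
  }
  return 0;
}
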
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 5d2d9cc..b056403 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -77,12 +77,9 @@
# Tests that time out with crankshaft.
test-api/Threading: SKIP
-# BUG(1049): Currently no deoptimization support.
+# BUG(1069): Context serialization fails on optimized functions.
test-serialize/ContextSerialization: SKIP
test-serialize/ContextDeserialization: SKIP
-test-debug/BreakPointReturn: SKIP
-test-debug/DebugStepLinearMixedICs: SKIP
-test-debug/DebugConditional: SKIP
##############################################################################
[ $arch == arm ]
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index de00fbb..90f4996 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -2383,6 +2383,10 @@
CompileRun("asdf;");
CompileRun("ReferenceError.prototype.constructor = void 0;");
CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype.__proto__ = new Object();");
+ CompileRun("asdf;");
+ CompileRun("ReferenceError.prototype = new Object();");
+ CompileRun("asdf;");
v8::Handle<Value> string = CompileRun("try { asdf; } catch(e) { e + ''; }");
CHECK(string->Equals(v8_str("Whoops")));
v8::V8::RemoveMessageListeners(check_message);
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index f100b73..5d292df 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -48,6 +48,12 @@
using v8::internal::rdx;
using v8::internal::rbp;
using v8::internal::rsp;
+using v8::internal::r8;
+using v8::internal::r9;
+using v8::internal::r12;
+using v8::internal::r13;
+using v8::internal::times_1;
+
using v8::internal::FUNCTION_CAST;
using v8::internal::CodeDesc;
using v8::internal::less_equal;
@@ -289,4 +295,47 @@
CHECK_EQ(1, result);
}
+
+TEST(OperandRegisterDependency) {
+ int offsets[4] = {0, 1, 0xfed, 0xbeefcad};
+ for (int i = 0; i < 4; i++) {
+ int offset = offsets[i];
+ CHECK(Operand(rax, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rax, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rax, offset).AddressUsesRegister(rcx));
+
+ CHECK(Operand(rax, rax, times_1, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rax, rax, times_1, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rax, rax, times_1, offset).AddressUsesRegister(rcx));
+
+ CHECK(Operand(rax, rcx, times_1, offset).AddressUsesRegister(rax));
+ CHECK(Operand(rax, rcx, times_1, offset).AddressUsesRegister(rcx));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(r9));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(rdx));
+ CHECK(!Operand(rax, rcx, times_1, offset).AddressUsesRegister(rsp));
+
+ CHECK(Operand(rsp, offset).AddressUsesRegister(rsp));
+ CHECK(!Operand(rsp, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rsp, offset).AddressUsesRegister(r12));
+
+ CHECK(Operand(rbp, offset).AddressUsesRegister(rbp));
+ CHECK(!Operand(rbp, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rbp, offset).AddressUsesRegister(r13));
+
+ CHECK(Operand(rbp, rax, times_1, offset).AddressUsesRegister(rbp));
+ CHECK(Operand(rbp, rax, times_1, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(rcx));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(r13));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(r8));
+ CHECK(!Operand(rbp, rax, times_1, offset).AddressUsesRegister(rsp));
+
+ CHECK(Operand(rsp, rbp, times_1, offset).AddressUsesRegister(rsp));
+ CHECK(Operand(rsp, rbp, times_1, offset).AddressUsesRegister(rbp));
+ CHECK(!Operand(rsp, rbp, times_1, offset).AddressUsesRegister(rax));
+ CHECK(!Operand(rsp, rbp, times_1, offset).AddressUsesRegister(r12));
+ CHECK(!Operand(rsp, rbp, times_1, offset).AddressUsesRegister(r13));
+ }
+}
+
#undef __
diff --git a/test/es5conform/es5conform.status b/test/es5conform/es5conform.status
index a51bd30..47b6887 100644
--- a/test/es5conform/es5conform.status
+++ b/test/es5conform/es5conform.status
@@ -41,11 +41,6 @@
# We are compatible with Safari and Firefox.
chapter11/11.1/11.1.5: UNIMPLEMENTED
-# Delete returns true in eval even when it should return false.
-# Please see http://code.google.com/p/v8/issues/detail?id=759
-chapter11/11.4/11.4.1//11.4.1-4.a-5: FAIL
-chapter11/11.4/11.4.1//11.4.1-4.a-7: FAIL
-
# We do not have a global object called 'global' as required by tests.
chapter15/15.1: FAIL_OK
diff --git a/test/mjsunit/debug-evaluate-locals.js b/test/mjsunit/debug-evaluate-locals.js
index 8430bd3..4b87829 100644
--- a/test/mjsunit/debug-evaluate-locals.js
+++ b/test/mjsunit/debug-evaluate-locals.js
@@ -34,18 +34,18 @@
function checkFrame0(name, value) {
- assertTrue(name == 'a' || name == 'b', 'frame0 name');
+ assertTrue(name == 'a' || name == 'b');
if (name == 'a') {
assertEquals(1, value);
- } else if (name == 'b') {
+ }
+ if (name == 'b') {
assertEquals(2, value);
}
}
function checkFrame1(name, value) {
- assertTrue(name == '.arguments' || name == 'arguments' || name == 'a',
- 'frame1 name');
+ assertTrue(name == '.arguments' || name == 'a');
if (name == 'a') {
assertEquals(3, value);
}
@@ -53,10 +53,12 @@
function checkFrame2(name, value) {
- assertTrue(name == 'a' || name == 'b', 'frame2 name');
+ assertTrue(name == '.arguments' || name == 'a' ||
+ name == 'arguments' || name == 'b');
if (name == 'a') {
assertEquals(5, value);
- } else if (name == 'b') {
+ }
+ if (name == 'b') {
assertEquals(0, value);
}
}
@@ -71,17 +73,18 @@
checkFrame0(frame0.localName(0), frame0.localValue(0).value());
checkFrame0(frame0.localName(1), frame0.localValue(1).value());
- // Frame 1 has normal variables a and arguments (and the .arguments
- // variable).
+ // Frame 1 has normal variable a (and the .arguments variable).
var frame1 = exec_state.frame(1);
checkFrame1(frame1.localName(0), frame1.localValue(0).value());
checkFrame1(frame1.localName(1), frame1.localValue(1).value());
- checkFrame1(frame1.localName(2), frame1.localValue(2).value());
- // Frame 2 has normal variables a and b.
+ // Frame 2 has normal variables a and b (and both the .arguments and
+ // arguments variable).
var frame2 = exec_state.frame(2);
checkFrame2(frame2.localName(0), frame2.localValue(0).value());
checkFrame2(frame2.localName(1), frame2.localValue(1).value());
+ checkFrame2(frame2.localName(2), frame2.localValue(2).value());
+ checkFrame2(frame2.localName(3), frame2.localValue(3).value());
// Evaluating a and b on frames 0, 1 and 2 produces 1, 2, 3, 4, 5 and 6.
assertEquals(1, exec_state.frame(0).evaluate('a').value());
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index 39ddf5a..057c0fa 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -119,9 +119,6 @@
# BUG (1026) This test is currently flaky.
compiler/simple-osr: SKIP
-# BUG(1049): Currently no deoptimization support.
-debug-liveedit-newsource: SKIP
-debug-liveedit-1: SKIP
##############################################################################
[ $arch == mips ]
diff --git a/test/mjsunit/regress/regress-70066.js b/test/mjsunit/regress/regress-70066.js
new file mode 100644
index 0000000..b8386a7
--- /dev/null
+++ b/test/mjsunit/regress/regress-70066.js
@@ -0,0 +1,146 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Regression test for Chromium issue 70066. Delete should work properly
+// from inside 'with' scopes.
+// http://code.google.com/p/chromium/issues/detail?id=70066
+
+x = 0;
+
+// Delete on a slot from a function's own context.
+function test1() {
+ var value = 1;
+ var status;
+ with ({}) { status = delete value; }
+ return value + ":" + status;
+}
+
+assertEquals("1:false", test1(), "test1");
+assertEquals(0, x, "test1"); // Global x is undisturbed.
+
+
+// Delete on a slot from an outer context.
+function test2() {
+ function f() {
+ with ({}) { return delete value; }
+ }
+ var value = 2;
+ var status = f();
+ return value + ":" + status;
+}
+
+assertEquals("2:false", test2(), "test2");
+assertEquals(0, x, "test2"); // Global x is undisturbed.
+
+
+// Delete on an argument. This hits the same code paths as test5 because
+// 'with' forces all parameters to be indirected through the arguments
+// object.
+function test3(value) {
+ var status;
+ with ({}) { status = delete value; }
+ return value + ":" + status;
+}
+
+assertEquals("undefined:true", test3(3), "test3");
+assertEquals(0, x, "test3"); // Global x is undisturbed.
+
+
+// Delete on an argument from an outer context. This hits the same code
+// path as test2.
+function test4(value) {
+ function f() {
+ with ({}) { return delete value; }
+ }
+ var status = f();
+ return value + ":" + status;
+}
+
+assertEquals("4:false", test4(4), "test4");
+assertEquals(0, x, "test4"); // Global x is undisturbed.
+
+
+// Delete on an argument found in the arguments object. Such properties are
+// normally DONT_DELETE in JavaScript but deletion is allowed by V8.
+function test5(value) {
+ var status;
+ with ({}) { status = delete value; }
+ return arguments[0] + ":" + status;
+}
+
+assertEquals("undefined:true", test5(5), "test5");
+assertEquals(0, x, "test5"); // Global x is undisturbed.
+
+function test6(value) {
+ function f() {
+ with ({}) { return delete value; }
+ }
+ var status = f();
+ return arguments[0] + ":" + status;
+}
+
+assertEquals("undefined:true", test6(6), "test6");
+assertEquals(0, x, "test6"); // Global x is undisturbed.
+
+
+// Delete on a property found on 'with' object.
+function test7(object) {
+ with (object) { return delete value; }
+}
+
+var o = {value: 7};
+assertEquals(true, test7(o), "test7");
+assertEquals(void 0, o.value, "test7");
+assertEquals(0, x, "test7"); // Global x is undisturbed.
+
+
+// Delete on a global property.
+function test8() {
+ with ({}) { return delete x; }
+}
+
+assertEquals(true, test8(), "test8");
+assertThrows("x", "test8"); // Global x should be deleted.
+
+
+// Delete on a property that is not found anywhere.
+function test9() {
+ with ({}) { return delete x; }
+}
+
+assertThrows("x", "test9"); // Make sure it's not there.
+assertEquals(true, test9(), "test9");
+
+
+// Delete on a DONT_DELETE property of the global object.
+var y = 10;
+function test10() {
+ with ({}) { return delete y; }
+}
+
+assertEquals(false, test10(), "test10");
+assertEquals(10, y, "test10");
diff --git a/test/mjsunit/strict-mode.js b/test/mjsunit/strict-mode.js
index 924b34f..83a22cc 100644
--- a/test/mjsunit/strict-mode.js
+++ b/test/mjsunit/strict-mode.js
@@ -115,3 +115,17 @@
}
with ({}) {};
}
+
+// Octal literals
+CheckStrictMode("var x = 012");
+CheckStrictMode("012");
+CheckStrictMode("'Hello octal\\032'");
+CheckStrictMode("function octal() { return 012; }");
+CheckStrictMode("function octal() { return '\\032'; }");
+
+// Octal before "use strict"
+assertThrows('\
+ function strict() {\
+ "octal\\032directive";\
+ "use strict";\
+ }', SyntaxError);
diff --git a/test/mjsunit/string-charcodeat.js b/test/mjsunit/string-charcodeat.js
index fb7ab9a..f18d0a5 100644
--- a/test/mjsunit/string-charcodeat.js
+++ b/test/mjsunit/string-charcodeat.js
@@ -205,3 +205,23 @@
assertEquals(49, long.charCodeAt(0), 36);
assertEquals(56, long.charCodeAt(65535), 37);
assertTrue(isNaN(long.charCodeAt(65536)), 38);
+
+
+// Test crankshaft code when the function is set directly on the
+// string prototype object instead of the hidden prototype object.
+// See http://code.google.com/p/v8/issues/detail?id=1070
+
+String.prototype.x = String.prototype.charCodeAt;
+
+function directlyOnPrototype() {
+ assertEquals(97, "a".x(0));
+ assertEquals(98, "b".x(0));
+ assertEquals(99, "c".x(0));
+ assertEquals(97, "a".x(0));
+ assertEquals(98, "b".x(0));
+ assertEquals(99, "c".x(0));
+}
+
+for (var i = 0; i < 10000; i++) {
+ directlyOnPrototype();
+}
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index fe45266..c5af016 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -691,6 +691,8 @@
'../../src/x64/jump-target-x64.cc',
'../../src/x64/lithium-codegen-x64.cc',
'../../src/x64/lithium-codegen-x64.h',
+ '../../src/x64/lithium-gap-resolver-x64.cc',
+ '../../src/x64/lithium-gap-resolver-x64.h',
'../../src/x64/lithium-x64.cc',
'../../src/x64/lithium-x64.h',
'../../src/x64/macro-assembler-x64.cc',
@@ -747,8 +749,7 @@
'sources': [
'../../src/platform-win32.cc',
],
- # 4355, 4800 came from common.vsprops
- 'msvs_disabled_warnings': [4355, 4800],
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
'link_settings': {
'libraries': [ '-lwinmm.lib' ],
},