Version 3.2.6
Fixed Xcode build warning in shell.cc (out-of-order initialization).
Fixed null-pointer dereference in the compiler when running without
SSE3 support (Chromium issue 77654).
Fixed x64 compilation error due to some dead code. (issue 1286)
Introduced a scons target to build the preparser stand-alone example.
Made V8 build and pass all tests on FreeBSD.
Review URL: http://codereview.chromium.org/6759025
git-svn-id: http://v8.googlecode.com/svn/trunk@7427 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
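
The shell.cc warning fixed above is the usual -Wreorder diagnostic: C++ constructs non-static members in declaration order, regardless of the order they appear in the constructor's mem-initializer list, so Xcode's compiler warns when the two disagree. A minimal self-contained illustration of the warning and its fix (hypothetical class, not the actual shell.cc code):

// Hypothetical example of an "out of order initialization" warning.
class Counter {
 public:
  // Would warn: limit_ is listed before count_, but count_ is declared first,
  // so count_ is actually initialized first and would read an uninitialized
  // limit_ here:
  //   Counter() : limit_(10), count_(limit_) {}
  // Fixed: the initializer list follows the declaration order below.
  Counter() : count_(0), limit_(10) {}

 private:
  int count_;  // declared first, therefore constructed first
  int limit_;  // constructed second, regardless of the list order
};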
diff --git a/src/SConscript b/src/SConscript
index 3a01dd2..3b9968e 100755
--- a/src/SConscript
+++ b/src/SConscript
@@ -163,18 +163,23 @@
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
+ jump-target-light.cc
+ virtual-frame-light.cc
mips/assembler-mips.cc
mips/builtins-mips.cc
+ mips/code-stubs-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
+ mips/deoptimizer-mips.cc
mips/disasm-mips.cc
- mips/full-codegen-mips.cc
mips/frames-mips.cc
+ mips/full-codegen-mips.cc
mips/ic-mips.cc
mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
+ mips/regexp-macro-assembler-mips.cc
mips/register-allocator-mips.cc
mips/stub-cache-mips.cc
mips/virtual-frame-mips.cc
@@ -245,6 +250,20 @@
}
+PREPARSER_SOURCES = {
+ 'all': Split("""
+ allocation.cc
+ hashmap.cc
+ preparse-data.cc
+ preparser.cc
+ preparser-api.cc
+ scanner-base.cc
+ token.cc
+ unicode.cc
+ """)
+}
+
+
D8_FILES = {
'all': [
'd8.cc', 'd8-debug.cc'
@@ -330,6 +349,9 @@
source_objs = context.ConfigureObject(env, source_files)
non_snapshot_files = [source_objs]
+ preparser_source_files = context.GetRelevantSources(PREPARSER_SOURCES)
+ preparser_objs = context.ConfigureObject(env, preparser_source_files)
+
# Create snapshot if necessary. For cross compilation you should either
# do without snapshots and take the performance hit or you should build a
# host VM with the simulator=arm and snapshot=on options and then take the
@@ -350,8 +372,8 @@
else:
snapshot_obj = empty_snapshot_obj
library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
- return (library_objs, d8_objs, [mksnapshot])
+ return (library_objs, d8_objs, [mksnapshot], preparser_objs)
-(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
-Return('library_objs d8_objs mksnapshot')
+(library_objs, d8_objs, mksnapshot, preparser_objs) = ConfigureObjectFiles()
+Return('library_objs d8_objs mksnapshot preparser_objs')
diff --git a/src/accessors.cc b/src/accessors.cc
index af344d1..e33b4d7 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -100,14 +100,15 @@
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
+ Isolate* isolate = object->GetIsolate();
value = FlattenNumber(value);
// Need to call methods that may trigger GC.
- HandleScope scope;
+ HandleScope scope(isolate);
// Protect raw pointers.
- Handle<JSObject> object_handle(object);
- Handle<Object> value_handle(value);
+ Handle<JSObject> object_handle(object, isolate);
+ Handle<Object> value_handle(value, isolate);
bool has_exception;
Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
@@ -126,13 +127,13 @@
// This means one of the object's prototypes is a JSArray and
// the object does not have a 'length' property.
// Calling SetProperty causes an infinite loop.
- return object->SetLocalPropertyIgnoreAttributes(HEAP->length_symbol(),
- value, NONE);
+ return object->SetLocalPropertyIgnoreAttributes(
+ isolate->heap()->length_symbol(), value, NONE);
}
}
- return Isolate::Current()->Throw(
- *FACTORY->NewRangeError("invalid_array_length",
- HandleVector<Object>(NULL, 0)));
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
}
@@ -315,15 +316,18 @@
MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
- HandleScope scope;
- Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
+ JSValue* wrapper = JSValue::cast(object);
+ Isolate* isolate = wrapper->GetIsolate();
+ HandleScope scope(isolate);
+ Handle<Script> script(Script::cast(wrapper->value()), isolate);
InitScriptLineEnds(script);
ASSERT(script->line_ends()->IsFixedArray());
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
// We do not want anyone to modify this array from JS.
- ASSERT(*line_ends == HEAP->empty_fixed_array() ||
- line_ends->map() == HEAP->fixed_cow_array_map());
- Handle<JSArray> js_array = FACTORY->NewJSArrayWithElements(line_ends);
+ ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
+ line_ends->map() == isolate->heap()->fixed_cow_array_map());
+ Handle<JSArray> js_array =
+ isolate->factory()->NewJSArrayWithElements(line_ends);
return *js_array;
}
@@ -444,9 +448,10 @@
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
+ Heap* heap = Isolate::Current()->heap();
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return HEAP->undefined_value();
+ if (!found_it) return heap->undefined_value();
while (!function->should_have_prototype()) {
found_it = false;
function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
@@ -457,7 +462,7 @@
if (!function->has_prototype()) {
Object* prototype;
- { MaybeObject* maybe_prototype = HEAP->AllocateFunctionPrototype(function);
+ { MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
Object* result;
@@ -472,12 +477,13 @@
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
Object* value,
void*) {
+ Heap* heap = object->GetHeap();
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
- if (!found_it) return HEAP->undefined_value();
+ if (!found_it) return heap->undefined_value();
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
- return object->SetLocalPropertyIgnoreAttributes(HEAP->prototype_symbol(),
+ return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
value,
NONE);
}
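
The accessors.cc hunks above illustrate the pattern applied throughout this change: rather than reaching for the HEAP and FACTORY shortcuts (which expand to Isolate::Current()), the isolate is derived from a heap object already in hand, its heap() and factory() are used directly, and HandleScope and Handle constructors receive that isolate explicitly. A stripped-down, self-contained sketch of the direction of the refactoring (hypothetical types, not V8's actual classes):

// Hypothetical types standing in for V8's Isolate/Heap/HeapObject.
struct Heap {
  int undefined_value() const { return -1; }
};

struct Isolate {
  Heap* heap() { return &heap_; }
  Heap heap_;
};

struct HeapObject {
  Isolate* GetIsolate() const { return isolate_; }
  Isolate* isolate_;
};

// Before: int Get(HeapObject* o) { return HEAP->undefined_value(); }
// After: the per-isolate state comes from the object itself, so the code no
// longer depends on a process-global current isolate.
int Get(HeapObject* o) {
  return o->GetIsolate()->heap()->undefined_value();
}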
diff --git a/src/allocation.cc b/src/allocation.cc
index 27415c6..119b087 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -25,58 +25,16 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-#include "isolate.h"
-#include "allocation.h"
-
-/* TODO(isolates): this is what's included in bleeding_edge
- including of v8.h was replaced with these in
- http://codereview.chromium.org/5005001/
- we need Isolate and Isolate needs a lot more so I'm including v8.h back.
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
#include "allocation.h"
#include "utils.h"
-*/
namespace v8 {
namespace internal {
-#ifdef DEBUG
-
-NativeAllocationChecker::NativeAllocationChecker(
- NativeAllocationChecker::NativeAllocationAllowed allowed)
- : allowed_(allowed) {
- if (allowed == DISALLOW) {
- Isolate* isolate = Isolate::Current();
- isolate->set_allocation_disallowed(isolate->allocation_disallowed() + 1);
- }
-}
-
-
-NativeAllocationChecker::~NativeAllocationChecker() {
- Isolate* isolate = Isolate::Current();
- if (allowed_ == DISALLOW) {
- isolate->set_allocation_disallowed(isolate->allocation_disallowed() - 1);
- }
- ASSERT(isolate->allocation_disallowed() >= 0);
-}
-
-
-bool NativeAllocationChecker::allocation_allowed() {
- // TODO(isolates): either find a way to make this work that doesn't
- // require initializing an isolate before we can use malloc or drop
- // it completely.
- return true;
- // return Isolate::Current()->allocation_disallowed() == 0;
-}
-
-#endif // DEBUG
-
-
void* Malloced::New(size_t size) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
void* result = malloc(size);
if (result == NULL) {
v8::internal::FatalProcessOutOfMemory("Malloced operator new");
@@ -142,77 +100,6 @@
}
-void Isolate::PreallocatedStorageInit(size_t size) {
- ASSERT(free_list_.next_ == &free_list_);
- ASSERT(free_list_.previous_ == &free_list_);
- PreallocatedStorage* free_chunk =
- reinterpret_cast<PreallocatedStorage*>(new char[size]);
- free_list_.next_ = free_list_.previous_ = free_chunk;
- free_chunk->next_ = free_chunk->previous_ = &free_list_;
- free_chunk->size_ = size - sizeof(PreallocatedStorage);
- preallocated_storage_preallocated_ = true;
-}
-
-
-void* Isolate::PreallocatedStorageNew(size_t size) {
- if (!preallocated_storage_preallocated_) {
- return FreeStoreAllocationPolicy::New(size);
- }
- ASSERT(free_list_.next_ != &free_list_);
- ASSERT(free_list_.previous_ != &free_list_);
-
- size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
- // Search for exact fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ == size) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Search for first fit.
- for (PreallocatedStorage* storage = free_list_.next_;
- storage != &free_list_;
- storage = storage->next_) {
- if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
- storage->Unlink();
- storage->LinkTo(&in_use_list_);
- PreallocatedStorage* left_over =
- reinterpret_cast<PreallocatedStorage*>(
- reinterpret_cast<char*>(storage + 1) + size);
- left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
- ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
- storage->size_);
- storage->size_ = size;
- left_over->LinkTo(&free_list_);
- return reinterpret_cast<void*>(storage + 1);
- }
- }
- // Allocation failure.
- ASSERT(false);
- return NULL;
-}
-
-
-// We don't attempt to coalesce.
-void Isolate::PreallocatedStorageDelete(void* p) {
- if (p == NULL) {
- return;
- }
- if (!preallocated_storage_preallocated_) {
- FreeStoreAllocationPolicy::Delete(p);
- return;
- }
- PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
- ASSERT(storage->next_->previous_ == storage);
- ASSERT(storage->previous_->next_ == storage);
- storage->Unlink();
- storage->LinkTo(&free_list_);
-}
-
-
void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
next_ = other->next_;
other->next_->previous_ = this;
diff --git a/src/allocation.h b/src/allocation.h
index d7bbbb8..75aba35 100644
--- a/src/allocation.h
+++ b/src/allocation.h
@@ -39,25 +39,6 @@
// processing.
void FatalProcessOutOfMemory(const char* message);
-// A class that controls whether allocation is allowed. This is for
-// the C++ heap only!
-class NativeAllocationChecker {
- public:
- enum NativeAllocationAllowed { ALLOW, DISALLOW };
-#ifdef DEBUG
- explicit NativeAllocationChecker(NativeAllocationAllowed allowed);
- ~NativeAllocationChecker();
- static bool allocation_allowed();
- private:
- // This flag applies to this particular instance.
- NativeAllocationAllowed allowed_;
-#else
- explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed) {}
- static inline bool allocation_allowed() { return true; }
-#endif
-};
-
-
// Superclass for classes managed with new & delete.
class Malloced {
public:
@@ -101,7 +82,6 @@
template <typename T>
static T* NewArray(int size) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
T* result = new T[size];
if (result == NULL) Malloced::FatalProcessOutOfMemory();
return result;
diff --git a/src/api.cc b/src/api.cc
index dcf8e17..0d5e4f0 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -5288,34 +5288,6 @@
}
-static i::HeapGraphPath* ToInternal(const HeapGraphPath* path) {
- return const_cast<i::HeapGraphPath*>(
- reinterpret_cast<const i::HeapGraphPath*>(path));
-}
-
-
-int HeapGraphPath::GetEdgesCount() const {
- return ToInternal(this)->path()->length();
-}
-
-
-const HeapGraphEdge* HeapGraphPath::GetEdge(int index) const {
- return reinterpret_cast<const HeapGraphEdge*>(
- ToInternal(this)->path()->at(index));
-}
-
-
-const HeapGraphNode* HeapGraphPath::GetFromNode() const {
- return GetEdgesCount() > 0 ? GetEdge(0)->GetFromNode() : NULL;
-}
-
-
-const HeapGraphNode* HeapGraphPath::GetToNode() const {
- const int count = GetEdgesCount();
- return count > 0 ? GetEdge(count - 1)->GetToNode() : NULL;
-}
-
-
static i::HeapEntry* ToInternal(const HeapGraphNode* entry) {
return const_cast<i::HeapEntry*>(
reinterpret_cast<const i::HeapEntry*>(entry));
@@ -5397,21 +5369,6 @@
}
-int HeapGraphNode::GetRetainingPathsCount() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainingPathsCount");
- return ToInternal(this)->GetRetainingPaths()->length();
-}
-
-
-const HeapGraphPath* HeapGraphNode::GetRetainingPath(int index) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::GetRetainingPath");
- return reinterpret_cast<const HeapGraphPath*>(
- ToInternal(this)->GetRetainingPaths()->at(index));
-}
-
-
const HeapGraphNode* HeapGraphNode::GetDominatorNode() const {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapSnapshot::GetDominatorNode");
@@ -5419,26 +5376,6 @@
}
-const HeapGraphNode* HeapSnapshotsDiff::GetAdditionsRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshotsDiff::GetAdditionsRoot");
- i::HeapSnapshotsDiff* diff =
- const_cast<i::HeapSnapshotsDiff*>(
- reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
- return reinterpret_cast<const HeapGraphNode*>(diff->additions_root());
-}
-
-
-const HeapGraphNode* HeapSnapshotsDiff::GetDeletionsRoot() const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshotsDiff::GetDeletionsRoot");
- i::HeapSnapshotsDiff* diff =
- const_cast<i::HeapSnapshotsDiff*>(
- reinterpret_cast<const i::HeapSnapshotsDiff*>(this));
- return reinterpret_cast<const HeapGraphNode*>(diff->deletions_root());
-}
-
-
static i::HeapSnapshot* ToInternal(const HeapSnapshot* snapshot) {
return const_cast<i::HeapSnapshot*>(
reinterpret_cast<const i::HeapSnapshot*>(snapshot));
@@ -5494,15 +5431,6 @@
}
-const HeapSnapshotsDiff* HeapSnapshot::CompareWith(
- const HeapSnapshot* snapshot) const {
- i::Isolate* isolate = i::Isolate::Current();
- IsDeadCheck(isolate, "v8::HeapSnapshot::CompareWith");
- return reinterpret_cast<const HeapSnapshotsDiff*>(
- ToInternal(this)->CompareWith(ToInternal(snapshot)));
-}
-
-
void HeapSnapshot::Serialize(OutputStream* stream,
HeapSnapshot::SerializationFormat format) const {
i::Isolate* isolate = i::Isolate::Current();
@@ -5710,9 +5638,9 @@
char* HandleScopeImplementer::Iterate(ObjectVisitor* v, char* storage) {
- HandleScopeImplementer* thread_local =
+ HandleScopeImplementer* scope_implementer =
reinterpret_cast<HandleScopeImplementer*>(storage);
- thread_local->IterateThis(v);
+ scope_implementer->IterateThis(v);
return storage + ArchiveSpacePerThread();
}
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index c25d445..441adfe 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -2884,6 +2884,9 @@
case TRBinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
+ case TRBinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
case TRBinaryOpIC::STRING:
GenerateStringStub(masm);
break;
@@ -3426,9 +3429,20 @@
__ add(scratch2, scratch1, Operand(0x40000000), SetCC);
// If not try to return a heap number.
__ b(mi, &return_heap_number);
+ // Check for minus zero. Return heap number for minus zero.
+ Label not_zero;
+ __ cmp(scratch1, Operand(0));
+ __ b(ne, &not_zero);
+ __ vmov(scratch2, d5.high());
+ __ tst(scratch2, Operand(HeapNumber::kSignMask));
+ __ b(ne, &return_heap_number);
+ __ bind(&not_zero);
+
// Tag the result and return.
__ SmiTag(r0, scratch1);
__ Ret();
+ } else {
+ // DIV just falls through to allocating a heap number.
}
if (result_type_ >= (op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
@@ -3606,10 +3620,41 @@
}
+void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+ // Convert oddball arguments to numbers.
+ Label check, done;
+ __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &check);
+ if (Token::IsBitOp(op_)) {
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(r1, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ b(ne, &done);
+ if (Token::IsBitOp(op_)) {
+ __ mov(r0, Operand(Smi::FromInt(0)));
+ } else {
+ __ LoadRoot(r0, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateHeapNumberStub(masm);
+}
+
+
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
-
GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
__ bind(&call_runtime);
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 5615b7e..1dde255 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -311,6 +311,7 @@
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
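
Two details in the ARM stub changes above deserve a note. The minus-zero check exists because a Smi is a tagged integer and cannot encode -0; when the integer result is zero but the double result has its sign bit set, the stub must return a heap number instead of tagging a Smi. The new oddball stub follows ECMAScript's coercion of undefined: arithmetic operands become NaN via ToNumber, while bitwise operators, which go through ToInt32, effectively see 0, matching the stub's choice between the NaN root and Smi 0. A small host-side sketch of that rule (illustrative only, not the generated stub code):

#include <cmath>
#include <cstdint>

// How an undefined operand behaves once coerced, mirroring the choice
// GenerateOddballStub makes between loading the NaN root and Smi::FromInt(0).
double UndefinedAsArithmeticOperand() {
  return std::nan("");  // ToNumber(undefined) is NaN, so 1 + undefined is NaN
}

int32_t UndefinedAsBitOpOperand() {
  return 0;             // ToInt32(NaN) is 0, so 1 | undefined equals 1 | 0
}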
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 33ca3b0..3a3dcf0 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -288,14 +288,33 @@
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
- for (int i = 0; ok && i < 4; i++) {
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
- PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
- input_offset);
+ input_offset,
+ name);
}
+
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 30f1b38..9936ac0 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -555,7 +555,7 @@
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!HEAP->InNewSpace(interceptor));
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
Register scratch = name;
__ mov(scratch, Operand(Handle<Object>(interceptor)));
__ push(scratch);
@@ -624,7 +624,7 @@
// Pass the additional arguments FastHandleApiCall expects.
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (HEAP->InNewSpace(call_data)) {
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
__ Move(r0, api_call_info_handle);
__ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
} else {
@@ -719,7 +719,7 @@
name,
holder,
miss);
- return HEAP->undefined_value();
+ return masm->isolate()->heap()->undefined_value();
}
}
@@ -822,7 +822,7 @@
FreeSpaceForFastApiCall(masm);
}
- return HEAP->undefined_value();
+ return masm->isolate()->heap()->undefined_value();
}
void CompileRegular(MacroAssembler* masm,
@@ -1091,7 +1091,7 @@
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = HEAP->LookupSymbol(name);
+ MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
Object* lookup_result = NULL; // Initialization to please compiler.
if (!maybe_lookup_result->ToObject(&lookup_result)) {
set_failure(Failure::cast(maybe_lookup_result));
@@ -1111,7 +1111,7 @@
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
- } else if (HEAP->InNewSpace(prototype)) {
+ } else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
__ cmp(scratch1, Operand(Handle<Map>(current->map())));
@@ -1259,7 +1259,7 @@
__ push(receiver);
__ mov(scratch2, sp); // scratch2 = AccessorInfo::args_
Handle<AccessorInfo> callback_handle(callback);
- if (HEAP->InNewSpace(callback_handle->data())) {
+ if (heap()->InNewSpace(callback_handle->data())) {
__ Move(scratch3, callback_handle);
__ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
} else {
@@ -1477,7 +1477,7 @@
__ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (HEAP->InNewSpace(function)) {
+ if (heap()->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1560,7 +1560,7 @@
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss;
@@ -1720,7 +1720,7 @@
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss, return_undefined, call_builtin;
@@ -1806,7 +1806,7 @@
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1890,7 +1890,7 @@
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1979,7 +1979,7 @@
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -2048,8 +2048,9 @@
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!masm()->isolate()->cpu_features()->IsSupported(VFP3))
- return HEAP->undefined_value();
+ if (!masm()->isolate()->cpu_features()->IsSupported(VFP3)) {
+ return heap()->undefined_value();
+ }
CpuFeatures::Scope scope_vfp3(VFP3);
@@ -2057,7 +2058,7 @@
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss, slow;
GenerateNameCheck(name, &miss);
@@ -2199,7 +2200,7 @@
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -2291,18 +2292,16 @@
JSGlobalPropertyCell* cell,
JSFunction* function,
String* name) {
- Isolate* isolate = masm()->isolate();
- Heap* heap = isolate->heap();
- Counters* counters = isolate->counters();
+ Counters* counters = isolate()->counters();
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return heap->undefined_value();
- if (cell != NULL) return heap->undefined_value();
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
int depth = optimization.GetPrototypeDepthOfExpectedType(
JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap->undefined_value();
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
Label miss, miss_before_stack_reserved;
@@ -2802,7 +2801,7 @@
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, HEAP->empty_string());
+ return GetCode(NONEXISTENT, heap()->empty_string());
}
@@ -3251,7 +3250,7 @@
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
__ ldr(scratch, FieldMemOperand(elements_reg, HeapObject::kMapOffset));
- __ cmp(scratch, Operand(Handle<Map>(FACTORY->fixed_array_map())));
+ __ cmp(scratch, Operand(Handle<Map>(factory()->fixed_array_map())));
__ b(ne, &miss);
// Check that the key is within bounds.
diff --git a/src/assembler.cc b/src/assembler.cc
index 58ec8ca..0322747 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -55,6 +55,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/regexp-macro-assembler-mips.h"
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@@ -795,6 +797,8 @@
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
+#elif V8_TARGET_ARCH_MIPS
+ function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
UNREACHABLE();
#endif
diff --git a/src/atomicops.h b/src/atomicops.h
index 72a0d0f..e2057ed 100644
--- a/src/atomicops.h
+++ b/src/atomicops.h
@@ -158,6 +158,8 @@
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
+#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
+#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif
diff --git a/src/atomicops_internals_mips_gcc.h b/src/atomicops_internals_mips_gcc.h
new file mode 100644
index 0000000..5113de2
--- /dev/null
+++ b/src/atomicops_internals_mips_gcc.h
@@ -0,0 +1,169 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
+
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+
+namespace v8 {
+namespace internal {
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 prev;
+ __asm__ __volatile__("1:\n"
+ "ll %0, %1\n" // prev = *ptr
+ "bne %0, %3, 2f\n" // if (prev != old_value) goto 2
+ "nop\n" // delay slot nop
+ "sc %2, %1\n" // *ptr = new_value (with atomic check)
+ "beqz %2, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ "2:\n"
+ : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+ : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+ : "memory");
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value) {
+ Atomic32 temp, old;
+ __asm__ __volatile__("1:\n"
+ "ll %1, %2\n" // old = *ptr
+ "move %0, %3\n" // temp = new_value
+ "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory");
+
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 temp, temp2;
+
+ __asm__ __volatile__("1:\n"
+ "ll %0, %2\n" // temp = *ptr
+ "addu %0, %3\n" // temp = temp + increment
+ "move %1, %0\n" // temp2 = temp
+ "sc %0, %2\n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b\n" // start again on atomic error
+ "nop\n" // delay slot nop
+ : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory");
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment) {
+ Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+ ATOMICOPS_COMPILER_BARRIER();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ ATOMICOPS_COMPILER_BARRIER();
+ return x;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value) {
+ ATOMICOPS_COMPILER_BARRIER();
+ return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+}
+
+inline void MemoryBarrier() {
+ ATOMICOPS_COMPILER_BARRIER();
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+ MemoryBarrier();
+ return *ptr;
+}
+
+} } // namespace v8::internal
+
+#undef ATOMICOPS_COMPILER_BARRIER
+
+#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
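
As the comments in the new header explain, the Acquire/Release variants only constrain the ordering of surrounding memory accesses. The canonical use is a producer publishing data behind a flag with a release store and a consumer reading the flag back with an acquire load. A minimal usage sketch against the functions defined above (the include and surrounding code are assumptions, not part of this change):

#include "atomicops.h"  // assumed include; pulls in the MIPS implementation above

namespace i = v8::internal;

i::Atomic32 ready = 0;
int payload = 0;

void Producer() {
  payload = 42;                 // write the data first
  i::Release_Store(&ready, 1);  // publish: earlier stores stay before this store
}

void Consumer() {
  if (i::Acquire_Load(&ready)) {  // later loads stay after this load
    // If ready was observed as 1, payload is guaranteed to read as 42 here.
  }
}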
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index e7c0554..9c9bac7 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -69,23 +69,26 @@
Handle<String> Bootstrapper::NativesSourceLookup(int index) {
ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
- if (HEAP->natives_source_cache()->get(index)->IsUndefined()) {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ if (heap->natives_source_cache()->get(index)->IsUndefined()) {
if (!Snapshot::IsEnabled() || FLAG_new_snapshot) {
// We can use external strings for the natives.
NativesExternalStringResource* resource =
new NativesExternalStringResource(this,
Natives::GetScriptSource(index).start());
Handle<String> source_code =
- FACTORY->NewExternalStringFromAscii(resource);
- HEAP->natives_source_cache()->set(index, *source_code);
+ factory->NewExternalStringFromAscii(resource);
+ heap->natives_source_cache()->set(index, *source_code);
} else {
// Old snapshot code can't cope with external strings at all.
Handle<String> source_code =
- FACTORY->NewStringFromAscii(Natives::GetScriptSource(index));
- HEAP->natives_source_cache()->set(index, *source_code);
+ factory->NewStringFromAscii(Natives::GetScriptSource(index));
+ heap->natives_source_cache()->set(index, *source_code);
}
}
- Handle<Object> cached_source(HEAP->natives_source_cache()->get(index));
+ Handle<Object> cached_source(heap->natives_source_cache()->get(index));
return Handle<String>::cast(cached_source);
}
@@ -292,9 +295,10 @@
void Bootstrapper::DetachGlobal(Handle<Context> env) {
- JSGlobalProxy::cast(env->global_proxy())->set_context(*FACTORY->null_value());
+ Factory* factory = Isolate::Current()->factory();
+ JSGlobalProxy::cast(env->global_proxy())->set_context(*factory->null_value());
SetObjectPrototype(Handle<JSObject>(env->global_proxy()),
- FACTORY->null_value());
+ factory->null_value());
env->set_global_proxy(env->global());
env->global()->set_global_receiver(env->global());
}
@@ -318,12 +322,13 @@
Handle<JSObject> prototype,
Builtins::Name call,
bool is_ecma_native) {
- Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
- Handle<Code> call_code = Handle<Code>(
- Isolate::Current()->builtins()->builtin(call));
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Handle<String> symbol = factory->LookupAsciiSymbol(name);
+ Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call));
Handle<JSFunction> function = prototype.is_null() ?
- FACTORY->NewFunctionWithoutPrototype(symbol, call_code) :
- FACTORY->NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithoutPrototype(symbol, call_code) :
+ factory->NewFunctionWithPrototype(symbol,
type,
instance_size,
prototype,
@@ -339,29 +344,30 @@
Handle<DescriptorArray> Genesis::ComputeFunctionInstanceDescriptor(
PrototypePropertyMode prototypeMode) {
+ Factory* factory = Isolate::Current()->factory();
Handle<DescriptorArray> descriptors =
- FACTORY->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+ factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
PropertyAttributes attributes =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
{ // Add length.
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionLength);
- CallbacksDescriptor d(*FACTORY->length_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
+ CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
descriptors->Set(0, &d);
}
{ // Add name.
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionName);
- CallbacksDescriptor d(*FACTORY->name_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
+ CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
descriptors->Set(1, &d);
}
{ // Add arguments.
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionArguments);
- CallbacksDescriptor d(*FACTORY->arguments_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionArguments);
+ CallbacksDescriptor d(*factory->arguments_symbol(), *proxy, attributes);
descriptors->Set(2, &d);
}
{ // Add caller.
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionCaller);
- CallbacksDescriptor d(*FACTORY->caller_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionCaller);
+ CallbacksDescriptor d(*factory->caller_symbol(), *proxy, attributes);
descriptors->Set(3, &d);
}
if (prototypeMode != DONT_ADD_PROTOTYPE) {
@@ -369,8 +375,8 @@
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
}
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionPrototype);
- CallbacksDescriptor d(*FACTORY->prototype_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
+ CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
descriptors->Set(4, &d);
}
descriptors->Sort();
@@ -413,43 +419,47 @@
function_instance_map_writable_prototype_ =
CreateFunctionMap(ADD_WRITEABLE_PROTOTYPE);
- Handle<String> object_name = Handle<String>(HEAP->Object_symbol());
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+
+ Handle<String> object_name = Handle<String>(heap->Object_symbol());
{ // --- O b j e c t ---
Handle<JSFunction> object_fun =
- FACTORY->NewFunction(object_name, FACTORY->null_value());
+ factory->NewFunction(object_name, factory->null_value());
Handle<Map> object_function_map =
- FACTORY->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+ factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
object_fun->set_initial_map(*object_function_map);
object_function_map->set_constructor(*object_fun);
global_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
- Handle<JSObject> prototype = FACTORY->NewJSObject(
- Isolate::Current()->object_function(),
+ Handle<JSObject> prototype = factory->NewJSObject(
+ isolate->object_function(),
TENURED);
global_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
object_function_map->
- set_instance_descriptors(HEAP->empty_descriptor_array());
+ set_instance_descriptors(heap->empty_descriptor_array());
}
// Allocate the empty function as the prototype for function ECMAScript
// 262 15.3.4.
- Handle<String> symbol = FACTORY->LookupAsciiSymbol("Empty");
+ Handle<String> symbol = factory->LookupAsciiSymbol("Empty");
Handle<JSFunction> empty_function =
- FACTORY->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
+ factory->NewFunctionWithoutPrototype(symbol, kNonStrictMode);
// --- E m p t y ---
Handle<Code> code =
- Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Handle<Code>(isolate->builtins()->builtin(
Builtins::kEmptyFunction));
empty_function->set_code(*code);
empty_function->shared()->set_code(*code);
- Handle<String> source = FACTORY->NewStringFromAscii(CStrVector("() {}"));
- Handle<Script> script = FACTORY->NewScript(source);
+ Handle<String> source = factory->NewStringFromAscii(CStrVector("() {}"));
+ Handle<Script> script = factory->NewScript(source);
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
empty_function->shared()->set_script(*script);
empty_function->shared()->set_start_position(0);
@@ -466,7 +476,7 @@
// Allocate the function map first and then patch the prototype later
Handle<Map> function_without_prototype_map(
global_context()->function_without_prototype_map());
- Handle<Map> empty_fm = FACTORY->CopyMapDropDescriptors(
+ Handle<Map> empty_fm = factory->CopyMapDropDescriptors(
function_without_prototype_map);
empty_fm->set_instance_descriptors(
function_without_prototype_map->instance_descriptors());
@@ -480,27 +490,28 @@
PrototypePropertyMode prototypeMode,
Handle<FixedArray> arguments,
Handle<FixedArray> caller) {
+ Factory* factory = Isolate::Current()->factory();
Handle<DescriptorArray> descriptors =
- FACTORY->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
+ factory->NewDescriptorArray(prototypeMode == DONT_ADD_PROTOTYPE ? 4 : 5);
PropertyAttributes attributes = static_cast<PropertyAttributes>(
DONT_ENUM | DONT_DELETE | READ_ONLY);
{ // length
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionLength);
- CallbacksDescriptor d(*FACTORY->length_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionLength);
+ CallbacksDescriptor d(*factory->length_symbol(), *proxy, attributes);
descriptors->Set(0, &d);
}
{ // name
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionName);
- CallbacksDescriptor d(*FACTORY->name_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionName);
+ CallbacksDescriptor d(*factory->name_symbol(), *proxy, attributes);
descriptors->Set(1, &d);
}
{ // arguments
- CallbacksDescriptor d(*FACTORY->arguments_symbol(), *arguments, attributes);
+ CallbacksDescriptor d(*factory->arguments_symbol(), *arguments, attributes);
descriptors->Set(2, &d);
}
{ // caller
- CallbacksDescriptor d(*FACTORY->caller_symbol(), *caller, attributes);
+ CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
descriptors->Set(3, &d);
}
@@ -509,8 +520,8 @@
if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) {
attributes = static_cast<PropertyAttributes>(attributes & ~READ_ONLY);
}
- Handle<Proxy> proxy = FACTORY->NewProxy(&Accessors::FunctionPrototype);
- CallbacksDescriptor d(*FACTORY->prototype_symbol(), *proxy, attributes);
+ Handle<Proxy> proxy = factory->NewProxy(&Accessors::FunctionPrototype);
+ CallbacksDescriptor d(*factory->prototype_symbol(), *proxy, attributes);
descriptors->Set(4, &d);
}
@@ -522,11 +533,14 @@
// ECMAScript 5th Edition, 13.2.3
Handle<JSFunction> Genesis::CreateThrowTypeErrorFunction(
Builtins::Name builtin) {
- Handle<String> name = FACTORY->LookupAsciiSymbol("ThrowTypeError");
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+
+ Handle<String> name = factory->LookupAsciiSymbol("ThrowTypeError");
Handle<JSFunction> throw_type_error =
- FACTORY->NewFunctionWithoutPrototype(name, kStrictMode);
+ factory->NewFunctionWithoutPrototype(name, kStrictMode);
Handle<Code> code = Handle<Code>(
- Isolate::Current()->builtins()->builtin(builtin));
+ isolate->builtins()->builtin(builtin));
throw_type_error->set_map(global_context()->strict_mode_function_map());
throw_type_error->set_code(*code);
@@ -559,8 +573,9 @@
void Genesis::CreateStrictModeFunctionMaps(Handle<JSFunction> empty) {
// Create the callbacks arrays for ThrowTypeError functions.
// The get/set callacks are filled in after the maps are created below.
- Handle<FixedArray> arguments = FACTORY->NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = FACTORY->NewFixedArray(2, TENURED);
+ Factory* factory = Isolate::Current()->factory();
+ Handle<FixedArray> arguments = factory->NewFixedArray(2, TENURED);
+ Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
// Allocate map for the strict mode function instances.
global_context()->set_strict_mode_function_instance_map(
@@ -670,12 +685,16 @@
}
}
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+
if (js_global_template.is_null()) {
- Handle<String> name = Handle<String>(HEAP->empty_symbol());
- Handle<Code> code = Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Handle<String> name = Handle<String>(heap->empty_symbol());
+ Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
Builtins::kIllegal));
js_global_function =
- FACTORY->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
+ factory->NewFunction(name, JS_GLOBAL_OBJECT_TYPE,
JSGlobalObject::kSize, code, true);
// Change the constructor property of the prototype of the
// hidden global function to refer to the Object function.
@@ -684,20 +703,20 @@
JSObject::cast(js_global_function->instance_prototype()));
SetLocalPropertyNoThrow(
prototype,
- FACTORY->constructor_symbol(),
- Isolate::Current()->object_function(),
+ factory->constructor_symbol(),
+ isolate->object_function(),
NONE);
} else {
Handle<FunctionTemplateInfo> js_global_constructor(
FunctionTemplateInfo::cast(js_global_template->constructor()));
js_global_function =
- FACTORY->CreateApiFunction(js_global_constructor,
- FACTORY->InnerGlobalObject);
+ factory->CreateApiFunction(js_global_constructor,
+ factory->InnerGlobalObject);
}
js_global_function->initial_map()->set_is_hidden_prototype();
Handle<GlobalObject> inner_global =
- FACTORY->NewGlobalObject(js_global_function);
+ factory->NewGlobalObject(js_global_function);
if (inner_global_out != NULL) {
*inner_global_out = inner_global;
}
@@ -705,11 +724,11 @@
// Step 2: create or re-initialize the global proxy object.
Handle<JSFunction> global_proxy_function;
if (global_template.IsEmpty()) {
- Handle<String> name = Handle<String>(HEAP->empty_symbol());
- Handle<Code> code = Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Handle<String> name = Handle<String>(heap->empty_symbol());
+ Handle<Code> code = Handle<Code>(isolate->builtins()->builtin(
Builtins::kIllegal));
global_proxy_function =
- FACTORY->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
+ factory->NewFunction(name, JS_GLOBAL_PROXY_TYPE,
JSGlobalProxy::kSize, code, true);
} else {
Handle<ObjectTemplateInfo> data =
@@ -717,11 +736,11 @@
Handle<FunctionTemplateInfo> global_constructor(
FunctionTemplateInfo::cast(data->constructor()));
global_proxy_function =
- FACTORY->CreateApiFunction(global_constructor,
- FACTORY->OuterGlobalObject);
+ factory->CreateApiFunction(global_constructor,
+ factory->OuterGlobalObject);
}
- Handle<String> global_name = FACTORY->LookupAsciiSymbol("global");
+ Handle<String> global_name = factory->LookupAsciiSymbol("global");
global_proxy_function->shared()->set_instance_class_name(*global_name);
global_proxy_function->initial_map()->set_is_access_check_needed(true);
@@ -735,7 +754,7 @@
Handle<JSGlobalProxy>::cast(global_object));
} else {
return Handle<JSGlobalProxy>::cast(
- FACTORY->NewJSObject(global_proxy_function, TENURED));
+ factory->NewJSObject(global_proxy_function, TENURED));
}
}
@@ -788,9 +807,13 @@
// object reinitialization.
global_context()->set_security_token(*inner_global);
- Handle<String> object_name = Handle<String>(HEAP->Object_symbol());
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+
+ Handle<String> object_name = Handle<String>(heap->Object_symbol());
SetLocalPropertyNoThrow(inner_global, object_name,
- Isolate::Current()->object_function(), DONT_ENUM);
+ isolate->object_function(), DONT_ENUM);
Handle<JSObject> global = Handle<JSObject>(global_context()->global());
@@ -801,20 +824,20 @@
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
- Isolate::Current()->initial_object_prototype(),
+ isolate->initial_object_prototype(),
Builtins::kArrayCode, true);
array_function->shared()->set_construct_stub(
- Isolate::Current()->builtins()->builtin(Builtins::kArrayConstructCode));
+ isolate->builtins()->builtin(Builtins::kArrayConstructCode));
array_function->shared()->DontAdaptArguments();
// This seems a bit hackish, but we need to make sure Array.length
// is 1.
array_function->shared()->set_length(1);
Handle<DescriptorArray> array_descriptors =
- FACTORY->CopyAppendProxyDescriptor(
- FACTORY->empty_descriptor_array(),
- FACTORY->length_symbol(),
- FACTORY->NewProxy(&Accessors::ArrayLength),
+ factory->CopyAppendProxyDescriptor(
+ factory->empty_descriptor_array(),
+ factory->length_symbol(),
+ factory->NewProxy(&Accessors::ArrayLength),
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE));
// Cache the fast JavaScript array map
@@ -831,7 +854,7 @@
{ // --- N u m b e r ---
Handle<JSFunction> number_fun =
InstallFunction(global, "Number", JS_VALUE_TYPE, JSValue::kSize,
- Isolate::Current()->initial_object_prototype(),
+ isolate->initial_object_prototype(),
Builtins::kIllegal, true);
global_context()->set_number_function(*number_fun);
}
@@ -839,7 +862,7 @@
{ // --- B o o l e a n ---
Handle<JSFunction> boolean_fun =
InstallFunction(global, "Boolean", JS_VALUE_TYPE, JSValue::kSize,
- Isolate::Current()->initial_object_prototype(),
+ isolate->initial_object_prototype(),
Builtins::kIllegal, true);
global_context()->set_boolean_function(*boolean_fun);
}
@@ -847,18 +870,17 @@
{ // --- S t r i n g ---
Handle<JSFunction> string_fun =
InstallFunction(global, "String", JS_VALUE_TYPE, JSValue::kSize,
- Isolate::Current()->initial_object_prototype(),
+ isolate->initial_object_prototype(),
Builtins::kIllegal, true);
string_fun->shared()->set_construct_stub(
- Isolate::Current()->builtins()->builtin(
- Builtins::kStringConstructCode));
+ isolate->builtins()->builtin(Builtins::kStringConstructCode));
global_context()->set_string_function(*string_fun);
// Add 'length' property to strings.
Handle<DescriptorArray> string_descriptors =
- FACTORY->CopyAppendProxyDescriptor(
- FACTORY->empty_descriptor_array(),
- FACTORY->length_symbol(),
- FACTORY->NewProxy(&Accessors::StringLength),
+ factory->CopyAppendProxyDescriptor(
+ factory->empty_descriptor_array(),
+ factory->length_symbol(),
+ factory->NewProxy(&Accessors::StringLength),
static_cast<PropertyAttributes>(DONT_ENUM |
DONT_DELETE |
READ_ONLY));
@@ -872,7 +894,7 @@
// Builtin functions for Date.prototype.
Handle<JSFunction> date_fun =
InstallFunction(global, "Date", JS_VALUE_TYPE, JSValue::kSize,
- Isolate::Current()->initial_object_prototype(),
+ isolate->initial_object_prototype(),
Builtins::kIllegal, true);
global_context()->set_date_function(*date_fun);
@@ -883,7 +905,7 @@
// Builtin functions for RegExp.prototype.
Handle<JSFunction> regexp_fun =
InstallFunction(global, "RegExp", JS_REGEXP_TYPE, JSRegExp::kSize,
- Isolate::Current()->initial_object_prototype(),
+ isolate->initial_object_prototype(),
Builtins::kIllegal, true);
global_context()->set_regexp_function(*regexp_fun);
@@ -892,13 +914,13 @@
ASSERT_EQ(0, initial_map->inobject_properties());
- Handle<DescriptorArray> descriptors = FACTORY->NewDescriptorArray(5);
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(5);
PropertyAttributes final =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
int enum_index = 0;
{
// ECMA-262, section 15.10.7.1.
- FieldDescriptor field(HEAP->source_symbol(),
+ FieldDescriptor field(heap->source_symbol(),
JSRegExp::kSourceFieldIndex,
final,
enum_index++);
@@ -906,7 +928,7 @@
}
{
// ECMA-262, section 15.10.7.2.
- FieldDescriptor field(HEAP->global_symbol(),
+ FieldDescriptor field(heap->global_symbol(),
JSRegExp::kGlobalFieldIndex,
final,
enum_index++);
@@ -914,7 +936,7 @@
}
{
// ECMA-262, section 15.10.7.3.
- FieldDescriptor field(HEAP->ignore_case_symbol(),
+ FieldDescriptor field(heap->ignore_case_symbol(),
JSRegExp::kIgnoreCaseFieldIndex,
final,
enum_index++);
@@ -922,7 +944,7 @@
}
{
// ECMA-262, section 15.10.7.4.
- FieldDescriptor field(HEAP->multiline_symbol(),
+ FieldDescriptor field(heap->multiline_symbol(),
JSRegExp::kMultilineFieldIndex,
final,
enum_index++);
@@ -932,7 +954,7 @@
// ECMA-262, section 15.10.7.5.
PropertyAttributes writable =
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
- FieldDescriptor field(HEAP->last_index_symbol(),
+ FieldDescriptor field(heap->last_index_symbol(),
JSRegExp::kLastIndexFieldIndex,
writable,
enum_index++);
@@ -951,13 +973,13 @@
}
{ // -- J S O N
- Handle<String> name = FACTORY->NewStringFromAscii(CStrVector("JSON"));
- Handle<JSFunction> cons = FACTORY->NewFunction(
+ Handle<String> name = factory->NewStringFromAscii(CStrVector("JSON"));
+ Handle<JSFunction> cons = factory->NewFunction(
name,
- FACTORY->the_hole_value());
+ factory->the_hole_value());
cons->SetInstancePrototype(global_context()->initial_object_prototype());
cons->SetInstanceClassName(*name);
- Handle<JSObject> json_object = FACTORY->NewJSObject(cons, TENURED);
+ Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetLocalPropertyNoThrow(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
@@ -967,15 +989,15 @@
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
// class_name equals 'Arguments'.
- Handle<String> symbol = FACTORY->LookupAsciiSymbol("Arguments");
+ Handle<String> symbol = factory->LookupAsciiSymbol("Arguments");
Handle<Code> code = Handle<Code>(
- Isolate::Current()->builtins()->builtin(Builtins::kIllegal));
+ isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSObject> prototype =
Handle<JSObject>(
JSObject::cast(global_context()->object_function()->prototype()));
Handle<JSFunction> function =
- FACTORY->NewFunctionWithPrototype(symbol,
+ factory->NewFunctionWithPrototype(symbol,
JS_OBJECT_TYPE,
JSObject::kHeaderSize,
prototype,
@@ -984,25 +1006,25 @@
ASSERT(!function->has_initial_map());
function->shared()->set_instance_class_name(*symbol);
function->shared()->set_expected_nof_properties(2);
- Handle<JSObject> result = FACTORY->NewJSObject(function);
+ Handle<JSObject> result = factory->NewJSObject(function);
global_context()->set_arguments_boilerplate(*result);
// Note: length must be added as the first property and
// callee must be added as the second property.
- SetLocalPropertyNoThrow(result, FACTORY->length_symbol(),
- FACTORY->undefined_value(),
+ SetLocalPropertyNoThrow(result, factory->length_symbol(),
+ factory->undefined_value(),
DONT_ENUM);
- SetLocalPropertyNoThrow(result, FACTORY->callee_symbol(),
- FACTORY->undefined_value(),
+ SetLocalPropertyNoThrow(result, factory->callee_symbol(),
+ factory->undefined_value(),
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
- result->LocalLookup(HEAP->callee_symbol(), &lookup);
+ result->LocalLookup(heap->callee_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
- result->LocalLookup(HEAP->length_symbol(), &lookup);
+ result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
@@ -1020,8 +1042,8 @@
static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
// Create the ThrowTypeError functions.
- Handle<FixedArray> callee = FACTORY->NewFixedArray(2, TENURED);
- Handle<FixedArray> caller = FACTORY->NewFixedArray(2, TENURED);
+ Handle<FixedArray> callee = factory->NewFixedArray(2, TENURED);
+ Handle<FixedArray> caller = factory->NewFixedArray(2, TENURED);
Handle<JSFunction> callee_throw =
CreateThrowTypeErrorFunction(Builtins::kStrictArgumentsCallee);
@@ -1035,23 +1057,23 @@
caller->set(1, *caller_throw);
// Create the descriptor array for the arguments object.
- Handle<DescriptorArray> descriptors = FACTORY->NewDescriptorArray(3);
+ Handle<DescriptorArray> descriptors = factory->NewDescriptorArray(3);
{ // length
- FieldDescriptor d(*FACTORY->length_symbol(), 0, DONT_ENUM);
+ FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM);
descriptors->Set(0, &d);
}
{ // callee
- CallbacksDescriptor d(*FACTORY->callee_symbol(), *callee, attributes);
+ CallbacksDescriptor d(*factory->callee_symbol(), *callee, attributes);
descriptors->Set(1, &d);
}
{ // caller
- CallbacksDescriptor d(*FACTORY->caller_symbol(), *caller, attributes);
+ CallbacksDescriptor d(*factory->caller_symbol(), *caller, attributes);
descriptors->Set(2, &d);
}
descriptors->Sort();
// Create the map. Allocate one in-object field for length.
- Handle<Map> map = FACTORY->NewMap(JS_OBJECT_TYPE,
+ Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE,
Heap::kArgumentsObjectSizeStrict);
map->set_instance_descriptors(*descriptors);
map->set_function_with_prototype(true);
@@ -1064,17 +1086,17 @@
global_context()->arguments_boilerplate()->map()->constructor());
// Allocate the arguments boilerplate object.
- Handle<JSObject> result = FACTORY->NewJSObjectFromMap(map);
+ Handle<JSObject> result = factory->NewJSObjectFromMap(map);
global_context()->set_strict_mode_arguments_boilerplate(*result);
// Add length property only for strict mode boilerplate.
- SetLocalPropertyNoThrow(result, FACTORY->length_symbol(),
- FACTORY->undefined_value(),
+ SetLocalPropertyNoThrow(result, factory->length_symbol(),
+ factory->undefined_value(),
DONT_ENUM);
#ifdef DEBUG
LookupResult lookup;
- result->LocalLookup(HEAP->length_symbol(), &lookup);
+ result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsProperty() && (lookup.type() == FIELD));
ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
@@ -1089,15 +1111,15 @@
{ // --- context extension
// Create a function for the context extension objects.
Handle<Code> code = Handle<Code>(
- Isolate::Current()->builtins()->builtin(Builtins::kIllegal));
+ isolate->builtins()->builtin(Builtins::kIllegal));
Handle<JSFunction> context_extension_fun =
- FACTORY->NewFunction(FACTORY->empty_symbol(),
+ factory->NewFunction(factory->empty_symbol(),
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JSObject::kHeaderSize,
code,
true);
- Handle<String> name = FACTORY->LookupAsciiSymbol("context_extension");
+ Handle<String> name = factory->LookupAsciiSymbol("context_extension");
context_extension_fun->shared()->set_instance_class_name(*name);
global_context()->set_context_extension_function(*context_extension_fun);
}
@@ -1106,10 +1128,10 @@
{
// Setup the call-as-function delegate.
Handle<Code> code =
- Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsFunction));
Handle<JSFunction> delegate =
- FACTORY->NewFunction(FACTORY->empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
global_context()->set_call_as_function_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
@@ -1118,20 +1140,20 @@
{
// Setup the call-as-constructor delegate.
Handle<Code> code =
- Handle<Code>(Isolate::Current()->builtins()->builtin(
+ Handle<Code>(isolate->builtins()->builtin(
Builtins::kHandleApiCallAsConstructor));
Handle<JSFunction> delegate =
- FACTORY->NewFunction(FACTORY->empty_symbol(), JS_OBJECT_TYPE,
+ factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE,
JSObject::kHeaderSize, code, true);
global_context()->set_call_as_constructor_delegate(*delegate);
delegate->shared()->DontAdaptArguments();
}
// Initialize the out of memory slot.
- global_context()->set_out_of_memory(HEAP->false_value());
+ global_context()->set_out_of_memory(heap->false_value());
// Initialize the data slot.
- global_context()->set_data(HEAP->undefined_value());
+ global_context()->set_data(heap->undefined_value());
}
@@ -1170,6 +1192,7 @@
v8::Extension* extension,
Handle<Context> top_context,
bool use_runtime_context) {
+ Factory* factory = Isolate::Current()->factory();
HandleScope scope;
Handle<SharedFunctionInfo> function_info;
@@ -1177,7 +1200,7 @@
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
ASSERT(source->IsAsciiRepresentation());
- Handle<String> script_name = FACTORY->NewStringFromUtf8(name);
+ Handle<String> script_name = factory->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
script_name,
@@ -1200,7 +1223,7 @@
? Handle<Context>(top_context->runtime_context())
: top_context);
Handle<JSFunction> fun =
- FACTORY->NewFunctionFromSharedFunctionInfo(function_info, context);
+ factory->NewFunctionFromSharedFunctionInfo(function_info, context);
// Call function using either the runtime object or the global
// object as the receiver. Provide no parameters.
@@ -1217,12 +1240,13 @@
#define INSTALL_NATIVE(Type, name, var) \
- Handle<String> var##_name = FACTORY->LookupAsciiSymbol(name); \
+ Handle<String> var##_name = factory->LookupAsciiSymbol(name); \
global_context()->set_##var(Type::cast( \
global_context()->builtins()->GetPropertyNoExceptionThrown(*var##_name)));
void Genesis::InstallNativeFunctions() {
+ Factory* factory = Isolate::Current()->factory();
HandleScope scope;
INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun);
INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun);
@@ -1247,6 +1271,7 @@
HandleScope scope;
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
// Create a function for the builtins object. Allocate space for the
// JavaScript builtins, a reference to the builtins object
@@ -1412,7 +1437,7 @@
// Allocate the empty script.
Handle<Script> script = factory->NewScript(factory->empty_string());
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
- HEAP->public_set_empty_script(*script);
+ heap->public_set_empty_script(*script);
}
{
// Builtin function for OpaqueReference -- a JSValue-based object,
@@ -1560,7 +1585,7 @@
int enum_index = 0;
{
- FieldDescriptor index_field(HEAP->index_symbol(),
+ FieldDescriptor index_field(heap->index_symbol(),
JSRegExpResult::kIndexIndex,
NONE,
enum_index++);
@@ -1568,7 +1593,7 @@
}
{
- FieldDescriptor input_field(HEAP->input_symbol(),
+ FieldDescriptor input_field(heap->input_symbol(),
JSRegExpResult::kInputIndex,
NONE,
enum_index++);
@@ -1596,17 +1621,18 @@
static Handle<JSObject> ResolveBuiltinIdHolder(
Handle<Context> global_context,
const char* holder_expr) {
+ Factory* factory = Isolate::Current()->factory();
Handle<GlobalObject> global(global_context->global());
const char* period_pos = strchr(holder_expr, '.');
if (period_pos == NULL) {
return Handle<JSObject>::cast(
- GetProperty(global, FACTORY->LookupAsciiSymbol(holder_expr)));
+ GetProperty(global, factory->LookupAsciiSymbol(holder_expr)));
}
ASSERT_EQ(".prototype", period_pos);
Vector<const char> property(holder_expr,
static_cast<int>(period_pos - holder_expr));
Handle<JSFunction> function = Handle<JSFunction>::cast(
- GetProperty(global, FACTORY->LookupSymbol(property)));
+ GetProperty(global, factory->LookupSymbol(property)));
return Handle<JSObject>(JSObject::cast(function->prototype()));
}
@@ -1697,20 +1723,21 @@
void Genesis::InstallSpecialObjects(Handle<Context> global_context) {
+ Factory* factory = Isolate::Current()->factory();
HandleScope scope;
Handle<JSGlobalObject> js_global(
JSGlobalObject::cast(global_context->global()));
// Expose the natives in global if a name for it is specified.
if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
Handle<String> natives_string =
- FACTORY->LookupAsciiSymbol(FLAG_expose_natives_as);
+ factory->LookupAsciiSymbol(FLAG_expose_natives_as);
SetLocalPropertyNoThrow(js_global, natives_string,
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
Handle<Object> Error = GetProperty(js_global, "Error");
if (Error->IsJSObject()) {
- Handle<String> name = FACTORY->LookupAsciiSymbol("stackTraceLimit");
+ Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit");
SetLocalPropertyNoThrow(Handle<JSObject>::cast(Error),
name,
Handle<Smi>(Smi::FromInt(FLAG_stack_trace_limit)),
@@ -1731,7 +1758,7 @@
global_context->security_token());
Handle<String> debug_string =
- FACTORY->LookupAsciiSymbol(FLAG_expose_debug_as);
+ factory->LookupAsciiSymbol(FLAG_expose_debug_as);
Handle<Object> global_proxy(debug->debug_context()->global_proxy());
SetLocalPropertyNoThrow(js_global, debug_string, global_proxy, DONT_ENUM);
}
@@ -1812,19 +1839,18 @@
for (int i = 0; i < extension->dependency_count(); i++) {
if (!InstallExtension(extension->dependencies()[i])) return false;
}
+ Isolate* isolate = Isolate::Current();
Vector<const char> source = CStrVector(extension->source());
- Handle<String> source_code = FACTORY->NewStringFromAscii(source);
+ Handle<String> source_code = isolate->factory()->NewStringFromAscii(source);
bool result = CompileScriptCached(CStrVector(extension->name()),
source_code,
- Isolate::Current()->bootstrapper()->
- extensions_cache(),
+ isolate->bootstrapper()->extensions_cache(),
extension,
- Handle<Context>(
- Isolate::Current()->context()),
+ Handle<Context>(isolate->context()),
false);
- ASSERT(Isolate::Current()->has_pending_exception() != result);
+ ASSERT(isolate->has_pending_exception() != result);
if (!result) {
- Isolate::Current()->clear_pending_exception();
+ isolate->clear_pending_exception();
}
current->set_state(v8::INSTALLED);
return result;
@@ -1884,12 +1910,13 @@
ASSERT(object->IsInstanceOf(
FunctionTemplateInfo::cast(object_template->constructor())));
+ Isolate* isolate = Isolate::Current();
bool pending_exception = false;
Handle<JSObject> obj =
Execution::InstantiateObject(object_template, &pending_exception);
if (pending_exception) {
- ASSERT(Isolate::Current()->has_pending_exception());
- Isolate::Current()->clear_pending_exception();
+ ASSERT(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
return false;
}
TransferObject(obj, object);
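Every bootstrapper.cc hunk above applies the same mechanical change: fetch the isolate once and reach its factory and heap through it instead of the FACTORY / HEAP convenience macros. A minimal sketch of the pattern, assuming a hypothetical call site with no isolate parameter available:

  Isolate* isolate = Isolate::Current();
  Factory* factory = isolate->factory();
  Heap* heap = isolate->heap();
  // Same objects the old macros reached, but the thread-local isolate
  // lookup happens once instead of once per access.
  Handle<String> name = factory->LookupAsciiSymbol("example");  // "example" is a made-up symbol
  Handle<Object> undef = factory->undefined_value();            // handle, safe across GC
  Object* raw_undef = heap->undefined_value();                  // raw pointer, no handle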
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index b370c65..2ecd336 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -37,9 +37,10 @@
namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out) {
- int index = HEAP->code_stubs()->FindEntry(GetKey());
+ Heap* heap = Isolate::Current()->heap();
+ int index = heap->code_stubs()->FindEntry(GetKey());
if (index != NumberDictionary::kNotFound) {
- *code_out = Code::cast(HEAP->code_stubs()->ValueAt(index));
+ *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
return true;
}
return false;
@@ -86,9 +87,12 @@
Handle<Code> CodeStub::GetCode() {
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
Code* code;
if (!FindCodeInCache(&code)) {
- v8::HandleScope scope;
+ HandleScope scope(isolate);
// Generate the new code.
MacroAssembler masm(NULL, 256);
@@ -103,24 +107,24 @@
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
- Handle<Code> new_object = FACTORY->NewCode(
+ Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
RecordCodeGeneration(*new_object, &masm);
FinishCode(*new_object);
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
- FACTORY->DictionaryAtNumberPut(
- Handle<NumberDictionary>(HEAP->code_stubs()),
+ factory->DictionaryAtNumberPut(
+ Handle<NumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
- HEAP->public_set_code_stubs(*dict);
+ heap->public_set_code_stubs(*dict);
code = *new_object;
}
- ASSERT(!NeedsImmovableCode() || HEAP->lo_space()->Contains(code));
- return Handle<Code>(code);
+ ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
+ return Handle<Code>(code, isolate);
}
@@ -130,6 +134,7 @@
// Generate the new code.
MacroAssembler masm(NULL, 256);
GenerateCode(&masm);
+ Heap* heap = masm.isolate()->heap();
// Create the code object.
CodeDesc desc;
@@ -142,7 +147,7 @@
GetICState());
Object* new_object;
{ MaybeObject* maybe_new_object =
- HEAP->CreateCode(desc, flags, masm.CodeObject());
+ heap->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
code = Code::cast(new_object);
@@ -151,9 +156,9 @@
// Try to update the code cache but do not fail if unable.
MaybeObject* maybe_new_object =
- HEAP->code_stubs()->AtNumberPut(GetKey(), code);
+ heap->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) {
- HEAP->public_set_code_stubs(NumberDictionary::cast(new_object));
+ heap->public_set_code_stubs(NumberDictionary::cast(new_object));
}
}
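With the code-stubs.cc change, stub lookup and caching run against an explicit heap rather than the HEAP macro. A usage sketch, assuming a hypothetical CodeStub subclass MyStub:

  MyStub stub;                          // hypothetical stub type; any CodeStub subclass behaves the same
  Handle<Code> code = stub.GetCode();   // first call generates the code and records it in the
                                        // isolate's heap()->code_stubs() dictionary; later calls
                                        // with the same key are served from that cache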
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 64fc2ca..d408034 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -80,10 +80,19 @@
#define CODE_STUB_LIST_ARM(V)
#endif
+// List of code stubs only used on MIPS platforms.
+#ifdef V8_TARGET_ARCH_MIPS
+#define CODE_STUB_LIST_MIPS(V) \
+ V(RegExpCEntry)
+#else
+#define CODE_STUB_LIST_MIPS(V)
+#endif
+
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
- CODE_STUB_LIST_ARM(V)
+ CODE_STUB_LIST_ARM(V) \
+ CODE_STUB_LIST_MIPS(V)
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
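CODE_STUB_LIST is an X-macro: each consumer passes its own V, and the per-platform lists simply append entries. A sketch of how the new MIPS entry folds in (the consumer macro and enum below are made up for illustration):

  #define HYPOTHETICAL_DECLARE_ID(name) k##name,
  enum HypotheticalStubId {
    CODE_STUB_LIST_MIPS(HYPOTHETICAL_DECLARE_ID)  // expands to: kRegExpCEntry,
    kHypotheticalStubCount
  };
  #undef HYPOTHETICAL_DECLARE_ID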
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index bc53c68..ef51950 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -332,10 +332,11 @@
void CpuProfiler::DeleteAllProfiles() {
- ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+ Isolate* isolate = Isolate::Current();
+ ASSERT(isolate->cpu_profiler() != NULL);
if (is_profiling())
- Isolate::Current()->cpu_profiler()->StopProcessor();
- Isolate::Current()->cpu_profiler()->ResetProfiles();
+ isolate->cpu_profiler()->StopProcessor();
+ isolate->cpu_profiler()->ResetProfiles();
}
@@ -367,10 +368,11 @@
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+ Isolate* isolate = Isolate::Current();
+ isolate->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
- HEAP->empty_string(),
+ isolate->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@@ -382,10 +384,11 @@
Code* code,
SharedFunctionInfo* shared,
String* name) {
- Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+ Isolate* isolate = Isolate::Current();
+ isolate->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
- HEAP->empty_string(),
+ isolate->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@@ -493,26 +496,28 @@
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
+ Isolate* isolate = Isolate::Current();
+
// Disable logging when using the new implementation.
- saved_logging_nesting_ = LOGGER->logging_nesting_;
- LOGGER->logging_nesting_ = 0;
+ saved_logging_nesting_ = isolate->logger()->logging_nesting_;
+ isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
- processor_ = new ProfilerEventsProcessor(Isolate::Current(), generator_);
+ processor_ = new ProfilerEventsProcessor(isolate, generator_);
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
- if (HEAP->HasBeenSetup()) {
+ if (isolate->heap()->HasBeenSetup()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
- LOGGER->LogCodeObjects();
+ isolate->logger()->LogCodeObjects();
FLAG_log_code = saved_log_code_flag;
}
- LOGGER->LogCompiledFunctions();
- LOGGER->LogAccessorCallbacks();
+ isolate->logger()->LogCompiledFunctions();
+ isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
- Sampler* sampler = reinterpret_cast<Sampler*>(LOGGER->ticker_);
+ Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
@@ -552,7 +557,8 @@
void CpuProfiler::StopProcessor() {
- Sampler* sampler = reinterpret_cast<Sampler*>(LOGGER->ticker_);
+ Logger* logger = Isolate::Current()->logger();
+ Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
if (need_to_stop_sampler_) {
sampler->Stop();
@@ -565,7 +571,7 @@
processor_ = NULL;
NoBarrier_Store(&is_profiling_, false);
generator_ = NULL;
- LOGGER->logging_nesting_ = saved_logging_nesting_;
+ logger->logging_nesting_ = saved_logging_nesting_;
}
} } // namespace v8::internal
diff --git a/src/d8.gyp b/src/d8.gyp
index 3283e38..901fd65 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -38,7 +38,10 @@
'../src',
],
'defines': [
+ 'ENABLE_LOGGING_AND_PROFILING',
'ENABLE_DEBUGGER_SUPPORT',
+ 'ENABLE_VMSTATE_TRACKING',
+ 'V8_FAST_TLS',
],
'sources': [
'd8.cc',
diff --git a/src/debug.cc b/src/debug.cc
index 87afe34..bc532ef 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -633,7 +633,7 @@
void ScriptCache::Add(Handle<Script> script) {
- Isolate* isolate = Isolate::Current();
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Create an entry in the hash map for the script.
int id = Smi::cast(script->id())->value();
HashMap::Entry* entry =
@@ -647,8 +647,8 @@
// global handle as the value in the hash map.
Handle<Script> script_ =
Handle<Script>::cast(
- (isolate->global_handles()->Create(*script)));
- isolate->global_handles()->MakeWeak(
+ (global_handles->Create(*script)));
+ global_handles->MakeWeak(
reinterpret_cast<Object**>(script_.location()),
this,
ScriptCache::HandleWeakScript);
@@ -671,23 +671,23 @@
void ScriptCache::ProcessCollectedScripts() {
- Isolate* isolate = Isolate::Current();
+ Debugger* debugger = Isolate::Current()->debugger();
for (int i = 0; i < collected_scripts_.length(); i++) {
- isolate->debugger()->OnScriptCollected(collected_scripts_[i]);
+ debugger->OnScriptCollected(collected_scripts_[i]);
}
collected_scripts_.Clear();
}
void ScriptCache::Clear() {
- Isolate* isolate = Isolate::Current();
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Iterate the script cache to get rid of all the weak handles.
for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
ASSERT(entry != NULL);
Object** location = reinterpret_cast<Object**>(entry->value);
ASSERT((*location)->IsScript());
- isolate->global_handles()->ClearWeakness(location);
- isolate->global_handles()->Destroy(location);
+ global_handles->ClearWeakness(location);
+ global_handles->Destroy(location);
}
// Clear the content of the hash map.
HashMap::Clear();
@@ -717,11 +717,11 @@
if (create_heap_objects) {
// Get code to handle debug break on return.
debug_break_return_ =
- Isolate::Current()->builtins()->builtin(Builtins::kReturn_DebugBreak);
+ isolate_->builtins()->builtin(Builtins::kReturn_DebugBreak);
ASSERT(debug_break_return_->IsCode());
// Get code to handle debug break in debug break slots.
debug_break_slot_ =
- Isolate::Current()->builtins()->builtin(Builtins::kSlot_DebugBreak);
+ isolate_->builtins()->builtin(Builtins::kSlot_DebugBreak);
ASSERT(debug_break_slot_->IsCode());
}
}
@@ -748,11 +748,11 @@
DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
- Isolate* isolate = Isolate::Current();
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
// Globalize the request debug info object and make it weak.
debug_info_ = Handle<DebugInfo>::cast(
- (isolate->global_handles()->Create(debug_info)));
- isolate->global_handles()->MakeWeak(
+ (global_handles->Create(debug_info)));
+ global_handles->MakeWeak(
reinterpret_cast<Object**>(debug_info_.location()),
this,
Debug::HandleWeakDebugInfo);
@@ -766,7 +766,9 @@
bool Debug::CompileDebuggerScript(int index) {
- HandleScope scope;
+ Isolate* isolate = Isolate::Current();
+ Factory* factory = isolate->factory();
+ HandleScope scope(isolate);
// Bail out if the index is invalid.
if (index == -1) {
@@ -775,9 +777,9 @@
// Find source and name for the requested script.
Handle<String> source_code =
- Isolate::Current()->bootstrapper()->NativesSourceLookup(index);
+ isolate->bootstrapper()->NativesSourceLookup(index);
Vector<const char> name = Natives::GetScriptName(index);
- Handle<String> script_name = FACTORY->NewStringFromAscii(name);
+ Handle<String> script_name = factory->NewStringFromAscii(name);
// Compile the script.
Handle<SharedFunctionInfo> function_info;
@@ -789,16 +791,16 @@
// Silently ignore stack overflows during compilation.
if (function_info.is_null()) {
- ASSERT(Isolate::Current()->has_pending_exception());
- Isolate::Current()->clear_pending_exception();
+ ASSERT(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
return false;
}
// Execute the shared function in the debugger context.
- Handle<Context> context = Isolate::Current()->global_context();
+ Handle<Context> context = isolate->global_context();
bool caught_exception = false;
Handle<JSFunction> function =
- FACTORY->NewFunctionFromSharedFunctionInfo(function_info, context);
+ factory->NewFunctionFromSharedFunctionInfo(function_info, context);
Handle<Object> result =
Execution::TryCall(function, Handle<Object>(context->global()),
0, NULL, &caught_exception);
@@ -823,43 +825,44 @@
// Return if debugger is already loaded.
if (IsLoaded()) return true;
- Isolate* isolate = Isolate::Current();
+ ASSERT(Isolate::Current() == isolate_);
+ Debugger* debugger = isolate_->debugger();
// Bail out if we're already in the process of compiling the native
// JavaScript source code for the debugger.
- if (isolate->debugger()->compiling_natives() ||
- isolate->debugger()->is_loading_debugger())
+ if (debugger->compiling_natives() ||
+ debugger->is_loading_debugger())
return false;
- isolate->debugger()->set_loading_debugger(true);
+ debugger->set_loading_debugger(true);
// Disable breakpoints and interrupts while compiling and running the
// debugger scripts including the context creation code.
DisableBreak disable(true);
- PostponeInterruptsScope postpone(isolate);
+ PostponeInterruptsScope postpone(isolate_);
// Create the debugger context.
- HandleScope scope;
+ HandleScope scope(isolate_);
Handle<Context> context =
- isolate->bootstrapper()->CreateEnvironment(
+ isolate_->bootstrapper()->CreateEnvironment(
Handle<Object>::null(),
v8::Handle<ObjectTemplate>(),
NULL);
// Use the debugger context.
- SaveContext save(isolate);
- isolate->set_context(*context);
+ SaveContext save(isolate_);
+ isolate_->set_context(*context);
// Expose the builtins object in the debugger context.
- Handle<String> key = FACTORY->LookupAsciiSymbol("builtins");
+ Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins");
Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
RETURN_IF_EMPTY_HANDLE_VALUE(
- isolate,
+ isolate_,
SetProperty(global, key, Handle<Object>(global->builtins()),
NONE, kNonStrictMode),
false);
// Compile the JavaScript for the debugger in the debugger context.
- isolate->debugger()->set_compiling_natives(true);
+ debugger->set_compiling_natives(true);
bool caught_exception =
!CompileDebuggerScript(Natives::GetIndex("mirror")) ||
!CompileDebuggerScript(Natives::GetIndex("debug"));
@@ -869,11 +872,11 @@
!CompileDebuggerScript(Natives::GetIndex("liveedit"));
}
- isolate->debugger()->set_compiling_natives(false);
+ debugger->set_compiling_natives(false);
// Make sure we mark the debugger as not loading before we might
// return.
- isolate->debugger()->set_loading_debugger(false);
+ debugger->set_loading_debugger(false);
// Check for caught exceptions.
if (caught_exception) return false;
@@ -920,7 +923,7 @@
Debug* debug = isolate->debug();
Heap* heap = isolate->heap();
- HandleScope scope;
+ HandleScope scope(isolate);
ASSERT(args.length() == 0);
debug->thread_local_.frame_drop_mode_ = FRAMES_UNTOUCHED;
@@ -1032,6 +1035,8 @@
// triggered. This function returns a JSArray with the break point objects
// which were triggered.
Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) {
+ Factory* factory = isolate_->factory();
+
// Count the number of break points hit. If there are multiple break points
// they are in a FixedArray.
Handle<FixedArray> break_points_hit;
@@ -1039,7 +1044,7 @@
ASSERT(!break_point_objects->IsUndefined());
if (break_point_objects->IsFixedArray()) {
Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
- break_points_hit = FACTORY->NewFixedArray(array->length());
+ break_points_hit = factory->NewFixedArray(array->length());
for (int i = 0; i < array->length(); i++) {
Handle<Object> o(array->get(i));
if (CheckBreakPoint(o)) {
@@ -1047,7 +1052,7 @@
}
}
} else {
- break_points_hit = FACTORY->NewFixedArray(1);
+ break_points_hit = factory->NewFixedArray(1);
if (CheckBreakPoint(break_point_objects)) {
break_points_hit->set(break_points_hit_count++, *break_point_objects);
}
@@ -1055,10 +1060,10 @@
// Return undefined if no break points were triggered.
if (break_points_hit_count == 0) {
- return FACTORY->undefined_value();
+ return factory->undefined_value();
}
// Return break points hit as a JSArray.
- Handle<JSArray> result = FACTORY->NewJSArrayWithElements(break_points_hit);
+ Handle<JSArray> result = factory->NewJSArrayWithElements(break_points_hit);
result->set_length(Smi::FromInt(break_points_hit_count));
return result;
}
@@ -1066,21 +1071,23 @@
// Check whether a single break point object is triggered.
bool Debug::CheckBreakPoint(Handle<Object> break_point_object) {
- HandleScope scope;
+ ASSERT(Isolate::Current() == isolate_);
+ Factory* factory = isolate_->factory();
+ HandleScope scope(isolate_);
// Ignore check if break point object is not a JSObject.
if (!break_point_object->IsJSObject()) return true;
// Get the function IsBreakPointTriggered (defined in debug-debugger.js).
Handle<String> is_break_point_triggered_symbol =
- FACTORY->LookupAsciiSymbol("IsBreakPointTriggered");
+ factory->LookupAsciiSymbol("IsBreakPointTriggered");
Handle<JSFunction> check_break_point =
Handle<JSFunction>(JSFunction::cast(
debug_context()->global()->GetPropertyNoExceptionThrown(
*is_break_point_triggered_symbol)));
// Get the break id as an object.
- Handle<Object> break_id = FACTORY->NewNumberFromInt(Debug::break_id());
+ Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id());
// Call IsBreakPointTriggered.
bool caught_exception = false;
@@ -1090,7 +1097,7 @@
reinterpret_cast<Object**>(break_point_object.location())
};
Handle<Object> result = Execution::TryCall(check_break_point,
- Isolate::Current()->js_builtins_object(), argc, argv, &caught_exception);
+ isolate_->js_builtins_object(), argc, argv, &caught_exception);
// If an exception was thrown or the result is not a boolean, treat as not triggered
if (caught_exception || !result->IsBoolean()) {
@@ -1120,7 +1127,7 @@
void Debug::SetBreakPoint(Handle<SharedFunctionInfo> shared,
Handle<Object> break_point_object,
int* source_position) {
- HandleScope scope;
+ HandleScope scope(isolate_);
if (!EnsureDebugInfo(shared)) {
// Return if retrieving debug info failed.
@@ -1144,7 +1151,7 @@
void Debug::ClearBreakPoint(Handle<Object> break_point_object) {
- HandleScope scope;
+ HandleScope scope(isolate_);
DebugInfoListNode* node = debug_info_list_;
while (node != NULL) {
@@ -1250,7 +1257,8 @@
void Debug::PrepareStep(StepAction step_action, int step_count) {
- HandleScope scope;
+ ASSERT(Isolate::Current() == isolate_);
+ HandleScope scope(isolate_);
ASSERT(Debug::InDebugger());
// Remember this step action and count.
@@ -1398,7 +1406,8 @@
// Reverse lookup required as the minor key cannot be retrieved
// from the code object.
Handle<Object> obj(
- HEAP->code_stubs()->SlowReverseLookup(*call_function_stub));
+ isolate_->heap()->code_stubs()->SlowReverseLookup(
+ *call_function_stub));
ASSERT(!obj.is_null());
ASSERT(!(*obj)->IsUndefined());
ASSERT(obj->IsSmi());
@@ -1553,13 +1562,15 @@
// Simple function for returning the source positions for active break points.
Handle<Object> Debug::GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared) {
- if (!HasDebugInfo(shared)) return Handle<Object>(HEAP->undefined_value());
+ Isolate* isolate = Isolate::Current();
+ Heap* heap = isolate->heap();
+ if (!HasDebugInfo(shared)) return Handle<Object>(heap->undefined_value());
Handle<DebugInfo> debug_info = GetDebugInfo(shared);
if (debug_info->GetBreakPointCount() == 0) {
- return Handle<Object>(HEAP->undefined_value());
+ return Handle<Object>(heap->undefined_value());
}
Handle<FixedArray> locations =
- FACTORY->NewFixedArray(debug_info->GetBreakPointCount());
+ isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount());
int count = 0;
for (int i = 0; i < debug_info->break_points()->length(); i++) {
if (!debug_info->break_points()->get(i)->IsUndefined()) {
@@ -1732,7 +1743,8 @@
} else {
prev->set_next(current->next());
}
- current->debug_info()->shared()->set_debug_info(HEAP->undefined_value());
+ current->debug_info()->shared()->set_debug_info(
+ isolate_->heap()->undefined_value());
delete current;
// If there are no more debug info objects there are not more break
@@ -1750,7 +1762,8 @@
void Debug::SetAfterBreakTarget(JavaScriptFrame* frame) {
- HandleScope scope;
+ ASSERT(Isolate::Current() == isolate_);
+ HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
Handle<SharedFunctionInfo> shared =
@@ -1764,7 +1777,7 @@
Handle<Code> original_code(debug_info->original_code());
#ifdef DEBUG
// Get the code which is actually executing.
- Handle<Code> frame_code(frame->LookupCode(Isolate::Current()));
+ Handle<Code> frame_code(frame->LookupCode(isolate_));
ASSERT(frame_code.is_identical_to(code));
#endif
@@ -1833,7 +1846,7 @@
bool Debug::IsBreakAtReturn(JavaScriptFrame* frame) {
- HandleScope scope;
+ HandleScope scope(isolate_);
// Get the executing function in which the debug break occurred.
Handle<SharedFunctionInfo> shared =
@@ -1882,13 +1895,14 @@
void Debug::ClearMirrorCache() {
+ ASSERT(Isolate::Current() == isolate_);
PostponeInterruptsScope postpone(isolate_);
- HandleScope scope;
- ASSERT(Isolate::Current()->context() == *Debug::debug_context());
+ HandleScope scope(isolate_);
+ ASSERT(isolate_->context() == *Debug::debug_context());
// Clear the mirror cache.
Handle<String> function_name =
- FACTORY->LookupSymbol(CStrVector("ClearMirrorCache"));
+ isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache"));
Handle<Object> fun(Isolate::Current()->global()->GetPropertyNoExceptionThrown(
*function_name));
ASSERT(fun->IsJSFunction());
@@ -1901,13 +1915,15 @@
void Debug::CreateScriptCache() {
- HandleScope scope;
+ ASSERT(Isolate::Current() == isolate_);
+ Heap* heap = isolate_->heap();
+ HandleScope scope(isolate_);
// Perform two GCs to get rid of all unreferenced scripts. The first GC gets
// rid of all the cached script wrappers and the second gets rid of the
// scripts which are no longer referenced.
- HEAP->CollectAllGarbage(false);
- HEAP->CollectAllGarbage(false);
+ heap->CollectAllGarbage(false);
+ heap->CollectAllGarbage(false);
ASSERT(script_cache_ == NULL);
script_cache_ = new ScriptCache();
@@ -1941,6 +1957,7 @@
Handle<FixedArray> Debug::GetLoadedScripts() {
+ ASSERT(Isolate::Current() == isolate_);
// Create and fill the script cache when the loaded scripts are requested for
// the first time.
if (script_cache_ == NULL) {
@@ -1950,12 +1967,12 @@
// If the script cache is not active just return an empty array.
ASSERT(script_cache_ != NULL);
if (script_cache_ == NULL) {
- FACTORY->NewFixedArray(0);
+ isolate_->factory()->NewFixedArray(0);
}
// Perform GC to get unreferenced scripts evicted from the cache before
// returning the content.
- HEAP->CollectAllGarbage(false);
+ isolate_->heap()->CollectAllGarbage(false);
// Get the scripts from the cache.
return script_cache_->GetScripts();
@@ -2008,13 +2025,14 @@
ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
// Create the execution state object.
- Handle<String> constructor_str = FACTORY->LookupSymbol(constructor_name);
+ Handle<String> constructor_str =
+ isolate_->factory()->LookupSymbol(constructor_name);
Handle<Object> constructor(
isolate_->global()->GetPropertyNoExceptionThrown(*constructor_str));
ASSERT(constructor->IsJSFunction());
if (!constructor->IsJSFunction()) {
*caught_exception = true;
- return FACTORY->undefined_value();
+ return isolate_->factory()->undefined_value();
}
Handle<Object> js_object = Execution::TryCall(
Handle<JSFunction>::cast(constructor),
@@ -2027,7 +2045,7 @@
Handle<Object> Debugger::MakeExecutionState(bool* caught_exception) {
ASSERT(Isolate::Current() == isolate_);
// Create the execution state object.
- Handle<Object> break_id = FACTORY->NewNumberFromInt(
+ Handle<Object> break_id = isolate_->factory()->NewNumberFromInt(
isolate_->debug()->break_id());
const int argc = 1;
Object** argv[argc] = { break_id.location() };
@@ -2056,12 +2074,13 @@
bool uncaught,
bool* caught_exception) {
ASSERT(Isolate::Current() == isolate_);
+ Factory* factory = isolate_->factory();
// Create the new exception event object.
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
exception.location(),
- uncaught ? FACTORY->true_value().location() :
- FACTORY->false_value().location()};
+ uncaught ? factory->true_value().location() :
+ factory->false_value().location()};
return MakeJSObject(CStrVector("MakeExceptionEvent"),
argc, argv, caught_exception);
}
@@ -2082,14 +2101,15 @@
bool before,
bool* caught_exception) {
ASSERT(Isolate::Current() == isolate_);
+ Factory* factory = isolate_->factory();
// Create the compile event object.
Handle<Object> exec_state = MakeExecutionState(caught_exception);
Handle<Object> script_wrapper = GetScriptWrapper(script);
const int argc = 3;
Object** argv[argc] = { exec_state.location(),
script_wrapper.location(),
- before ? FACTORY->true_value().location() :
- FACTORY->false_value().location() };
+ before ? factory->true_value().location() :
+ factory->false_value().location() };
return MakeJSObject(CStrVector("MakeCompileEvent"),
argc,
@@ -2116,20 +2136,21 @@
void Debugger::OnException(Handle<Object> exception, bool uncaught) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
+ Debug* debug = isolate_->debug();
// Bail out based on state or if there is no listener for this event
- if (isolate_->debug()->InDebugger()) return;
+ if (debug->InDebugger()) return;
if (!Debugger::EventActive(v8::Exception)) return;
// Bail out if exception breaks are not active
if (uncaught) {
// Uncaught exceptions are reported if either flag is set.
- if (!(isolate_->debug()->break_on_uncaught_exception() ||
- isolate_->debug()->break_on_exception())) return;
+ if (!(debug->break_on_uncaught_exception() ||
+ debug->break_on_exception())) return;
} else {
// Caught exceptions are only reported if break-on-exception is activated.
- if (!isolate_->debug()->break_on_exception()) return;
+ if (!debug->break_on_exception()) return;
}
// Enter the debugger.
@@ -2137,7 +2158,7 @@
if (debugger.FailedToEnter()) return;
// Clear all current stepping setup.
- isolate_->debug()->ClearStepping();
+ debug->ClearStepping();
// Create the event data object.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
@@ -2160,7 +2181,7 @@
void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
bool auto_continue) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
// Debugger has already been entered by caller.
ASSERT(isolate_->context() == *isolate_->debug()->debug_context());
@@ -2193,7 +2214,7 @@
void Debugger::OnBeforeCompile(Handle<Script> script) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
// Bail out based on state or if there is no listener for this event
if (isolate_->debug()->InDebugger()) return;
@@ -2223,10 +2244,11 @@
void Debugger::OnAfterCompile(Handle<Script> script,
AfterCompileFlags after_compile_flags) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
+ Debug* debug = isolate_->debug();
// Add the newly compiled script to the script cache.
- isolate_->debug()->AddScriptToScriptCache(script);
+ debug->AddScriptToScriptCache(script);
// No more to do if not debugging.
if (!IsDebuggerActive()) return;
@@ -2235,7 +2257,7 @@
if (compiling_natives()) return;
// Store whether in debugger before entering debugger.
- bool in_debugger = isolate_->debug()->InDebugger();
+ bool in_debugger = debug->InDebugger();
// Enter the debugger.
EnterDebugger debugger;
@@ -2246,9 +2268,9 @@
// Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
Handle<String> update_script_break_points_symbol =
- FACTORY->LookupAsciiSymbol("UpdateScriptBreakPoints");
+ isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints");
Handle<Object> update_script_break_points =
- Handle<Object>(isolate_->debug()->debug_context()->global()->
+ Handle<Object>(debug->debug_context()->global()->
GetPropertyNoExceptionThrown(*update_script_break_points_symbol));
if (!update_script_break_points->IsJSFunction()) {
return;
@@ -2291,7 +2313,7 @@
void Debugger::OnScriptCollected(int id) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
// No more to do if not debugging.
if (!IsDebuggerActive()) return;
@@ -2321,7 +2343,7 @@
Handle<JSObject> event_data,
bool auto_continue) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
// Clear any pending debug break if this is a real break.
if (!auto_continue) {
@@ -2421,13 +2443,14 @@
void Debugger::UnloadDebugger() {
ASSERT(Isolate::Current() == isolate_);
+ Debug* debug = isolate_->debug();
// Make sure that there are no breakpoints left.
- isolate_->debug()->ClearAllBreakPoints();
+ debug->ClearAllBreakPoints();
// Unload the debugger if feasible.
if (!never_unload_debugger_) {
- isolate_->debug()->Unload();
+ debug->Unload();
}
// Clear the flag indicating that the debugger should be unloaded.
@@ -2440,7 +2463,7 @@
Handle<JSObject> event_data,
bool auto_continue) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
if (!isolate_->debug()->Load()) return;
@@ -2610,17 +2633,18 @@
void Debugger::SetEventListener(Handle<Object> callback,
Handle<Object> data) {
ASSERT(Isolate::Current() == isolate_);
- HandleScope scope;
+ HandleScope scope(isolate_);
+ GlobalHandles* global_handles = isolate_->global_handles();
// Clear the global handles for the event listener and the event listener data
// object.
if (!event_listener_.is_null()) {
- isolate_->global_handles()->Destroy(
+ global_handles->Destroy(
reinterpret_cast<Object**>(event_listener_.location()));
event_listener_ = Handle<Object>();
}
if (!event_listener_data_.is_null()) {
- isolate_->global_handles()->Destroy(
+ global_handles->Destroy(
reinterpret_cast<Object**>(event_listener_data_.location()));
event_listener_data_ = Handle<Object>();
}
@@ -2629,12 +2653,12 @@
// object.
if (!callback->IsUndefined() && !callback->IsNull()) {
event_listener_ = Handle<Object>::cast(
- isolate_->global_handles()->Create(*callback));
+ global_handles->Create(*callback));
if (data.is_null()) {
- data = FACTORY->undefined_value();
+ data = isolate_->factory()->undefined_value();
}
event_listener_data_ = Handle<Object>::cast(
- isolate_->global_handles()->Create(*data));
+ global_handles->Create(*data));
}
ListenersChanged();
@@ -2658,13 +2682,13 @@
void Debugger::ListenersChanged() {
- Isolate* isolate = Isolate::Current();
+ ASSERT(Isolate::Current() == isolate_);
if (IsDebuggerActive()) {
// Disable the compilation cache when the debugger is active.
- isolate->compilation_cache()->Disable();
+ isolate_->compilation_cache()->Disable();
debugger_unload_pending_ = false;
} else {
- isolate->compilation_cache()->Enable();
+ isolate_->compilation_cache()->Enable();
// Unload the debugger if event listener and message handler cleared.
// Schedule this for later, because we may be in non-V8 thread.
debugger_unload_pending_ = true;
@@ -2776,14 +2800,14 @@
// Enter the debugger.
EnterDebugger debugger;
if (debugger.FailedToEnter()) {
- return FACTORY->undefined_value();
+ return isolate_->factory()->undefined_value();
}
// Create the execution state.
bool caught_exception = false;
Handle<Object> exec_state = MakeExecutionState(&caught_exception);
if (caught_exception) {
- return FACTORY->undefined_value();
+ return isolate_->factory()->undefined_value();
}
static const int kArgc = 2;
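Most of the debug.cc hunks replace the parameterless HandleScope with one bound to the debugger's isolate and cache isolate_ sub-objects in locals. A minimal sketch of that shape, using a hypothetical helper rather than any function from the patch:

  static void HypotheticalDebugHelper(Isolate* isolate) {
    HandleScope scope(isolate);  // handles created below belong to this isolate
    Factory* factory = isolate->factory();
    Handle<String> name = factory->LookupAsciiSymbol("example");  // made-up symbol
    PrintF("symbol length: %d\n", name->length());
  }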
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 76a45ba..0bc6409 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -97,7 +97,11 @@
#define FLAG FLAG_FULL
// Flags for Crankshaft.
-DEFINE_bool(crankshaft, true, "use crankshaft")
+#ifdef V8_TARGET_ARCH_MIPS
+ DEFINE_bool(crankshaft, false, "use crankshaft")
+#else
+ DEFINE_bool(crankshaft, true, "use crankshaft")
+#endif
DEFINE_string(hydrogen_filter, "", "hydrogen use/trace filter")
DEFINE_bool(use_hydrogen, true, "use generated hydrogen for compilation")
DEFINE_bool(build_lithium, true, "use lithium chunk builder")
@@ -161,6 +165,8 @@
"enable use of VFP3 instructions if available (ARM only)")
DEFINE_bool(enable_armv7, true,
"enable use of ARMv7 instructions if available (ARM only)")
+DEFINE_bool(enable_fpu, true,
+ "enable use of MIPS FPU instructions if available (MIPS only)")
// bootstrapper.cc
DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
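The new MIPS guard only changes the flag's compile-time default; in the definition pass the macro boils down to roughly the following (a sketch; the real macro also registers the flag for --help and command-line parsing):

  #ifdef V8_TARGET_ARCH_MIPS
  bool FLAG_crankshaft = false;  // Crankshaft stays off by default on the MIPS port
  #else
  bool FLAG_crankshaft = true;
  #endif
  // Either default can still be flipped at runtime with --crankshaft / --nocrankshaft.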
diff --git a/src/globals.h b/src/globals.h
index 9b24bf6..5ab9806 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -54,7 +54,7 @@
#if CAN_USE_UNALIGNED_ACCESSES
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif
-#elif defined(_MIPS_ARCH_MIPS32R2)
+#elif defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#else
@@ -72,7 +72,7 @@
#define V8_TARGET_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_TARGET_ARCH_ARM 1
-#elif defined(_MIPS_ARCH_MIPS32R2)
+#elif defined(__MIPSEL__)
#define V8_TARGET_ARCH_MIPS 1
#else
#error Target architecture was not detected as supported by v8
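Switching the test to __MIPSEL__ (predefined by GCC for any little-endian MIPS target) instead of _MIPS_ARCH_MIPS32R2 (only defined for -march=mips32r2) lets more MIPS toolchains be detected. A stand-alone probe you can compile with the cross toolchain to see which macro it predefines (illustrative only):

  #include <cstdio>
  int main() {
  #if defined(__MIPSEL__)
    std::printf("__MIPSEL__ is defined: globals.h now selects V8_TARGET_ARCH_MIPS\n");
  #endif
  #if defined(_MIPS_ARCH_MIPS32R2)
    std::printf("_MIPS_ARCH_MIPS32R2 is defined (the old, narrower trigger)\n");
  #endif
    return 0;
  }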
diff --git a/src/heap.cc b/src/heap.cc
index 1c1e084..5d1a66e 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -49,6 +49,10 @@
#include "regexp-macro-assembler.h"
#include "arm/regexp-macro-assembler-arm.h"
#endif
+#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
+#include "regexp-macro-assembler.h"
+#include "mips/regexp-macro-assembler-mips.h"
+#endif
namespace v8 {
namespace internal {
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index c73ce4c..9bbe164 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 7d9c814..2383192 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -43,6 +43,8 @@
#include "x64/lithium-codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-codegen-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-codegen-mips.h"
#else
#error Unsupported target architecture.
#endif
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 711616f..96faae9 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -1359,6 +1359,9 @@
case TRBinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
+ case TRBinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
case TRBinaryOpIC::STRING:
GenerateStringStub(masm);
break;
@@ -2024,9 +2027,41 @@
}
+void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateAddStrings(masm);
+ }
+
+  // Convert oddball arguments to numbers.
+ NearLabel check, done;
+ __ cmp(edx, FACTORY->undefined_value());
+ __ j(not_equal, &check);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(edx, Operand(edx));
+ } else {
+ __ mov(edx, Immediate(FACTORY->nan_value()));
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ cmp(eax, FACTORY->undefined_value());
+ __ j(not_equal, &done);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(eax, Operand(eax));
+ } else {
+ __ mov(eax, Immediate(FACTORY->nan_value()));
+ }
+ __ bind(&done);
+
+ GenerateHeapNumberStub(masm);
+}
+
+
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
// Floating point case.
switch (op_) {
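GenerateOddballStub only has to mimic what ToNumber does to undefined before falling through to the heap-number path: NaN for arithmetic, zero for bitwise operators. The intended semantics in plain C++ (a sketch of the semantics, not of the stub's register-level code):

  #include <limits>
  // undefined in an arithmetic op behaves as NaN: undefined + 1 -> NaN
  double UndefinedToNumber() {
    return std::numeric_limits<double>::quiet_NaN();
  }
  // undefined in a bitwise op behaves as 0: undefined | 1 -> 1
  int UndefinedToInt32() {
    return 0;
  }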
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 9e5f0a5..31fa645 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -306,6 +306,7 @@
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 026572f..c6342d7 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -369,13 +369,31 @@
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
- for (int i = 0; ok && i < 4; i++) {
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part)\n",
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [esp + %d] <- 0x%08x ; [esp + %d] (fixed part - %s)\n",
output_offset,
input_value,
- input_offset);
+ input_offset,
+ name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index fff754b..1691098 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -116,16 +116,6 @@
}
-bool LCodeGen::GenerateRelocPadding() {
- int reloc_size = masm()->relocation_writer_size();
- while (reloc_size < deoptimization_reloc_size.min_size) {
- __ RecordComment(RelocInfo::kFillerCommentString, true);
- reloc_size += RelocInfo::kMinRelocCommentSize;
- }
- return !is_aborted();
-}
-
-
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index a673c45..199a80a 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1905,7 +1905,6 @@
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
- LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegister(instr->key());
LOperand* temp = NULL;
@@ -1916,6 +1915,15 @@
temp = FixedTemp(eax);
}
+ LOperand* val = NULL;
+ if (array_type == kExternalByteArray ||
+ array_type == kExternalUnsignedByteArray) {
+ // We need a byte register in this case for the value.
+ val = UseFixed(instr->value(), eax);
+ } else {
+ val = UseRegister(instr->value());
+ }
+
return new LStoreKeyedSpecializedArrayElement(external_pointer,
key,
val,
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
index bb8d2d3..7730ee3 100644
--- a/src/ia32/stub-cache-ia32.cc
+++ b/src/ia32/stub-cache-ia32.cc
@@ -139,7 +139,7 @@
// Check that the properties array is a dictionary.
__ cmp(FieldOperand(properties, HeapObject::kMapOffset),
- Immediate(FACTORY->hash_table_map()));
+ Immediate(masm->isolate()->factory()->hash_table_map()));
__ j(not_equal, miss_label);
// Compute the capacity mask.
@@ -179,7 +179,7 @@
ASSERT_EQ(kSmiTagSize, 1);
__ mov(entity_name, Operand(properties, index, times_half_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ cmp(entity_name, FACTORY->undefined_value());
+ __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
if (i != kProbes - 1) {
__ j(equal, &done, taken);
@@ -860,10 +860,10 @@
if (Serializer::enabled()) {
__ mov(scratch, Immediate(Handle<Object>(cell)));
__ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(FACTORY->the_hole_value()));
+ Immediate(masm->isolate()->factory()->the_hole_value()));
} else {
__ cmp(Operand::Cell(Handle<JSGlobalPropertyCell>(cell)),
- Immediate(FACTORY->the_hole_value()));
+ Immediate(masm->isolate()->factory()->the_hole_value()));
}
__ j(not_equal, miss, not_taken);
return cell;
@@ -916,8 +916,6 @@
ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
&& !scratch2.is(scratch1));
- Heap* heap = isolate()->heap();
-
// Keep track of the current object in register reg.
Register reg = object_reg;
JSObject* current = object;
@@ -942,7 +940,7 @@
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* maybe_lookup_result = heap->LookupSymbol(name);
+ MaybeObject* maybe_lookup_result = heap()->LookupSymbol(name);
Object* lookup_result = NULL; // Initialization to please compiler.
if (!maybe_lookup_result->ToObject(&lookup_result)) {
set_failure(Failure::cast(maybe_lookup_result));
@@ -962,7 +960,7 @@
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (heap->InNewSpace(prototype)) {
+ } else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ cmp(Operand(scratch1), Immediate(Handle<Map>(current->map())));
@@ -1216,7 +1214,7 @@
// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
- __ cmp(eax, FACTORY->no_interceptor_result_sentinel());
+ __ cmp(eax, factory()->no_interceptor_result_sentinel());
__ j(equal, &interceptor_failed);
__ LeaveInternalFrame();
__ ret(0);
@@ -1474,7 +1472,7 @@
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
+ Immediate(factory()->fixed_array_map()));
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
@@ -1550,7 +1548,7 @@
// ... and fill the rest with holes.
for (int i = 1; i < kAllocationDelta; i++) {
__ mov(Operand(edx, i * kPointerSize),
- Immediate(FACTORY->the_hole_value()));
+ Immediate(factory()->the_hole_value()));
}
// Restore receiver to edx as finish sequence assumes it's here.
@@ -1596,7 +1594,7 @@
// If object is not an array, bail out to regular call.
if (!object->IsJSArray() || cell != NULL) {
- return isolate()->heap()->undefined_value();
+ return heap()->undefined_value();
}
Label miss, return_undefined, call_builtin;
@@ -1619,7 +1617,7 @@
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
+ Immediate(factory()->fixed_array_map()));
__ j(not_equal, &call_builtin);
// Get the array's length into ecx and calculate new length.
@@ -1633,7 +1631,7 @@
__ mov(eax, FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize));
- __ cmp(Operand(eax), Immediate(FACTORY->the_hole_value()));
+ __ cmp(Operand(eax), Immediate(factory()->the_hole_value()));
__ j(equal, &call_builtin);
// Set the array's length.
@@ -1643,11 +1641,11 @@
__ mov(FieldOperand(ebx,
ecx, times_half_pointer_size,
FixedArray::kHeaderSize),
- Immediate(FACTORY->the_hole_value()));
+ Immediate(factory()->the_hole_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&return_undefined);
- __ mov(eax, Immediate(FACTORY->undefined_value()));
+ __ mov(eax, Immediate(factory()->undefined_value()));
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
@@ -1714,7 +1712,7 @@
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
- __ Set(index, Immediate(FACTORY->undefined_value()));
+ __ Set(index, Immediate(factory()->undefined_value()));
}
StringCharCodeAtGenerator char_code_at_generator(receiver,
@@ -1733,7 +1731,7 @@
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ Set(eax, Immediate(FACTORY->nan_value()));
+ __ Set(eax, Immediate(factory()->nan_value()));
__ ret((argc + 1) * kPointerSize);
}
@@ -1765,7 +1763,7 @@
// If object is not a string, bail out to regular call.
if (!object->IsString() || cell != NULL) {
- return isolate()->heap()->undefined_value();
+ return heap()->undefined_value();
}
const int argc = arguments().immediate();
@@ -1799,7 +1797,7 @@
if (argc > 0) {
__ mov(index, Operand(esp, (argc - 0) * kPointerSize));
} else {
- __ Set(index, Immediate(FACTORY->undefined_value()));
+ __ Set(index, Immediate(factory()->undefined_value()));
}
StringCharAtGenerator char_at_generator(receiver,
@@ -1819,7 +1817,7 @@
if (index_out_of_range.is_linked()) {
__ bind(&index_out_of_range);
- __ Set(eax, Immediate(FACTORY->empty_string()));
+ __ Set(eax, Immediate(factory()->empty_string()));
__ ret((argc + 1) * kPointerSize);
}
@@ -1923,7 +1921,7 @@
// -- esp[(argc + 1) * 4] : receiver
// -----------------------------------
- if (isolate()->cpu_features()->IsSupported(SSE2)) {
+ if (!isolate()->cpu_features()->IsSupported(SSE2)) {
return isolate()->heap()->undefined_value();
}
@@ -1966,7 +1964,7 @@
// Check if the argument is a heap number and load its value into xmm0.
Label slow;
- __ CheckMap(eax, FACTORY->heap_number_map(), &slow, true);
+ __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
__ movdbl(xmm0, FieldOperand(eax, HeapNumber::kValueOffset));
// Check if the argument is strictly positive. Note this also
@@ -2110,7 +2108,7 @@
// Check if the argument is a heap number and load its exponent and
// sign into ebx.
  __ bind(&not_smi);
- __ CheckMap(eax, FACTORY->heap_number_map(), &slow, true);
+ __ CheckMap(eax, factory()->heap_number_map(), &slow, true);
__ mov(ebx, FieldOperand(eax, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive,
@@ -2155,12 +2153,11 @@
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- Heap* heap = isolate()->heap();
- if (object->IsGlobalObject()) return heap->undefined_value();
- if (cell != NULL) return heap->undefined_value();
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
int depth = optimization.GetPrototypeDepthOfExpectedType(
JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return heap->undefined_value();
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
Label miss, miss_before_stack_reserved;
@@ -2311,9 +2308,9 @@
} else {
Label fast;
// Check that the object is a boolean.
- __ cmp(edx, FACTORY->true_value());
+ __ cmp(edx, factory()->true_value());
__ j(equal, &fast, taken);
- __ cmp(edx, FACTORY->false_value());
+ __ cmp(edx, factory()->false_value());
__ j(not_equal, &miss, not_taken);
__ bind(&fast);
// Check that the maps starting from the prototype haven't changed.
@@ -2637,7 +2634,7 @@
// cell could have been deleted and reintroducing the global needs
// to update the property details in the property dictionary of the
// global object. We bail out to the runtime system to do that.
- __ cmp(cell_operand, FACTORY->the_hole_value());
+ __ cmp(cell_operand, factory()->the_hole_value());
__ j(equal, &miss);
// Store the value in the cell.
@@ -2723,7 +2720,7 @@
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(FACTORY->fixed_array_map()));
+ Immediate(factory()->fixed_array_map()));
__ j(not_equal, &miss, not_taken);
// Check that the key is within bounds.
@@ -2935,10 +2932,10 @@
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ cmp(ebx, FACTORY->the_hole_value());
+ __ cmp(ebx, factory()->the_hole_value());
__ j(equal, &miss, not_taken);
} else if (FLAG_debug_code) {
- __ cmp(ebx, FACTORY->the_hole_value());
+ __ cmp(ebx, factory()->the_hole_value());
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
@@ -3195,7 +3192,7 @@
// Load the result and make sure it's not the hole.
__ mov(ebx, Operand(ecx, eax, times_2,
FixedArray::kHeaderSize - kHeapObjectTag));
- __ cmp(ebx, FACTORY->the_hole_value());
+ __ cmp(ebx, factory()->the_hole_value());
__ j(equal, &miss, not_taken);
__ mov(eax, ebx);
__ ret(0);
@@ -3224,7 +3221,7 @@
// code for the function thereby hitting the break points.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kDebugInfoOffset));
- __ cmp(ebx, FACTORY->undefined_value());
+ __ cmp(ebx, factory()->undefined_value());
__ j(not_equal, &generic_stub_call, not_taken);
#endif
@@ -3261,7 +3258,7 @@
// ebx: initial map
// edx: JSObject (untagged)
__ mov(Operand(edx, JSObject::kMapOffset), ebx);
- __ mov(ebx, FACTORY->empty_fixed_array());
+ __ mov(ebx, factory()->empty_fixed_array());
__ mov(Operand(edx, JSObject::kPropertiesOffset), ebx);
__ mov(Operand(edx, JSObject::kElementsOffset), ebx);
@@ -3278,7 +3275,7 @@
__ lea(ecx, Operand(esp, eax, times_4, 1 * kPointerSize));
// Use edi for holding undefined which is used in several places below.
- __ mov(edi, FACTORY->undefined_value());
+ __ mov(edi, factory()->undefined_value());
// eax: argc
// ecx: first argument
@@ -3593,7 +3590,7 @@
// edi: elements array
// ebx: untagged index
__ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- Immediate(FACTORY->heap_number_map()));
+ Immediate(factory()->heap_number_map()));
__ j(not_equal, &slow);
// The WebGL specification leaves the behavior of storing NaN and
diff --git a/src/ic.cc b/src/ic.cc
index 01d4ca0..382b438 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -2131,6 +2131,7 @@
case SMI: return "SMI";
case INT32: return "Int32s";
case HEAP_NUMBER: return "HeapNumbers";
+ case ODDBALL: return "Oddball";
case STRING: return "Strings";
case GENERIC: return "Generic";
default: return "Invalid";
@@ -2145,6 +2146,7 @@
case SMI:
case INT32:
case HEAP_NUMBER:
+ case ODDBALL:
case STRING:
return MONOMORPHIC;
case GENERIC:
@@ -2192,6 +2194,10 @@
return STRING;
}
+ // Check for oddball objects.
+ if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
+ if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
+
return GENERIC;
}
diff --git a/src/ic.h b/src/ic.h
index f226660..bb8a981 100644
--- a/src/ic.h
+++ b/src/ic.h
@@ -610,6 +610,7 @@
SMI,
INT32,
HEAP_NUMBER,
+ ODDBALL,
STRING, // Only used for addition operation. At least one string operand.
GENERIC
};
diff --git a/src/isolate.cc b/src/isolate.cc
index 824cc37..a163532 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -168,6 +168,77 @@
}
+void Isolate::PreallocatedStorageInit(size_t size) {
+ ASSERT(free_list_.next_ == &free_list_);
+ ASSERT(free_list_.previous_ == &free_list_);
+ PreallocatedStorage* free_chunk =
+ reinterpret_cast<PreallocatedStorage*>(new char[size]);
+ free_list_.next_ = free_list_.previous_ = free_chunk;
+ free_chunk->next_ = free_chunk->previous_ = &free_list_;
+ free_chunk->size_ = size - sizeof(PreallocatedStorage);
+ preallocated_storage_preallocated_ = true;
+}
+
+
+void* Isolate::PreallocatedStorageNew(size_t size) {
+ if (!preallocated_storage_preallocated_) {
+ return FreeStoreAllocationPolicy::New(size);
+ }
+ ASSERT(free_list_.next_ != &free_list_);
+ ASSERT(free_list_.previous_ != &free_list_);
+
+ size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
+ // Search for exact fit.
+ for (PreallocatedStorage* storage = free_list_.next_;
+ storage != &free_list_;
+ storage = storage->next_) {
+ if (storage->size_ == size) {
+ storage->Unlink();
+ storage->LinkTo(&in_use_list_);
+ return reinterpret_cast<void*>(storage + 1);
+ }
+ }
+ // Search for first fit.
+ for (PreallocatedStorage* storage = free_list_.next_;
+ storage != &free_list_;
+ storage = storage->next_) {
+ if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
+ storage->Unlink();
+ storage->LinkTo(&in_use_list_);
+ PreallocatedStorage* left_over =
+ reinterpret_cast<PreallocatedStorage*>(
+ reinterpret_cast<char*>(storage + 1) + size);
+ left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
+ ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
+ storage->size_);
+ storage->size_ = size;
+ left_over->LinkTo(&free_list_);
+ return reinterpret_cast<void*>(storage + 1);
+ }
+ }
+ // Allocation failure.
+ ASSERT(false);
+ return NULL;
+}
+
+
+// We don't attempt to coalesce.
+void Isolate::PreallocatedStorageDelete(void* p) {
+ if (p == NULL) {
+ return;
+ }
+ if (!preallocated_storage_preallocated_) {
+ FreeStoreAllocationPolicy::Delete(p);
+ return;
+ }
+ PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
+ ASSERT(storage->next_->previous_ == storage);
+ ASSERT(storage->previous_->next_ == storage);
+ storage->Unlink();
+ storage->LinkTo(&free_list_);
+}
+
+
Isolate* Isolate::default_isolate_ = NULL;
Thread::LocalStorageKey Isolate::isolate_key_;
Thread::LocalStorageKey Isolate::thread_id_key_;
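The preallocated-storage routines added above form a small intrusive free list: allocation looks for an exact-size block first, then falls back to first fit and splits the block, and the ASSERT checks that the split preserves the total size. Below is a minimal standalone sketch of just that split bookkeeping, using a simplified Block header in place of PreallocatedStorage (illustrative only, not the Isolate API):

#include <cassert>
#include <cstddef>

// Simplified stand-in for PreallocatedStorage: a size header followed by payload.
struct Block {
  size_t size;  // Number of payload bytes that follow this header.
};

// First-fit split: carve 'request' payload bytes out of 'block' and return the
// leftover block. Mirrors the invariant asserted in PreallocatedStorageNew:
// request + left_over->size + sizeof(Block) == original block->size.
Block* SplitFirstFit(Block* block, size_t request) {
  assert(block->size >= request + sizeof(Block));
  Block* left_over = reinterpret_cast<Block*>(
      reinterpret_cast<char*>(block + 1) + request);
  left_over->size = block->size - request - sizeof(Block);
  block->size = request;
  return left_over;
}

int main() {
  alignas(Block) static char arena[256];
  Block* block = reinterpret_cast<Block*>(arena);
  block->size = sizeof(arena) - sizeof(Block);
  size_t original = block->size;
  Block* left_over = SplitFirstFit(block, 64);
  assert(block->size + left_over->size + sizeof(Block) == original);
  return 0;
}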
@@ -382,7 +453,8 @@
zone_.isolate_ = this;
stack_guard_.isolate_ = this;
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
+ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
simulator_initialized_ = false;
simulator_i_cache_ = NULL;
simulator_redirection_ = NULL;
@@ -658,10 +730,8 @@
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
-#if defined(V8_TARGET_ARCH_ARM)
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator::Initialize();
-#elif defined(V8_TARGET_ARCH_MIPS)
- ::assembler::mips::Simulator::Initialize();
#endif
#endif
diff --git a/src/isolate.h b/src/isolate.h
index 90fce61..03a4866 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -93,11 +93,13 @@
class DebuggerAgent;
#endif
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
class Redirection;
class Simulator;
#endif
+
// Static indirection table for handles to constants. If a frame
// element represents a constant, the data contains an index into
// this table of handles to the actual constants.
@@ -195,10 +197,8 @@
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
-#ifdef V8_TARGET_ARCH_ARM
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
-#elif V8_TARGET_ARCH_MIPS
- assembler::mips::Simulator* simulator_;
#endif
#endif // USE_SIMULATOR
@@ -221,7 +221,7 @@
Address try_catch_handler_address_;
};
-#if defined(V8_TARGET_ARCH_ARM)
+#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
#define ISOLATE_PLATFORM_INIT_LIST(V) \
/* VirtualFrame::SpilledScope state */ \
@@ -229,7 +229,7 @@
/* CodeGenerator::EmitNamedStore state */ \
V(int, inlined_write_barrier_size, -1)
-#if !defined(__arm__)
+#if !defined(__arm__) && !defined(__mips__)
class HashMap;
#endif
@@ -292,8 +292,6 @@
/* Assembler state. */ \
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
- /*This static counter ensures that NativeAllocationCheckers can be nested.*/ \
- V(int, allocation_disallowed, 0) \
V(FatalErrorCallback, exception_behavior, NULL) \
V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
@@ -341,7 +339,8 @@
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
simulator_(NULL),
#endif
next_(NULL),
@@ -353,7 +352,8 @@
ThreadState* thread_state() const { return thread_state_; }
void set_thread_state(ThreadState* value) { thread_state_ = value; }
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator() const { return simulator_; }
void set_simulator(Simulator* simulator) {
simulator_ = simulator;
@@ -370,7 +370,8 @@
uintptr_t stack_limit_;
ThreadState* thread_state_;
-#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM)
+#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
+ !defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
@@ -854,7 +855,8 @@
int* code_kind_statistics() { return code_kind_statistics_; }
#endif
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
+ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized() { return simulator_initialized_; }
void set_simulator_initialized(bool initialized) {
simulator_initialized_ = initialized;
@@ -1076,7 +1078,8 @@
ZoneObjectList frame_element_constant_list_;
ZoneObjectList result_constant_list_;
-#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__)
+#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
+ defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
Redirection* simulator_redirection_;
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index d5f59ea..06aae35 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -50,6 +50,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/regexp-macro-assembler-mips.h"
#else
#error Unsupported target architecture.
#endif
@@ -5340,6 +5342,8 @@
RegExpMacroAssemblerX64 macro_assembler(mode, (data->capture_count + 1) * 2);
#elif V8_TARGET_ARCH_ARM
RegExpMacroAssemblerARM macro_assembler(mode, (data->capture_count + 1) * 2);
+#elif V8_TARGET_ARCH_MIPS
+ RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2);
#endif
#else // V8_INTERPRETED_REGEXP
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
index 84c5bbd..c0beaaf 100644
--- a/src/lithium-allocator-inl.h
+++ b/src/lithium-allocator-inl.h
@@ -36,6 +36,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 965a226..f62a7db 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -37,6 +37,8 @@
#include "x64/lithium-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/lithium-arm.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "mips/lithium-mips.h"
#else
#error "Unknown architecture."
#endif
diff --git a/src/log.cc b/src/log.cc
index dc7906c..6991f3d 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -1134,7 +1134,6 @@
msg.AppendAddress(sample->pc);
msg.Append(',');
msg.AppendAddress(sample->sp);
- msg.Append(',');
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 2e63461..f7453d1 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -38,21 +38,13 @@
#include "mips/assembler-mips.h"
#include "cpu.h"
+#include "debug.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
-// Condition
-
-Condition NegateCondition(Condition cc) {
- ASSERT(cc != cc_always);
- return static_cast<Condition>(cc ^ 1);
-}
-
-
-// -----------------------------------------------------------------------------
// Operand and MemOperand
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
@@ -61,17 +53,13 @@
rmode_ = rmode;
}
+
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
-Operand::Operand(const char* s) {
- rm_ = no_reg;
- imm32_ = reinterpret_cast<int32_t>(s);
- rmode_ = RelocInfo::EMBEDDED_STRING;
-}
Operand::Operand(Smi* value) {
rm_ = no_reg;
@@ -79,10 +67,12 @@
rmode_ = RelocInfo::NONE;
}
+
Operand::Operand(Register rm) {
rm_ = rm;
}
+
bool Operand::is_reg() const {
return rm_.is_valid();
}
@@ -105,8 +95,29 @@
Address RelocInfo::target_address_address() {
- ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- return reinterpret_cast<Address>(pc_);
+ ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY
+ || rmode_ == EMBEDDED_OBJECT
+ || rmode_ == EXTERNAL_REFERENCE);
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For instructions like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. In our case, that is the
+ // address of the instruction that follows the LUI/ORI instruction pair.
+ return reinterpret_cast<Address>(
+ pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+}
+
+
+int RelocInfo::target_address_size() {
+ return Assembler::kExternalTargetSize;
}
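On MIPS a 32-bit constant such as a target address is materialized with a lui/ori pair, which is why target_address_address() above returns the address just past those two instructions. A quick standalone sketch of how a 32-bit value is split into, and recovered from, the two 16-bit immediates (plain arithmetic, not the Assembler API) would be:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t target = 0x12345678;
  // Split across the lui/ori pair: lui takes the upper half, ori the lower.
  uint16_t lui_imm = static_cast<uint16_t>(target >> 16);
  uint16_t ori_imm = static_cast<uint16_t>(target & 0xFFFF);
  // Reassemble the way a target_address_at()-style reader conceptually does.
  uint32_t reassembled = (static_cast<uint32_t>(lui_imm) << 16) | ori_imm;
  assert(reassembled == target);
  return 0;
}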
@@ -130,8 +141,15 @@
Object** RelocInfo::target_object_address() {
+ // Provide a "natural pointer" to the embedded object,
+ // which can be de-referenced during heap iteration.
ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
- return reinterpret_cast<Object**>(pc_);
+ // TODO(mips): Commenting out, to simplify arch-independent changes.
+ // GC won't work like this, but this commit is for asm/disasm/sim.
+ // reconstructed_obj_ptr_ =
+ // reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
+ // return &reconstructed_obj_ptr_;
+ return NULL;
}
@@ -143,23 +161,55 @@
Address* RelocInfo::target_reference_address() {
ASSERT(rmode_ == EXTERNAL_REFERENCE);
- return reinterpret_cast<Address*>(pc_);
+ // TODO(mips): Commenting out, to simplify arch-independent changes.
+ // GC won't work like this, but this commit is for asm/disasm/sim.
+ // reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
+ // return &reconstructed_adr_ptr_;
+ return NULL;
+}
+
+
+Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ return Handle<JSGlobalPropertyCell>(
+ reinterpret_cast<JSGlobalPropertyCell**>(address));
+}
+
+
+JSGlobalPropertyCell* RelocInfo::target_cell() {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = Memory::Address_at(pc_);
+ Object* object = HeapObject::FromAddress(
+ address - JSGlobalPropertyCell::kValueOffset);
+ return reinterpret_cast<JSGlobalPropertyCell*>(object);
+}
+
+
+void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell) {
+ ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
+ Address address = cell->address() + JSGlobalPropertyCell::kValueOffset;
+ Memory::Address_at(pc_) = address;
}
Address RelocInfo::call_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ return Assembler::target_address_at(pc_);
}
void RelocInfo::set_call_address(Address target) {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
- Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+ // The pc_ offset of 0 assumes mips patched return sequence per
+ // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
+ // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+ Assembler::set_target_address_at(pc_, target);
}
@@ -169,9 +219,8 @@
Object** RelocInfo::call_object_address() {
- ASSERT(IsPatchedReturnSequence());
- // The 2 instructions offset assumes patched return sequence.
- ASSERT(IsJSReturn(rmode()));
+ ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+ (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}
@@ -182,13 +231,76 @@
bool RelocInfo::IsPatchedReturnSequence() {
-#ifdef DEBUG
- PrintF("%s - %d - %s : Checking for jal(r)",
- __FILE__, __LINE__, __func__);
+ Instr instr0 = Assembler::instr_at(pc_);
+ Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+ Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);
+ bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+ (instr1 & kOpcodeMask) == ORI &&
+ (instr2 & kOpcodeMask) == SPECIAL &&
+ (instr2 & kFunctionFieldMask) == JALR);
+ return patched_return;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+ Instr current_instr = Assembler::instr_at(pc_);
+ return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(ObjectVisitor* visitor) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ // RelocInfo is needed when pointer must be updated/serialized, such as
+ // UpdatingVisitor in mark-compact.cc or Serializer in serialize.cc.
+ // It is ignored by visitors that do not need it.
+ // Commenting out, to simplify arch-independent changes.
+ // GC won't work like this, but this commit is for asm/disasm/sim.
+ // visitor->VisitPointer(target_object_address(), this);
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ visitor->VisitCodeTarget(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ // RelocInfo is needed when external-references must be serialized by
+ // Serializer Visitor in serialize.cc. It is ignored by visitors that
+ // do not need it.
+ // Commenting out, to simplify arch-independent changes.
+ // Serializer won't work like this, but this commit is for asm/disasm/sim.
+ // visitor->VisitExternalReference(target_reference_address(), this);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ // TODO(isolates): Get a cached isolate below.
+ } else if (((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence())) &&
+ Isolate::Current()->debug()->has_break_points()) {
+ visitor->VisitDebugTarget(this);
#endif
- return ((Assembler::instr_at(pc_) & kOpcodeMask) == SPECIAL) &&
- (((Assembler::instr_at(pc_) & kFunctionFieldMask) == JAL) ||
- ((Assembler::instr_at(pc_) & kFunctionFieldMask) == JALR));
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ visitor->VisitRuntimeEntry(this);
+ }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+ RelocInfo::Mode mode = rmode();
+ if (mode == RelocInfo::EMBEDDED_OBJECT) {
+ StaticVisitor::VisitPointer(heap, target_object_address());
+ } else if (RelocInfo::IsCodeTarget(mode)) {
+ StaticVisitor::VisitCodeTarget(this);
+ } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+ StaticVisitor::VisitExternalReference(target_reference_address());
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ } else if (heap->isolate()->debug()->has_break_points() &&
+ ((RelocInfo::IsJSReturn(mode) &&
+ IsPatchedReturnSequence()) ||
+ (RelocInfo::IsDebugBreakSlot(mode) &&
+ IsPatchedDebugBreakSlotSequence()))) {
+ StaticVisitor::VisitDebugTarget(this);
+#endif
+ } else if (mode == RelocInfo::RUNTIME_ENTRY) {
+ StaticVisitor::VisitRuntimeEntry(this);
+ }
}
@@ -203,10 +315,18 @@
}
+void Assembler::CheckTrampolinePoolQuick() {
+ if (pc_offset() >= next_buffer_check_) {
+ CheckTrampolinePool();
+ }
+}
+
+
void Assembler::emit(Instr x) {
CheckBuffer();
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
}
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index a3b316b..7d00da1 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -40,83 +40,41 @@
#include "mips/assembler-mips-inl.h"
#include "serialize.h"
-
namespace v8 {
namespace internal {
+CpuFeatures::CpuFeatures()
+ : supported_(0),
+ enabled_(0),
+ found_by_runtime_probing_(0) {
+}
+void CpuFeatures::Probe(bool portable) {
+ // If the compiler is allowed to use fpu then we can use fpu too in our
+ // code generation.
+#if !defined(__mips__)
+ // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
+ if (FLAG_enable_fpu) {
+ supported_ |= 1u << FPU;
+ }
+#else
+ if (portable && Serializer::enabled()) {
+ supported_ |= OS::CpuFeaturesImpliedByPlatform();
+ return; // No features if we might serialize.
+ }
-const Register no_reg = { -1 };
+ if (OS::MipsCpuHasFeature(FPU)) {
+ // This implementation also sets the FPU flags if
+ // runtime detection of FPU returns true.
+ supported_ |= 1u << FPU;
+ found_by_runtime_probing_ |= 1u << FPU;
+ }
-const Register zero_reg = { 0 };
-const Register at = { 1 };
-const Register v0 = { 2 };
-const Register v1 = { 3 };
-const Register a0 = { 4 };
-const Register a1 = { 5 };
-const Register a2 = { 6 };
-const Register a3 = { 7 };
-const Register t0 = { 8 };
-const Register t1 = { 9 };
-const Register t2 = { 10 };
-const Register t3 = { 11 };
-const Register t4 = { 12 };
-const Register t5 = { 13 };
-const Register t6 = { 14 };
-const Register t7 = { 15 };
-const Register s0 = { 16 };
-const Register s1 = { 17 };
-const Register s2 = { 18 };
-const Register s3 = { 19 };
-const Register s4 = { 20 };
-const Register s5 = { 21 };
-const Register s6 = { 22 };
-const Register s7 = { 23 };
-const Register t8 = { 24 };
-const Register t9 = { 25 };
-const Register k0 = { 26 };
-const Register k1 = { 27 };
-const Register gp = { 28 };
-const Register sp = { 29 };
-const Register s8_fp = { 30 };
-const Register ra = { 31 };
+ if (!portable) found_by_runtime_probing_ = 0;
+#endif
+}
-const FPURegister no_creg = { -1 };
-
-const FPURegister f0 = { 0 };
-const FPURegister f1 = { 1 };
-const FPURegister f2 = { 2 };
-const FPURegister f3 = { 3 };
-const FPURegister f4 = { 4 };
-const FPURegister f5 = { 5 };
-const FPURegister f6 = { 6 };
-const FPURegister f7 = { 7 };
-const FPURegister f8 = { 8 };
-const FPURegister f9 = { 9 };
-const FPURegister f10 = { 10 };
-const FPURegister f11 = { 11 };
-const FPURegister f12 = { 12 };
-const FPURegister f13 = { 13 };
-const FPURegister f14 = { 14 };
-const FPURegister f15 = { 15 };
-const FPURegister f16 = { 16 };
-const FPURegister f17 = { 17 };
-const FPURegister f18 = { 18 };
-const FPURegister f19 = { 19 };
-const FPURegister f20 = { 20 };
-const FPURegister f21 = { 21 };
-const FPURegister f22 = { 22 };
-const FPURegister f23 = { 23 };
-const FPURegister f24 = { 24 };
-const FPURegister f25 = { 25 };
-const FPURegister f26 = { 26 };
-const FPURegister f27 = { 27 };
-const FPURegister f28 = { 28 };
-const FPURegister f29 = { 29 };
-const FPURegister f30 = { 30 };
-const FPURegister f31 = { 31 };
-
int ToNumber(Register reg) {
ASSERT(reg.is_valid());
const int kNumbers[] = {
@@ -156,6 +114,7 @@
return kNumbers[reg.code()];
}
+
Register ToRegister(int num) {
ASSERT(num >= 0 && num < kNumRegisters);
const Register kRegisters[] = {
@@ -181,6 +140,15 @@
const int RelocInfo::kApplyMask = 0;
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on MIPS means that it is a lui/ori instruction, and that is
+ // always the case inside code objects.
+ return true;
+}
+
+
// Patch the code at the current address with the supplied instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
Instr* pc = reinterpret_cast<Instr*>(pc_);
@@ -210,7 +178,7 @@
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
- ASSERT(!Heap::InNewSpace(obj));
+ ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
@@ -221,26 +189,66 @@
}
}
-MemOperand::MemOperand(Register rm, int16_t offset) : Operand(rm) {
+
+MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
}
// -----------------------------------------------------------------------------
-// Implementation of Assembler.
+// Specific instructions, constants, and masks.
-static const int kMinimalBufferSize = 4*KB;
-static byte* spare_buffer_ = NULL;
+static const int kNegOffset = 0x00008000;
+// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (kPointerSize & kImm16Mask);
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift)
+ | (sp.code() << kRtShift) | (-kPointerSize & kImm16Mask);
+// sw(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
+// lw(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LW | (sp.code() << kRsShift)
+ | (0 & kImm16Mask);
-Assembler::Assembler(void* buffer, int buffer_size) {
+const Instr kLwRegFpOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kSwRegFpOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (0 & kImm16Mask);
+
+const Instr kLwRegFpNegOffsetPattern = LW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+
+const Instr kSwRegFpNegOffsetPattern = SW | (s8_fp.code() << kRsShift)
+ | (kNegOffset & kImm16Mask);
+// A mask for the Rt register for push, pop, lw, sw instructions.
+const Instr kRtMask = kRtFieldMask;
+const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
+const Instr kLwSwOffsetMask = kImm16Mask;
+
+
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(void* buffer, int buffer_size)
+ : AssemblerBase(Isolate::Current()),
+ positions_recorder_(this),
+ allow_peephole_optimization_(false) {
+ // BUG(3245989): disable peephole optimization if crankshaft is enabled.
+ allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
- if (spare_buffer_ != NULL) {
- buffer = spare_buffer_;
- spare_buffer_ = NULL;
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@@ -263,17 +271,19 @@
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
- current_statement_position_ = RelocInfo::kNoPosition;
- current_position_ = RelocInfo::kNoPosition;
- written_statement_position_ = current_statement_position_;
- written_position_ = current_position_;
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ next_buffer_check_ = kMaxBranchOffset - kTrampolineSize;
}
Assembler::~Assembler() {
if (own_buffer_) {
- if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
- spare_buffer_ = buffer_;
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@@ -282,7 +292,7 @@
void Assembler::GetCode(CodeDesc* desc) {
- ASSERT(pc_ <= reloc_info_writer.pos()); // no overlap
+ ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Setup code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -291,6 +301,60 @@
}
+void Assembler::Align(int m) {
+ ASSERT(m >= 4 && IsPowerOf2(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+
+void Assembler::CodeTargetAlign() {
+ // No advantage in aligning branch/call targets to more than a
+ // single instruction, as far as I am aware.
+ Align(4);
+}
+
+
+Register Assembler::GetRt(Instr instr) {
+ Register rt;
+ rt.code_ = (instr & kRtMask) >> kRtShift;
+ return rt;
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+ return (instr & ~kRtMask) == kPopRegPattern;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+ return (instr & ~kRtMask) == kPushRegPattern;
+}
+
+
+bool Assembler::IsSwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpOffset(Instr instr) {
+ return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsSwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kSwRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpNegOffset(Instr instr) {
+ return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+ kLwRegFpNegOffsetPattern);
+}
+
+
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@@ -301,14 +365,19 @@
// to be generated; pos() is the position of the last
// instruction using the label.
+// The link chain is terminated by a value in the instruction of -1,
+// which is an otherwise illegal value (branch -1 is inf loop).
+// The instruction's 16-bit offset field addresses 32-bit words, but in
+// the code it is converted to an 18-bit byte offset, hence the -4 value.
-// The link chain is terminated by a negative code position (must be aligned).
const int kEndOfChain = -4;
-bool Assembler::is_branch(Instr instr) {
+
+bool Assembler::IsBranch(Instr instr) {
uint32_t opcode = ((instr & kOpcodeMask));
uint32_t rt_field = ((instr & kRtFieldMask));
uint32_t rs_field = ((instr & kRsFieldMask));
+ uint32_t label_constant = (instr & ~kImm16Mask);
// Checks if the instruction is a branch.
return opcode == BEQ ||
opcode == BNE ||
@@ -320,7 +389,79 @@
opcode == BGTZL||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ label_constant == 0; // Emitted label const in reg-exp engine.
+}
+
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+ // See Assembler::nop(type).
+ ASSERT(type < 32);
+ uint32_t opcode = ((instr & kOpcodeMask));
+ uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+ uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+ uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+ // nop(type) == sll(zero_reg, zero_reg, type);
+ // Technically all these values will be 0 but
+ // this makes more sense to the reader.
+
+ bool ret = (opcode == SLL &&
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ sa == type);
+
+ return ret;
+}
+
+
+int32_t Assembler::GetBranchOffset(Instr instr) {
+ ASSERT(IsBranch(instr));
+ return ((int16_t)(instr & kImm16Mask)) << 2;
+}
+
+
+bool Assembler::IsLw(Instr instr) {
+ return ((instr & kOpcodeMask) == LW);
+}
+
+
+int16_t Assembler::GetLwOffset(Instr instr) {
+ ASSERT(IsLw(instr));
+ return ((instr & kImm16Mask));
+}
+
+
+Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsLw(instr));
+
+ // We actually create a new lw instruction based on the original one.
+ Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
+ | (offset & kImm16Mask);
+
+ return temp_instr;
+}
+
+
+bool Assembler::IsSw(Instr instr) {
+ return ((instr & kOpcodeMask) == SW);
+}
+
+
+Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
+ ASSERT(IsSw(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAddImmediate(Instr instr) {
+ return ((instr & kOpcodeMask) == ADDIU);
+}
+
+
+Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
+ ASSERT(IsAddImmediate(instr));
+ return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
}
@@ -328,16 +469,25 @@
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
- return instr - (Code::kHeaderSize - kHeapObjectTag);
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
}
// Check we have a branch instruction.
- ASSERT(is_branch(instr));
+ ASSERT(IsBranch(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
// the compiler uses arithmetic shifts for signed integers.
- int32_t imm18 = ((instr &
- static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
- return pos + kBranchPCOffset + imm18;
+ if (imm18 == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ return pos + kBranchPCOffset + imm18;
+ }
}
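The shift pair in target_at() relies on arithmetic right shifts to sign-extend the 16-bit word offset into an 18-bit byte offset, and it is what turns the -1 stored in an unbound branch into the kEndOfChain value of -4. A small check of that arithmetic, assuming (as the comment above does) a compiler that shifts signed integers arithmetically:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kImm16Mask = 0xFFFF;  // Same value as the assembler constant.
  // A branch whose 16-bit offset field holds -1, the end-of-chain sentinel.
  int32_t instr = 0x1000FFFF;  // Opcode bits are irrelevant here.
  int32_t imm18 = ((instr & kImm16Mask) << 16) >> 14;  // Sign-extend, then *4.
  assert(imm18 == -4);  // kEndOfChain.

  // An ordinary backward branch of 3 words becomes -12 bytes.
  instr = (instr & ~kImm16Mask) | (static_cast<uint16_t>(-3) & kImm16Mask);
  imm18 = ((instr & kImm16Mask) << 16) >> 14;
  assert(imm18 == -12);
  return 0;
}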
@@ -351,7 +501,7 @@
return;
}
- ASSERT(is_branch(instr));
+ ASSERT(IsBranch(instr));
int32_t imm18 = target_pos - (pos + kBranchPCOffset);
ASSERT((imm18 & 3) == 0);
@@ -388,10 +538,28 @@
void Assembler::bind_to(Label* L, int pos) {
- ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position
+ ASSERT(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
while (L->is_linked()) {
int32_t fixup_pos = L->pos();
- next(L); // call next before overwriting link with target at fixup_pos
+ int32_t dist = pos - fixup_pos;
+ next(L); // Call next before overwriting link with target at fixup_pos.
+ if (dist > kMaxBranchOffset) {
+ do {
+ int32_t trampoline_pos = get_trampoline_entry(fixup_pos);
+ ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+ target_at_put(fixup_pos, trampoline_pos);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
+ } while (dist > kMaxBranchOffset);
+ } else if (dist < -kMaxBranchOffset) {
+ do {
+ int32_t trampoline_pos = get_trampoline_entry(fixup_pos, false);
+ ASSERT((trampoline_pos - fixup_pos) >= -kMaxBranchOffset);
+ target_at_put(fixup_pos, trampoline_pos);
+ fixup_pos = trampoline_pos;
+ dist = pos - fixup_pos;
+ } while (dist < -kMaxBranchOffset);
+ }
target_at_put(fixup_pos, pos);
}
L->bind_to(pos);
@@ -416,16 +584,16 @@
ASSERT(link == kEndOfChain);
target_at_put(fixup_pos, appendix->pos());
} else {
- // L is empty, simply use appendix
+ // L is empty, simply use appendix.
*L = *appendix;
}
}
- appendix->Unuse(); // appendix should not be used anymore
+ appendix->Unuse(); // Appendix should not be used anymore.
}
void Assembler::bind(Label* L) {
- ASSERT(!L->is_bound()); // label can only be bound once
+ ASSERT(!L->is_bound()); // Label can only be bound once.
bind_to(L, pc_offset());
}
@@ -433,11 +601,11 @@
void Assembler::next(Label* L) {
ASSERT(L->is_linked());
int link = target_at(L->pos());
- if (link > 0) {
- L->link_to(link);
- } else {
- ASSERT(link == kEndOfChain);
+ ASSERT(link > 0 || link == kEndOfChain);
+ if (link == kEndOfChain) {
L->Unuse();
+ } else if (link > 0) {
+ L->link_to(link);
}
}
@@ -446,13 +614,8 @@
// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
// space. There is no guarantee that the relocated location can be similarly
// encoded.
-bool Assembler::MustUseAt(RelocInfo::Mode rmode) {
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
- return Serializer::enabled();
- } else if (rmode == RelocInfo::NONE) {
- return false;
- }
- return true;
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return rmode != RelocInfo::NONE;
}
@@ -470,14 +633,28 @@
void Assembler::GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func) {
+ ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+ Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (msb << kRdShift) | (lsb << kSaShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- Instr instr = opcode | fmt | (ft.code() << 16) | (fs.code() << kFsShift)
- | (fd.code() << 6) | func;
+ ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
+ | (fd.code() << kFdShift) | func;
emit(instr);
}
@@ -489,8 +666,22 @@
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+ ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
- | (fs.code() << kFsShift) | (fd.code() << 6) | func;
+ | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+ emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func) {
+ ASSERT(fs.is_valid() && rt.is_valid());
+ ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
+ Instr instr =
+ opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
}
@@ -523,6 +714,7 @@
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+ ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
@@ -532,26 +724,122 @@
// Registers are in the order of the instruction encoding, from left to right.
void Assembler::GenInstrJump(Opcode opcode,
uint32_t address) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
ASSERT(is_uint26(address));
Instr instr = opcode | address;
emit(instr);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+}
+
+
+// Returns the next free label entry from the next trampoline pool.
+int32_t Assembler::get_label_entry(int32_t pos, bool next_pool) {
+ int trampoline_count = trampolines_.length();
+ int32_t label_entry = 0;
+ ASSERT(trampoline_count > 0);
+
+ if (next_pool) {
+ for (int i = 0; i < trampoline_count; i++) {
+ if (trampolines_[i].start() > pos) {
+ label_entry = trampolines_[i].take_label();
+ break;
+ }
+ }
+ } else { // Caller needs a label entry from the previous pool.
+ for (int i = trampoline_count-1; i >= 0; i--) {
+ if (trampolines_[i].end() < pos) {
+ label_entry = trampolines_[i].take_label();
+ break;
+ }
+ }
+ }
+ return label_entry;
+}
+
+
+// Returns the next free trampoline entry from the next trampoline pool.
+int32_t Assembler::get_trampoline_entry(int32_t pos, bool next_pool) {
+ int trampoline_count = trampolines_.length();
+ int32_t trampoline_entry = 0;
+ ASSERT(trampoline_count > 0);
+
+ if (next_pool) {
+ for (int i = 0; i < trampoline_count; i++) {
+ if (trampolines_[i].start() > pos) {
+ trampoline_entry = trampolines_[i].take_slot();
+ break;
+ }
+ }
+ } else { // Caller needs a trampoline entry from the previous pool.
+ for (int i = trampoline_count-1; i >= 0; i--) {
+ if (trampolines_[i].end() < pos) {
+ trampoline_entry = trampolines_[i].take_slot();
+ break;
+ }
+ }
+ }
+ return trampoline_entry;
}
int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
int32_t target_pos;
+ int32_t pc_offset_v = pc_offset();
+
if (L->is_bound()) {
target_pos = L->pos();
+ int32_t dist = pc_offset_v - target_pos;
+ if (dist > kMaxBranchOffset) {
+ do {
+ int32_t trampoline_pos = get_trampoline_entry(target_pos);
+ ASSERT((trampoline_pos - target_pos) > 0);
+ ASSERT((trampoline_pos - target_pos) <= kMaxBranchOffset);
+ target_at_put(trampoline_pos, target_pos);
+ target_pos = trampoline_pos;
+ dist = pc_offset_v - target_pos;
+ } while (dist > kMaxBranchOffset);
+ } else if (dist < -kMaxBranchOffset) {
+ do {
+ int32_t trampoline_pos = get_trampoline_entry(target_pos, false);
+ ASSERT((target_pos - trampoline_pos) > 0);
+ ASSERT((target_pos - trampoline_pos) <= kMaxBranchOffset);
+ target_at_put(trampoline_pos, target_pos);
+ target_pos = trampoline_pos;
+ dist = pc_offset_v - target_pos;
+ } while (dist < -kMaxBranchOffset);
+ }
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ target_pos = L->pos(); // L's link.
+ int32_t dist = pc_offset_v - target_pos;
+ if (dist > kMaxBranchOffset) {
+ do {
+ int32_t label_pos = get_label_entry(target_pos);
+ ASSERT((label_pos - target_pos) < kMaxBranchOffset);
+ label_at_put(L, label_pos);
+ target_pos = label_pos;
+ dist = pc_offset_v - target_pos;
+ } while (dist > kMaxBranchOffset);
+ } else if (dist < -kMaxBranchOffset) {
+ do {
+ int32_t label_pos = get_label_entry(target_pos, false);
+ ASSERT((label_pos - target_pos) > -kMaxBranchOffset);
+ label_at_put(L, label_pos);
+ target_pos = label_pos;
+ dist = pc_offset_v - target_pos;
+ } while (dist < -kMaxBranchOffset);
+ }
+ L->link_to(pc_offset());
} else {
- target_pos = kEndOfChain;
+ L->link_to(pc_offset());
+ return kEndOfChain;
}
- L->link_to(pc_offset());
}
int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ ASSERT((offset & 3) == 0);
+ ASSERT(is_int16(offset >> 2));
+
return offset;
}
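branch_offset() only hands back offsets that still fit the encoding (the two ASSERTs at the end); anything farther is routed through a trampoline slot, which is what the kMaxBranchOffset loops above arrange. The reach implied by the encoding itself is easy to check: a signed 16-bit word offset scaled by 4, so roughly +/-128 KB. A tiny sanity sketch (the exact kMaxBranchOffset constant lives in assembler-mips.h and is not part of this diff):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kMaxWordOffset = (1 << 15) - 1;        // Largest positive imm16.
  const int32_t kMaxByteOffset = kMaxWordOffset << 2;  // Scaled to bytes.
  assert(kMaxByteOffset == 131068);                    // Just under 128 KB.
  return 0;
}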
@@ -560,14 +848,20 @@
int target_pos;
if (L->is_bound()) {
target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
} else {
if (L->is_linked()) {
- target_pos = L->pos(); // L's link
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ ASSERT((imm18 & 3) == 0);
+ int32_t imm16 = imm18 >> 2;
+ ASSERT(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
} else {
target_pos = kEndOfChain;
+ instr_at_put(at_offset, 0);
}
L->link_to(at_offset);
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
}
}
@@ -580,47 +874,66 @@
void Assembler::bal(int16_t offset) {
+ positions_recorder()->WriteRecordedPositions();
bgezal(zero_reg, offset);
}
void Assembler::beq(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BEQ, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgezal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bgtz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::blez(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltz(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bltzal(Register rs, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::bne(Register rs, Register rt, int16_t offset) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BNE, rs, rt, offset);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -631,18 +944,27 @@
void Assembler::jr(Register rs) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
void Assembler::jal(int32_t target) {
+ positions_recorder()->WriteRecordedPositions();
ASSERT(is_uint28(target) && ((target & 3) == 0));
GenInstrJump(JAL, target >> 2);
}
void Assembler::jalr(Register rs, Register rd) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ positions_recorder()->WriteRecordedPositions();
GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -650,28 +972,164 @@
// Arithmetic.
-void Assembler::add(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADD);
-}
-
-
void Assembler::addu(Register rd, Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
}
-void Assembler::addi(Register rd, Register rs, int32_t j) {
- GenInstrImmediate(ADDI, rs, rd, j);
-}
-
-
void Assembler::addiu(Register rd, Register rs, int32_t j) {
GenInstrImmediate(ADDIU, rs, rd, j);
-}
+ // Eliminate pattern: push(r), pop().
+ // addiu(sp, sp, Operand(-kPointerSize));
+ // sw(src, MemOperand(sp, 0));
+ // addiu(sp, sp, Operand(kPointerSize));
+ // Both instructions can be eliminated.
+ if (can_peephole_optimize(3) &&
+ // Pattern.
+ instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+ (instr_at(pc_ - 2 * kInstrSize) & ~kRtMask) == kPushRegPattern &&
+ (instr_at(pc_ - 3 * kInstrSize)) == kPushInstruction) {
+ pc_ -= 3 * kInstrSize;
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+ }
+ }
-void Assembler::sub(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUB);
+ // Eliminate pattern: push(ry), pop(rx).
+ // addiu(sp, sp, -kPointerSize);
+ // sw(ry, MemOperand(sp, 0));
+ // lw(rx, MemOperand(sp, 0));
+ // addiu(sp, sp, kPointerSize);
+ // Both instructions can be eliminated if ry = rx.
+ // If ry != rx, a register copy from ry to rx is inserted
+ // after eliminating the push and the pop instructions.
+ if (can_peephole_optimize(4)) {
+ Instr pre_push_sp_set = instr_at(pc_ - 4 * kInstrSize);
+ Instr push_instr = instr_at(pc_ - 3 * kInstrSize);
+ Instr pop_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
+
+ if (IsPush(push_instr) &&
+ IsPop(pop_instr) && pre_push_sp_set == kPushInstruction &&
+ post_pop_sp_set == kPopInstruction) {
+ if ((pop_instr & kRtMask) != (push_instr & kRtMask)) {
+ // For consecutive push and pop on different registers,
+ // we delete both the push & pop and insert a register move.
+ // push ry, pop rx --> mov rx, ry.
+ Register reg_pushed, reg_popped;
+ reg_pushed = GetRt(push_instr);
+ reg_popped = GetRt(pop_instr);
+ pc_ -= 4 * kInstrSize;
+ // Insert a mov instruction, which is better than a pair of push & pop.
+ or_(reg_popped, reg_pushed, zero_reg);
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (diff reg) replaced by a reg move\n",
+ pc_offset());
+ }
+ } else {
+ // For consecutive push and pop on the same register,
+ // both the push and the pop can be deleted.
+ pc_ -= 4 * kInstrSize;
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ }
+ }
+ }
+ }
+
+ if (can_peephole_optimize(5)) {
+ Instr pre_push_sp_set = instr_at(pc_ - 5 * kInstrSize);
+ Instr mem_write_instr = instr_at(pc_ - 4 * kInstrSize);
+ Instr lw_instr = instr_at(pc_ - 3 * kInstrSize);
+ Instr mem_read_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr post_pop_sp_set = instr_at(pc_ - 1 * kInstrSize);
+
+ if (IsPush(mem_write_instr) &&
+ pre_push_sp_set == kPushInstruction &&
+ IsPop(mem_read_instr) &&
+ post_pop_sp_set == kPopInstruction) {
+ if ((IsLwRegFpOffset(lw_instr) ||
+ IsLwRegFpNegOffset(lw_instr))) {
+ if ((mem_write_instr & kRtMask) ==
+ (mem_read_instr & kRtMask)) {
+ // Pattern: push & pop from/to same register,
+ // with a fp+offset lw in between.
+ //
+ // The following:
+ // addiu sp, sp, -4
+ // sw rx, [sp, 0]
+ // lw rz, [fp, #-24]
+ // lw rx, [sp, 0]
+ // addiu sp, sp, 4
+ //
+ // Becomes:
+ // if(rx == rz)
+ // delete all
+ // else
+ // lw rz, [fp, #-24]
+
+ if ((mem_write_instr & kRtMask) == (lw_instr & kRtMask)) {
+ pc_ -= 5 * kInstrSize;
+ } else {
+ pc_ -= 5 * kInstrSize;
+ // Reinsert back the lw rz.
+ emit(lw_instr);
+ }
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
+ }
+ } else {
+ // Pattern: push & pop from/to different registers
+ // with a fp + offset lw in between.
+ //
+ // The following:
+ // addiu sp, sp, -4
+ // sw rx, [sp, 0]
+ // lw rz, [fp, #-24]
+ // lw ry, [sp, 0]
+ // addiu sp, sp, 4
+ //
+ // Becomes:
+ // if(ry == rz)
+ // mov ry, rx;
+ // else if(rx != rz)
+ // lw rz, [fp, #-24]
+ // mov ry, rx
+ // else if((ry != rz) || (rx == rz)) becomes:
+ // mov ry, rx
+ // lw rz, [fp, #-24]
+
+ Register reg_pushed, reg_popped;
+ if ((mem_read_instr & kRtMask) == (lw_instr & kRtMask)) {
+ reg_pushed = GetRt(mem_write_instr);
+ reg_popped = GetRt(mem_read_instr);
+ pc_ -= 5 * kInstrSize;
+ or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
+ } else if ((mem_write_instr & kRtMask)
+ != (lw_instr & kRtMask)) {
+ reg_pushed = GetRt(mem_write_instr);
+ reg_popped = GetRt(mem_read_instr);
+ pc_ -= 5 * kInstrSize;
+ emit(lw_instr);
+ or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
+ } else if (((mem_read_instr & kRtMask)
+ != (lw_instr & kRtMask)) ||
+ ((mem_write_instr & kRtMask)
+ == (lw_instr & kRtMask)) ) {
+ reg_pushed = GetRt(mem_write_instr);
+ reg_popped = GetRt(mem_read_instr);
+ pc_ -= 5 * kInstrSize;
+ or_(reg_popped, reg_pushed, zero_reg); // Move instruction.
+ emit(lw_instr);
+ }
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
+ }
+ }
+ }
+ }
+ }
}
@@ -743,7 +1201,15 @@
// Shifts.
-void Assembler::sll(Register rd, Register rt, uint16_t sa) {
+void Assembler::sll(Register rd,
+ Register rt,
+ uint16_t sa,
+ bool coming_from_nop) {
+ // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
+ // generated using the sll instruction. They must be generated using
+ // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
+ // instructions.
+ ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
}
@@ -773,30 +1239,199 @@
}
+void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+ ASSERT(mips32r2);
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+ emit(instr);
+}
+
+
+void Assembler::rotrv(Register rd, Register rt, Register rs) {
+ // Should be called via MacroAssembler::Ror.
+ ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid());
+ ASSERT(mips32r2);
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+ emit(instr);
+}
+
+
//------------Memory-instructions-------------
+// Helper for base-reg + offset, when offset is larger than int16.
+void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+ ASSERT(!src.rm().is(at));
+ lui(at, src.offset_ >> kLuiShift);
+ ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
+ addu(at, at, src.rm()); // Add base register.
+}
+
+
void Assembler::lb(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LB, at, rd, 0); // Equiv to lb(rd, MemOperand(at, 0));
+ }
}
void Assembler::lbu(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LBU, at, rd, 0); // Equiv to lbu(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LH, at, rd, 0); // Equiv to lh(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::lhu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LHU, at, rd, 0); // Equiv to lhu(rd, MemOperand(at, 0));
+ }
}
void Assembler::lw(Register rd, const MemOperand& rs) {
- GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LW, at, rd, 0); // Equiv to lw(rd, MemOperand(at, 0));
+ }
+
+ if (can_peephole_optimize(2)) {
+ Instr sw_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr lw_instr = instr_at(pc_ - 1 * kInstrSize);
+
+ if ((IsSwRegFpOffset(sw_instr) &&
+ IsLwRegFpOffset(lw_instr)) ||
+ (IsSwRegFpNegOffset(sw_instr) &&
+ IsLwRegFpNegOffset(lw_instr))) {
+ if ((lw_instr & kLwSwInstrArgumentMask) ==
+ (sw_instr & kLwSwInstrArgumentMask)) {
+ // Pattern: Lw/sw same fp+offset, same register.
+ //
+ // The following:
+ // sw rx, [fp, #-12]
+ // lw rx, [fp, #-12]
+ //
+ // Becomes:
+ // sw rx, [fp, #-12]
+
+ pc_ -= 1 * kInstrSize;
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x sw/lw (fp + same offset), same reg\n", pc_offset());
+ }
+ } else if ((lw_instr & kLwSwOffsetMask) ==
+ (sw_instr & kLwSwOffsetMask)) {
+ // Pattern: Lw/sw same fp+offset, different register.
+ //
+ // The following:
+ // sw rx, [fp, #-12]
+ // lw ry, [fp, #-12]
+ //
+ // Becomes:
+ // sw rx, [fp, #-12]
+ // mov ry, rx
+
+ Register reg_stored, reg_loaded;
+ reg_stored = GetRt(sw_instr);
+ reg_loaded = GetRt(lw_instr);
+ pc_ -= 1 * kInstrSize;
+ // Insert a mov instruction, which is better than lw.
+ or_(reg_loaded, reg_stored, zero_reg); // Move instruction.
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x sw/lw (fp + same offset), diff reg \n", pc_offset());
+ }
+ }
+ }
+ }
+}
+
+
+void Assembler::lwl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lwr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
}
void Assembler::sb(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SB, at, rd, 0); // Equiv to sb(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::sh(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SH, at, rd, 0); // Equiv to sh(rd, MemOperand(at, 0));
+ }
}
void Assembler::sw(Register rd, const MemOperand& rs) {
- GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SW, at, rd, 0); // Equiv to sw(rd, MemOperand(at, 0));
+ }
+
+ // Eliminate pattern: pop(), push(r).
+ // addiu sp, sp, Operand(kPointerSize);
+ // addiu sp, sp, Operand(-kPointerSize);
+ // -> sw r, MemOperand(sp, 0);
+ if (can_peephole_optimize(3) &&
+ // Pattern.
+ instr_at(pc_ - 1 * kInstrSize) ==
+ (kPushRegPattern | (rd.code() << kRtShift)) &&
+ instr_at(pc_ - 2 * kInstrSize) == kPushInstruction &&
+ instr_at(pc_ - 3 * kInstrSize) == kPopInstruction) {
+ pc_ -= 3 * kInstrSize;
+ GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
+ }
+ }
+}
+
+
+void Assembler::swl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::swr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
}
@@ -841,7 +1476,8 @@
void Assembler::tltu(Register rs, Register rt, uint16_t code) {
ASSERT(is_uint10(code));
- Instr instr = SPECIAL | TLTU | rs.code() << kRsShift
+ Instr instr =
+ SPECIAL | TLTU | rs.code() << kRsShift
| rt.code() << kRtShift | code << 6;
emit(instr);
}
@@ -896,6 +1532,54 @@
}
+// Conditional move.
+void Assembler::movz(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
+}
+
+
+void Assembler::movn(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
+}
+
+
+void Assembler::movt(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0003) << 2 | 1;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+void Assembler::movf(Register rd, Register rs, uint16_t cc) {
+ Register rt;
+ rt.code_ = (cc & 0x0003) << 2 | 0;
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+// Bit twiddling.
+void Assembler::clz(Register rd, Register rs) {
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+}
+
+
+void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ins.
+ // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(mips32r2);
+ GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
+}
+
+
+void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+ // Should be called via MacroAssembler::Ext.
+ // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+ ASSERT(mips32r2);
+ GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
+}
+
+
//--------Coprocessor-instructions----------------
// Load, store, move.
@@ -905,7 +1589,12 @@
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+ // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+ // load to two 32-bit loads.
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
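
With this workaround a double access is issued as two word accesses on an FPU register pair: the second register is fd.code() + 1 and the offset is bumped by 4, as the code above shows. For example:

    // ldc1(f4, MemOperand(a0, 8)) now emits:
    //   lwc1  f4, 8(a0)
    //   lwc1  f5, 12(a0)
    // sdc1 below is split the same way into two swc1 stores.
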
@@ -915,27 +1604,74 @@
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+ // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
+ // store to two 32-bit stores.
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ + 4);
}
-void Assembler::mtc1(FPURegister fs, Register rt) {
+void Assembler::mtc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MTC1, rt, fs, f0);
}
-void Assembler::mthc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MTHC1, rt, fs, f0);
-}
-
-
-void Assembler::mfc1(FPURegister fs, Register rt) {
+void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
-void Assembler::mfhc1(FPURegister fs, Register rt) {
- GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+void Assembler::ctc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CTC1, rt, fs);
+}
+
+
+void Assembler::cfc1(Register rt, FPUControlRegister fs) {
+ GenInstrRegister(COP1, CFC1, rt, fs);
+}
+
+
+// Arithmetic.
+
+void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
+}
+
+
+void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
+}
+
+
+void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
+}
+
+
+void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+ GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
+}
+
+
+void Assembler::abs_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
+}
+
+
+void Assembler::mov_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+}
+
+
+void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
+}
+
+
+void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
}
@@ -951,22 +1687,107 @@
}
+void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
+}
+
+
+void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
+}
+
+
+void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
+}
+
+
+void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
+}
+
+
+void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
+}
+
+
+void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
+}
+
+
+void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
+}
+
+
+void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
+}
+
+
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
+void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
+ GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
+}
+
+
+void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
+ GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
+}
+
+
+void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
+}
+
+
+void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
+}
+
+
+void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
+}
+
+
+void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
+}
+
+
+void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
+}
+
+
+void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+ GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -982,6 +1803,7 @@
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+ ASSERT(mips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -993,7 +1815,8 @@
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
- FPURegister ft, FPURegister fs, uint16_t cc) {
+ FPURegister fs, FPURegister ft, uint16_t cc) {
+ ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1002,7 +1825,18 @@
}
+void Assembler::fcmp(FPURegister src1, const double src2,
+ FPUCondition cond) {
+ ASSERT(isolate()->cpu_features()->IsSupported(FPU));
+ ASSERT(src2 == 0.0);
+ mtc1(zero_reg, f14);
+ cvt_d_w(f14, f14);
+ c(cond, D, src1, f14, 0);
+}
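
fcmp only supports comparison against 0.0 (see the ASSERT) and materializes that constant in f14 first, so f14 is clobbered. A minimal usage sketch, assuming an FPUCondition value EQ from constants-mips.h:

    void BranchIfZero(Assembler* assm, Label* is_zero) {
      assm->fcmp(f12, 0.0, EQ);  // mtc1 zero_reg, f14; cvt.d.w f14, f14; c.eq.d f12, f14.
      assm->bc1t(is_zero);       // Taken when the FPU condition flag is set.
      assm->nop();               // Branch delay slot.
    }
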
+
+
void Assembler::bc1f(int16_t offset, uint16_t cc) {
+ ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1010,6 +1844,7 @@
void Assembler::bc1t(int16_t offset, uint16_t cc) {
+ ASSERT(isolate()->cpu_features()->IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1018,66 +1853,32 @@
// Debugging.
void Assembler::RecordJSReturn() {
- WriteRecordedPositions();
+ positions_recorder()->WriteRecordedPositions();
CheckBuffer();
RecordRelocInfo(RelocInfo::JS_RETURN);
}
+void Assembler::RecordDebugBreakSlot() {
+ positions_recorder()->WriteRecordedPositions();
+ CheckBuffer();
+ RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
void Assembler::RecordComment(const char* msg) {
- if (FLAG_debug_code) {
+ if (FLAG_code_comments) {
CheckBuffer();
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
}
-void Assembler::RecordPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_position_ = pos;
-}
-
-
-void Assembler::RecordStatementPosition(int pos) {
- if (pos == RelocInfo::kNoPosition) return;
- ASSERT(pos >= 0);
- current_statement_position_ = pos;
-}
-
-
-bool Assembler::WriteRecordedPositions() {
- bool written = false;
-
- // Write the statement position if it is different from what was written last
- // time.
- if (current_statement_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
- written_statement_position_ = current_statement_position_;
- written = true;
- }
-
- // Write the position if it is different from what was written last time and
- // also different from the written statement position.
- if (current_position_ != written_position_ &&
- current_position_ != written_statement_position_) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::POSITION, current_position_);
- written_position_ = current_position_;
- written = true;
- }
-
- // Return whether something was written.
- return written;
-}
-
-
void Assembler::GrowBuffer() {
if (!own_buffer_) FATAL("external code buffer is too small");
// Compute new buffer size.
- CodeDesc desc; // the new buffer
+ CodeDesc desc; // The new buffer.
if (buffer_size_ < 4*KB) {
desc.buffer_size = 4*KB;
} else if (buffer_size_ < 1*MB) {
@@ -1085,7 +1886,7 @@
} else {
desc.buffer_size = buffer_size_ + 1*MB;
}
- CHECK_GT(desc.buffer_size, 0); // no overflow
+ CHECK_GT(desc.buffer_size, 0); // No overflow.
// Setup new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
@@ -1108,7 +1909,6 @@
reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.last_pc() + pc_delta);
-
// On ia32 and ARM pc relative addressing is used, and we thus need to apply a
 // shift by pc_delta. But on MIPS the target address is directly loaded, so
// we do not need to relocate here.
@@ -1117,11 +1917,26 @@
}
+void Assembler::db(uint8_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+ CheckBuffer();
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
- RelocInfo rinfo(pc_, rmode, data); // we do not try to reuse pool constants
- if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
+ RelocInfo rinfo(pc_, rmode, data); // We do not try to reuse pool constants.
+ if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
// Adjust code for new modes.
- ASSERT(RelocInfo::IsJSReturn(rmode)
+ ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+ || RelocInfo::IsJSReturn(rmode)
|| RelocInfo::IsComment(rmode)
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
@@ -1133,12 +1948,72 @@
!FLAG_debug_code) {
return;
}
- ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
+ ASSERT(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
reloc_info_writer.Write(&rinfo);
}
}
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool(bool force_emit) {
+ // Calculate the offset of the next check.
+ next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+ int dist = pc_offset() - last_trampoline_pool_end_;
+
+ if (dist <= kMaxDistBetweenPools && !force_emit) {
+ return;
+ }
+
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ // First we emit jump (2 instructions), then we emit trampoline pool.
+ { BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+ nop();
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < kSlotsPerTrampoline; i++) {
+ b(&after_pool);
+ nop();
+ }
+ for (int i = 0; i < kLabelsPerTrampoline; i++) {
+ emit(0);
+ }
+ last_trampoline_pool_end_ = pc_offset() - kInstrSize;
+ bind(&after_pool);
+ trampolines_.Add(Trampoline(pool_start,
+ kSlotsPerTrampoline,
+ kLabelsPerTrampoline));
+
+ // Since a trampoline pool was just emitted,
+ // move the check offset forward by the standard interval.
+ next_buffer_check_ = last_trampoline_pool_end_ + kMaxDistBetweenPools;
+ }
+ return;
+}
+
+
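
The blocking checked above is what BlockTrampolinePoolScope (declared in assembler-mips.h below) manipulates; a typical use is keeping a branch and its delay slot from being split by a pool. A sketch:

    void EmitBranchWithDelaySlot(Assembler* assm, Label* target) {
      Assembler::BlockTrampolinePoolScope block_pool(assm);
      assm->b(target);
      assm->nop();  // Delay slot; must stay adjacent to the branch.
    }
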
Address Assembler::target_address_at(Address pc) {
Instr instr1 = instr_at(pc);
Instr instr2 = instr_at(pc + kInstrSize);
@@ -1157,7 +2032,7 @@
return reinterpret_cast<Address>((instr2 & kImm16Mask) << 16);
}
} else if ((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) {
- // 32 bits value.
+ // 32 bit value.
return reinterpret_cast<Address>(
(instr1 & kImm16Mask) << 16 | (instr2 & kImm16Mask));
}
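
For the LUI/ORI case the 32-bit target is simply the two 16-bit immediates concatenated. A worked example, assuming kImm16Mask is 0xffff:

    //   lui  t9, 0x1234
    //   ori  t9, t9, 0x5678
    // target_address_at() returns (0x1234 << 16) | 0x5678 == 0x12345678.
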
@@ -1176,38 +2051,37 @@
#ifdef DEBUG
Instr instr1 = instr_at(pc);
- // Check we have indeed the result from a li with MustUseAt true.
+ // Check we have indeed the result from a li with MustUseReg true.
CHECK(((instr1 & kOpcodeMask) == LUI && (instr2 & kOpcodeMask) == ORI) ||
((instr1 == 0) && ((instr2 & kOpcodeMask)== ADDIU ||
(instr2 & kOpcodeMask)== ORI ||
(instr2 & kOpcodeMask)== LUI)));
#endif
-
uint32_t rt_code = (instr2 & kRtFieldMask);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
uint32_t itarget = reinterpret_cast<uint32_t>(target);
if (is_int16(itarget)) {
- // nop
- // addiu rt zero_reg j
+ // nop.
+ // addiu rt zero_reg j.
*p = nopInstr;
- *(p+1) = ADDIU | rt_code | (itarget & LOMask);
- } else if (!(itarget & HIMask)) {
- // nop
- // ori rt zero_reg j
+ *(p+1) = ADDIU | rt_code | (itarget & kImm16Mask);
+ } else if (!(itarget & kHiMask)) {
+ // nop.
+ // ori rt zero_reg j.
*p = nopInstr;
- *(p+1) = ORI | rt_code | (itarget & LOMask);
- } else if (!(itarget & LOMask)) {
- // nop
- // lui rt (HIMask & itarget)>>16
+ *(p+1) = ORI | rt_code | (itarget & kImm16Mask);
+ } else if (!(itarget & kImm16Mask)) {
+ // nop.
+ // lui rt (kHiMask & itarget) >> kLuiShift.
*p = nopInstr;
- *(p+1) = LUI | rt_code | ((itarget & HIMask)>>16);
+ *(p+1) = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
} else {
- // lui rt (HIMask & itarget)>>16
- // ori rt rt, (LOMask & itarget)
- *p = LUI | rt_code | ((itarget & HIMask)>>16);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & LOMask);
+ // lui rt (kHiMask & itarget) >> kLuiShift.
+ // ori rt rt, (kImm16Mask & itarget).
+ *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
+ *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
}
CPU::FlushICache(pc, 2 * sizeof(int32_t));
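
The patching above always rewrites the same two instruction slots; which pair is chosen depends on how the new target decomposes. Worked examples for each branch, assuming kHiMask is 0xffff0000 and kImm16Mask is 0xffff:

    //   target 0x00001234:  nop             / addiu rt, zero_reg, 0x1234
    //   target 0x0000f000:  nop             / ori   rt, zero_reg, 0xf000
    //   target 0x12340000:  nop             / lui   rt, 0x1234
    //   target 0x12345678:  lui rt, 0x1234  / ori   rt, rt, 0x5678
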
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index a687c2b..5a6e271 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -41,8 +41,6 @@
#include "constants-mips.h"
#include "serialize.h"
-using namespace assembler::mips;
-
namespace v8 {
namespace internal {
@@ -73,6 +71,44 @@
// Core register.
struct Register {
+ static const int kNumRegisters = v8::internal::kNumRegisters;
+ static const int kNumAllocatableRegisters = 14; // v0 through t7
+
+ static int ToAllocationIndex(Register reg) {
+ return reg.code() - 2; // zero_reg and 'at' are skipped.
+ }
+
+ static Register FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ return from_code(index + 2); // zero_reg and 'at' are skipped.
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "v0",
+ "v1",
+ "a0",
+ "a1",
+ "a2",
+ "a3",
+ "t0",
+ "t1",
+ "t2",
+ "t3",
+ "t4",
+ "t5",
+ "t6",
+ "t7",
+ };
+ return names[index];
+ }
+
+ static Register from_code(int code) {
+ Register r = { code };
+ return r;
+ }
+
bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
bool is(Register reg) const { return code_ == reg.code_; }
int code() const {
@@ -88,40 +124,41 @@
int code_;
};
-extern const Register no_reg;
+const Register no_reg = { -1 };
-extern const Register zero_reg;
-extern const Register at;
-extern const Register v0;
-extern const Register v1;
-extern const Register a0;
-extern const Register a1;
-extern const Register a2;
-extern const Register a3;
-extern const Register t0;
-extern const Register t1;
-extern const Register t2;
-extern const Register t3;
-extern const Register t4;
-extern const Register t5;
-extern const Register t6;
-extern const Register t7;
-extern const Register s0;
-extern const Register s1;
-extern const Register s2;
-extern const Register s3;
-extern const Register s4;
-extern const Register s5;
-extern const Register s6;
-extern const Register s7;
-extern const Register t8;
-extern const Register t9;
-extern const Register k0;
-extern const Register k1;
-extern const Register gp;
-extern const Register sp;
-extern const Register s8_fp;
-extern const Register ra;
+const Register zero_reg = { 0 };
+const Register at = { 1 };
+const Register v0 = { 2 };
+const Register v1 = { 3 };
+const Register a0 = { 4 };
+const Register a1 = { 5 };
+const Register a2 = { 6 };
+const Register a3 = { 7 };
+const Register t0 = { 8 };
+const Register t1 = { 9 };
+const Register t2 = { 10 };
+const Register t3 = { 11 };
+const Register t4 = { 12 };
+const Register t5 = { 13 };
+const Register t6 = { 14 };
+const Register t7 = { 15 };
+const Register s0 = { 16 };
+const Register s1 = { 17 };
+const Register s2 = { 18 };
+const Register s3 = { 19 };
+const Register s4 = { 20 };
+const Register s5 = { 21 };
+const Register s6 = { 22 };
+const Register s7 = { 23 };
+const Register t8 = { 24 };
+const Register t9 = { 25 };
+const Register k0 = { 26 };
+const Register k1 = { 27 };
+const Register gp = { 28 };
+const Register sp = { 29 };
+const Register s8_fp = { 30 };
+const Register ra = { 31 };
+
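
The allocation-index mapping above simply skips zero_reg and at, so allocatable indices 0 through 13 correspond to v0 through t7. For instance:

    //   Register::ToAllocationIndex(a0)          == 2   (a0 has code 4)
    //   Register::FromAllocationIndex(2).code()  == 4
    //   Register::AllocationIndexToString(2)     == "a0"
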
int ToNumber(Register reg);
@@ -129,7 +166,50 @@
// Coprocessor register.
struct FPURegister {
- bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegister ; }
+ static const int kNumRegisters = v8::internal::kNumFPURegisters;
+  // f0 has been excluded from allocation. This follows ia32, where
+  // xmm0 is excluded.
+ static const int kNumAllocatableRegisters = 15;
+
+ static int ToAllocationIndex(FPURegister reg) {
+ ASSERT(reg.code() != 0);
+ ASSERT(reg.code() % 2 == 0);
+ return (reg.code() / 2) - 1;
+ }
+
+ static FPURegister FromAllocationIndex(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ return from_code((index + 1) * 2);
+ }
+
+ static const char* AllocationIndexToString(int index) {
+ ASSERT(index >= 0 && index < kNumAllocatableRegisters);
+ const char* const names[] = {
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26",
+ "f28",
+ "f30"
+ };
+ return names[index];
+ }
+
+ static FPURegister from_code(int code) {
+ FPURegister r = { code };
+ return r;
+ }
+
+ bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; }
bool is(FPURegister creg) const { return code_ == creg.code_; }
int code() const {
ASSERT(is_valid());
@@ -139,84 +219,77 @@
ASSERT(is_valid());
return 1 << code_;
}
-
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
// Unfortunately we can't make this private in a struct.
int code_;
};
-extern const FPURegister no_creg;
+typedef FPURegister DoubleRegister;
-extern const FPURegister f0;
-extern const FPURegister f1;
-extern const FPURegister f2;
-extern const FPURegister f3;
-extern const FPURegister f4;
-extern const FPURegister f5;
-extern const FPURegister f6;
-extern const FPURegister f7;
-extern const FPURegister f8;
-extern const FPURegister f9;
-extern const FPURegister f10;
-extern const FPURegister f11;
-extern const FPURegister f12; // arg
-extern const FPURegister f13;
-extern const FPURegister f14; // arg
-extern const FPURegister f15;
-extern const FPURegister f16;
-extern const FPURegister f17;
-extern const FPURegister f18;
-extern const FPURegister f19;
-extern const FPURegister f20;
-extern const FPURegister f21;
-extern const FPURegister f22;
-extern const FPURegister f23;
-extern const FPURegister f24;
-extern const FPURegister f25;
-extern const FPURegister f26;
-extern const FPURegister f27;
-extern const FPURegister f28;
-extern const FPURegister f29;
-extern const FPURegister f30;
-extern const FPURegister f31;
+const FPURegister no_creg = { -1 };
+const FPURegister f0 = { 0 }; // Return value in hard float mode.
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 }; // Arg 0 in hard float mode.
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 }; // Arg 1 in hard float mode.
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
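
Only the even-numbered FPU registers are allocatable and f0 is reserved, so allocation index i maps to register f((i + 1) * 2). For instance:

    //   FPURegister::ToAllocationIndex(f14)         == 6
    //   FPURegister::FromAllocationIndex(6).code()  == 14
    //   FPURegister::AllocationIndexToString(6)     == "f14"
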
-// Returns the equivalent of !cc.
-// Negation of the default no_condition (-1) results in a non-default
-// no_condition value (-2). As long as tests for no_condition check
-// for condition < 0, this will work as expected.
-inline Condition NegateCondition(Condition cc);
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+ static const int kFCSRRegister = 31;
+ static const int kInvalidFPUControlRegister = -1;
-inline Condition ReverseCondition(Condition cc) {
- switch (cc) {
- case Uless:
- return Ugreater;
- case Ugreater:
- return Uless;
- case Ugreater_equal:
- return Uless_equal;
- case Uless_equal:
- return Ugreater_equal;
- case less:
- return greater;
- case greater:
- return less;
- case greater_equal:
- return less_equal;
- case less_equal:
- return greater_equal;
- default:
- return cc;
- };
-}
-
-
-enum Hint {
- no_hint = 0
+ bool is_valid() const { return code_ == kFCSRRegister; }
+ bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+ int code() const {
+ ASSERT(is_valid());
+ return code_;
+ }
+ int bit() const {
+ ASSERT(is_valid());
+ return 1 << code_;
+ }
+ void setcode(int f) {
+ code_ = f;
+ ASSERT(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int code_;
};
-inline Hint NegateHint(Hint hint) {
- return no_hint;
-}
+const FPUControlRegister no_fpucreg = { -1 };
+const FPUControlRegister FCSR = { kFCSRRegister };
// -----------------------------------------------------------------------------
@@ -258,16 +331,75 @@
class MemOperand : public Operand {
public:
- explicit MemOperand(Register rn, int16_t offset = 0);
+ explicit MemOperand(Register rn, int32_t offset = 0);
private:
- int16_t offset_;
+ int32_t offset_;
friend class Assembler;
};
-class Assembler : public Malloced {
+// CpuFeatures keeps track of which features are supported by the target CPU.
+// Supported features must be enabled by a Scope before use.
+class CpuFeatures {
+ public:
+ // Detect features of the target CPU. Set safe defaults if the serializer
+ // is enabled (snapshots must be portable).
+ void Probe(bool portable);
+
+ // Check whether a feature is supported by the target CPU.
+ bool IsSupported(CpuFeature f) const {
+ if (f == FPU && !FLAG_enable_fpu) return false;
+ return (supported_ & (1u << f)) != 0;
+ }
+
+ // Check whether a feature is currently enabled.
+ bool IsEnabled(CpuFeature f) const {
+ return (enabled_ & (1u << f)) != 0;
+ }
+
+ // Enable a specified feature within a scope.
+ class Scope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ explicit Scope(CpuFeature f)
+ : cpu_features_(Isolate::Current()->cpu_features()),
+ isolate_(Isolate::Current()) {
+ ASSERT(cpu_features_->IsSupported(f));
+ ASSERT(!Serializer::enabled() ||
+ (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0);
+ old_enabled_ = cpu_features_->enabled_;
+ cpu_features_->enabled_ |= 1u << f;
+ }
+ ~Scope() {
+ ASSERT_EQ(Isolate::Current(), isolate_);
+ cpu_features_->enabled_ = old_enabled_;
+ }
+ private:
+ unsigned old_enabled_;
+ CpuFeatures* cpu_features_;
+ Isolate* isolate_;
+#else
+ public:
+ explicit Scope(CpuFeature f) {}
+#endif
+ };
+
+ private:
+ CpuFeatures();
+
+ unsigned supported_;
+ unsigned enabled_;
+ unsigned found_by_runtime_probing_;
+
+ friend class Isolate;
+
+ DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
+};
+
+
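
Code generators are expected to check IsSupported and then open a Scope before emitting instructions the feature gates; the assembler then ASSERTs IsEnabled (see the FPU asserts in assembler-mips.cc above). A sketch:

    void GenerateFpuAdd(MacroAssembler* masm, Isolate* isolate) {
      if (isolate->cpu_features()->IsSupported(FPU)) {
        CpuFeatures::Scope scope(FPU);
        masm->add_d(f0, f12, f14);  // OK: FPU is enabled inside the scope.
      } else {
        // Fall back to a core-register (soft-float) sequence here.
      }
    }
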
+class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@@ -285,6 +417,9 @@
Assembler(void* buffer, int buffer_size);
~Assembler();
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@@ -320,12 +455,6 @@
// The high 8 bits are set to zero.
void label_at_put(Label* L, int at_offset);
- // Size of an instruction.
- static const int kInstrSize = sizeof(Instr);
-
- // Difference between address of current opcode and target address offset.
- static const int kBranchPCOffset = 4;
-
// Read/Modify the code target address in the branch/call instruction at pc.
static Address target_address_at(Address pc);
static void set_target_address_at(Address pc, Address target);
@@ -344,8 +473,25 @@
set_target_address_at(instruction_payload, target);
}
- static const int kCallTargetSize = 3 * kPointerSize;
- static const int kExternalTargetSize = 3 * kPointerSize;
+ // Size of an instruction.
+ static const int kInstrSize = sizeof(Instr);
+
+ // Difference between address of current opcode and target address offset.
+ static const int kBranchPCOffset = 4;
+
+ // Here we are patching the address in the LUI/ORI instruction pair.
+  // These values are used in the serialization process and must be zero for
+  // the MIPS platform, as Code, Embedded Object, and External-reference
+  // pointers are split across two consecutive instructions and don't exist
+  // separately in the code, so the serializer should not step forward in
+  // memory after a target is resolved and written.
+ static const int kCallTargetSize = 0 * kInstrSize;
+ static const int kExternalTargetSize = 0 * kInstrSize;
+
+  // Number of consecutive instructions used to store a 32-bit constant.
+  // Used by RelocInfo::target_address_address() to tell the serializer the
+  // address of the instruction that follows the LUI/ORI instruction pair.
+ static const int kInstructionsFor32BitConstant = 2;
// Distance between the instruction referring to the address of the call
// target and the return address.
@@ -353,16 +499,53 @@
// Distance between start of patched return sequence and the emitted address
// to jump to.
- static const int kPatchReturnSequenceAddressOffset = kInstrSize;
+ static const int kPatchReturnSequenceAddressOffset = 0;
// Distance between start of patched debug break slot and the emitted address
// to jump to.
- static const int kPatchDebugBreakSlotAddressOffset = kInstrSize;
+ static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+
+ // Difference between address of current opcode and value read from pc
+ // register.
+ static const int kPcLoadDelta = 4;
+
+ // Number of instructions used for the JS return sequence. The constant is
+ // used by the debugger to patch the JS return sequence.
+ static const int kJSReturnSequenceInstructions = 7;
+ static const int kDebugBreakSlotInstructions = 4;
+ static const int kDebugBreakSlotLength =
+ kDebugBreakSlotInstructions * kInstrSize;
+
// ---------------------------------------------------------------------------
// Code generation.
- void nop() { sll(zero_reg, zero_reg, 0); }
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+  // Aligns code to something that is optimal as a jump target on this platform.
+ void CodeTargetAlign();
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+ };
+
+ // type == 0 is the default non-marking type.
+ void nop(unsigned int type = 0) {
+ ASSERT(type < 32);
+ sll(zero_reg, zero_reg, type, true);
+ }
//------- Branch and jump instructions --------
@@ -400,9 +583,7 @@
//-------Data-processing-instructions---------
// Arithmetic.
- void add(Register rd, Register rs, Register rt);
void addu(Register rd, Register rs, Register rt);
- void sub(Register rd, Register rs, Register rt);
void subu(Register rd, Register rs, Register rt);
void mult(Register rs, Register rt);
void multu(Register rs, Register rt);
@@ -410,7 +591,6 @@
void divu(Register rs, Register rt);
void mul(Register rd, Register rs, Register rt);
- void addi(Register rd, Register rs, int32_t j);
void addiu(Register rd, Register rs, int32_t j);
// Logical.
@@ -425,21 +605,33 @@
void lui(Register rd, int32_t j);
// Shifts.
- void sll(Register rd, Register rt, uint16_t sa);
+  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
+  // markers and may cause problems if emitted by normal code; coming_from_nop
+  // makes sure this doesn't happen.
+ void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
void sllv(Register rd, Register rt, Register rs);
void srl(Register rd, Register rt, uint16_t sa);
void srlv(Register rd, Register rt, Register rs);
void sra(Register rt, Register rd, uint16_t sa);
void srav(Register rt, Register rd, Register rs);
+ void rotr(Register rd, Register rt, uint16_t sa);
+ void rotrv(Register rd, Register rt, Register rs);
//------------Memory-instructions-------------
void lb(Register rd, const MemOperand& rs);
void lbu(Register rd, const MemOperand& rs);
+ void lh(Register rd, const MemOperand& rs);
+ void lhu(Register rd, const MemOperand& rs);
void lw(Register rd, const MemOperand& rs);
+ void lwl(Register rd, const MemOperand& rs);
+ void lwr(Register rd, const MemOperand& rs);
void sb(Register rd, const MemOperand& rs);
+ void sh(Register rd, const MemOperand& rs);
void sw(Register rd, const MemOperand& rs);
+ void swl(Register rd, const MemOperand& rs);
+ void swr(Register rd, const MemOperand& rs);
//-------------Misc-instructions--------------
@@ -463,6 +655,16 @@
void slti(Register rd, Register rs, int32_t j);
void sltiu(Register rd, Register rs, int32_t j);
+ // Conditional move.
+ void movz(Register rd, Register rs, Register rt);
+ void movn(Register rd, Register rs, Register rt);
+ void movt(Register rd, Register rs, uint16_t cc = 0);
+ void movf(Register rd, Register rs, uint16_t cc = 0);
+
+ // Bit twiddling.
+ void clz(Register rd, Register rs);
+ void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
//--------Coprocessor-instructions----------------
@@ -473,19 +675,44 @@
void swc1(FPURegister fs, const MemOperand& dst);
void sdc1(FPURegister fs, const MemOperand& dst);
- // When paired with MTC1 to write a value to a 64-bit FPR, the MTC1 must be
- // executed first, followed by the MTHC1.
- void mtc1(FPURegister fs, Register rt);
- void mthc1(FPURegister fs, Register rt);
- void mfc1(FPURegister fs, Register rt);
- void mfhc1(FPURegister fs, Register rt);
+ void mtc1(Register rt, FPURegister fs);
+ void mfc1(Register rt, FPURegister fs);
+
+ void ctc1(Register rt, FPUControlRegister fs);
+ void cfc1(Register rt, FPUControlRegister fs);
+
+ // Arithmetic.
+ void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+ void abs_d(FPURegister fd, FPURegister fs);
+ void mov_d(FPURegister fd, FPURegister fs);
+ void neg_d(FPURegister fd, FPURegister fs);
+ void sqrt_d(FPURegister fd, FPURegister fs);
// Conversion.
void cvt_w_s(FPURegister fd, FPURegister fs);
void cvt_w_d(FPURegister fd, FPURegister fs);
+ void trunc_w_s(FPURegister fd, FPURegister fs);
+ void trunc_w_d(FPURegister fd, FPURegister fs);
+ void round_w_s(FPURegister fd, FPURegister fs);
+ void round_w_d(FPURegister fd, FPURegister fs);
+ void floor_w_s(FPURegister fd, FPURegister fs);
+ void floor_w_d(FPURegister fd, FPURegister fs);
+ void ceil_w_s(FPURegister fd, FPURegister fs);
+ void ceil_w_d(FPURegister fd, FPURegister fs);
void cvt_l_s(FPURegister fd, FPURegister fs);
void cvt_l_d(FPURegister fd, FPURegister fs);
+ void trunc_l_s(FPURegister fd, FPURegister fs);
+ void trunc_l_d(FPURegister fd, FPURegister fs);
+ void round_l_s(FPURegister fd, FPURegister fs);
+ void round_l_d(FPURegister fd, FPURegister fs);
+ void floor_l_s(FPURegister fd, FPURegister fs);
+ void floor_l_d(FPURegister fd, FPURegister fs);
+ void ceil_l_s(FPURegister fd, FPURegister fs);
+ void ceil_l_d(FPURegister fd, FPURegister fs);
void cvt_s_w(FPURegister fd, FPURegister fs);
void cvt_s_l(FPURegister fd, FPURegister fs);
@@ -503,32 +730,60 @@
void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
void bc1t(int16_t offset, uint16_t cc = 0);
void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
-
+ void fcmp(FPURegister src1, const double src2, FPUCondition cond);
// Check the code size generated from label to here.
int InstructionsGeneratedSince(Label* l) {
return (pc_offset() - l->pos()) / kInstrSize;
}
+ // Class for scoping postponing the trampoline pool generation.
+ class BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() {
+ assem_->EndBlockTrampolinePool();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
+
// Debugging.
// Mark address of the ExitJSFrame code.
void RecordJSReturn();
+ // Mark address of a debug break slot.
+ void RecordDebugBreakSlot();
+
// Record a comment relocation entry that can be used by a disassembler.
- // Use --debug_code to enable.
+ // Use --code-comments to enable.
void RecordComment(const char* msg);
- void RecordPosition(int pos);
- void RecordStatementPosition(int pos);
- bool WriteRecordedPositions();
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data);
int32_t pc_offset() const { return pc_ - buffer_; }
- int32_t current_position() const { return current_position_; }
- int32_t current_statement_position() const {
- return current_statement_position_;
+
+ PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+ bool can_peephole_optimize(int instructions) {
+ if (!allow_peephole_optimization_) return false;
+ if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
+ return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
}
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
+
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@@ -537,12 +792,9 @@
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
- protected:
- int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
// Read/patch instructions.
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
- void instr_at_put(byte* pc, Instr instr) {
+ static void instr_at_put(byte* pc, Instr instr) {
*reinterpret_cast<Instr*>(pc) = instr;
}
Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
@@ -551,7 +803,34 @@
}
// Check if an instruction is a branch of some kind.
- bool is_branch(Instr instr);
+ static bool IsBranch(Instr instr);
+
+ static bool IsNop(Instr instr, unsigned int type);
+ static bool IsPop(Instr instr);
+ static bool IsPush(Instr instr);
+ static bool IsLwRegFpOffset(Instr instr);
+ static bool IsSwRegFpOffset(Instr instr);
+ static bool IsLwRegFpNegOffset(Instr instr);
+ static bool IsSwRegFpNegOffset(Instr instr);
+
+ static Register GetRt(Instr instr);
+
+ static int32_t GetBranchOffset(Instr instr);
+ static bool IsLw(Instr instr);
+ static int16_t GetLwOffset(Instr instr);
+ static Instr SetLwOffset(Instr instr, int16_t offset);
+
+ static bool IsSw(Instr instr);
+ static Instr SetSwOffset(Instr instr, int16_t offset);
+ static bool IsAddImmediate(Instr instr);
+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+ void CheckTrampolinePool(bool force_emit = false);
+
+ protected:
+ bool emit_debug_code() const { return emit_debug_code_; }
+
+ int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
int target_at(int32_t pos);
@@ -560,11 +839,28 @@
void target_at_put(int32_t pos, int32_t target_pos);
// Say if we need to relocate with this mode.
- bool MustUseAt(RelocInfo::Mode rmode);
+ bool MustUseReg(RelocInfo::Mode rmode);
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_++;
+ }
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
private:
// Code buffer:
// The buffer into which code and relocation info are generated.
@@ -585,6 +881,22 @@
static const int kGap = 32;
byte* pc_; // The program counter - moves forward.
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static const int kCheckConstIntervalInst = 32;
+ static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
// Relocation information generation.
// Each relocation is encoded as a variable size value.
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -593,16 +905,11 @@
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
- // Source position information.
- int current_position_;
- int current_statement_position_;
- int written_position_;
- int written_statement_position_;
-
// Code emission.
inline void CheckBuffer();
void GrowBuffer();
inline void emit(Instr x);
+ inline void CheckTrampolinePoolQuick();
// Instruction generation.
// We have 3 different kind of encoding layout on MIPS.
@@ -620,6 +927,13 @@
SecondaryField func = NULLSF);
void GenInstrRegister(Opcode opcode,
+ Register rs,
+ Register rt,
+ uint16_t msb,
+ uint16_t lsb,
+ SecondaryField func);
+
+ void GenInstrRegister(Opcode opcode,
SecondaryField fmt,
FPURegister ft,
FPURegister fs,
@@ -633,6 +947,12 @@
FPURegister fd,
SecondaryField func = NULLSF);
+ void GenInstrRegister(Opcode opcode,
+ SecondaryField fmt,
+ Register rt,
+ FPUControlRegister fs,
+ SecondaryField func = NULLSF);
+
void GenInstrImmediate(Opcode opcode,
Register rs,
@@ -651,6 +971,8 @@
void GenInstrJump(Opcode opcode,
uint32_t address);
+ // Helpers.
+ void LoadRegPlusOffsetToAt(const MemOperand& src);
// Labels.
void print(Label* L);
@@ -658,8 +980,85 @@
void link_to(Label* L, Label* appendix);
void next(Label* L);
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
+  // Space for trampoline slots precedes the space for labels. Each label takes
+  // one instruction, so the total space for labels is equal to
+  // label_count * kInstrSize.
+ class Trampoline {
+ public:
+ Trampoline(int start, int slot_count, int label_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ next_label_ = start + slot_count * 2 * kInstrSize;
+ free_label_count_ = label_count;
+ end_ = next_label_ + (label_count - 1) * kInstrSize;
+ }
+ int start() {
+ return start_;
+ }
+ int end() {
+ return end_;
+ }
+ int take_slot() {
+ int trampoline_slot = next_slot_;
+ ASSERT(free_slot_count_ > 0);
+ free_slot_count_--;
+ next_slot_ += 2 * kInstrSize;
+ return trampoline_slot;
+ }
+ int take_label() {
+ int label_pos = next_label_;
+ ASSERT(free_label_count_ > 0);
+ free_label_count_--;
+ next_label_ += kInstrSize;
+ return label_pos;
+ }
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ int next_label_;
+ int free_label_count_;
+ };
+
+ int32_t get_label_entry(int32_t pos, bool next_pool = true);
+ int32_t get_trampoline_entry(int32_t pos, bool next_pool = true);
+
+ static const int kSlotsPerTrampoline = 2304;
+ static const int kLabelsPerTrampoline = 8;
+ static const int kTrampolineInst =
+ 2 * kSlotsPerTrampoline + kLabelsPerTrampoline;
+ static const int kTrampolineSize = kTrampolineInst * kInstrSize;
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ static const int kMaxDistBetweenPools =
+ kMaxBranchOffset - 2 * kTrampolineSize;
+
+ List<Trampoline> trampolines_;
+
friend class RegExpMacroAssemblerMIPS;
friend class RelocInfo;
+ friend class CodePatcher;
+ friend class BlockTrampolinePoolScope;
+
+ PositionsRecorder positions_recorder_;
+ bool allow_peephole_optimization_;
+ bool emit_debug_code_;
+ friend class PositionsRecorder;
+ friend class EnsureSpace;
+};
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+ explicit EnsureSpace(Assembler* assembler) {
+ assembler->CheckBuffer();
+ }
};
} } // namespace v8::internal
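
Plugging the trampoline constants above together (assuming kInstrSize is 4 on MIPS) gives the pool geometry that CheckTrampolinePool in assembler-mips.cc works with:

    //   kTrampolineInst      = 2 * 2304 + 8       = 4616 instructions
    //   kTrampolineSize      = 4616 * 4           = 18464 bytes
    //   kMaxBranchOffset     = (1 << 17) - 1      = 131071 bytes
    //   kMaxDistBetweenPools = 131071 - 2 * 18464 = 94143 bytes
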
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 9532938..b4bab8e 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -33,6 +33,8 @@
#include "codegen-inl.h"
#include "debug.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
#include "runtime.h"
namespace v8 {
@@ -59,11 +61,21 @@
}
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
+void Builtins::Generate_JSConstructStubCountdown(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@@ -74,111 +86,43 @@
}
-static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
- bool is_construct) {
- // Called from JSEntryStub::GenerateBody
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // arguments slots
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
-
- // Clear the context before we push it when entering the JS frame.
- __ li(cp, Operand(0, RelocInfo::NONE));
-
- // Enter an internal frame.
- __ EnterInternalFrame();
-
- // Set up the context from the function argument.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
-
- // Set up the roots register.
- ExternalReference roots_address = ExternalReference::roots_address();
- __ li(s6, Operand(roots_address));
-
- // Push the function and the receiver onto the stack.
- __ MultiPushReversed(a1.bit() | a2.bit());
-
- // Copy arguments to the stack in a loop.
- // a3: argc
- // s0: argv, ie points to first arg
- Label loop, entry;
- __ sll(t0, a3, kPointerSizeLog2);
- __ add(t2, s0, t0);
- __ b(&entry);
- __ nop(); // Branch delay slot nop.
- // t2 points past last arg.
- __ bind(&loop);
- __ lw(t0, MemOperand(s0)); // Read next parameter.
- __ addiu(s0, s0, kPointerSize);
- __ lw(t0, MemOperand(t0)); // Dereference handle.
- __ Push(t0); // Push parameter.
- __ bind(&entry);
- __ Branch(ne, &loop, s0, Operand(t2));
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- // s6: roots_address
- //
- // Stack:
- // arguments
- // receiver
- // function
- // arguments slots
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
-
- // Initialize all JavaScript callee-saved registers, since they will be seen
- // by the garbage collector as part of handlers.
- __ LoadRoot(t4, Heap::kUndefinedValueRootIndex);
- __ mov(s1, t4);
- __ mov(s2, t4);
- __ mov(s3, t4);
- __ mov(s4, s4);
- __ mov(s5, t4);
- // s6 holds the root address. Do not clobber.
- // s7 is cp. Do not init.
-
- // Invoke the code and pass argc as a0.
- __ mov(a0, a3);
- if (is_construct) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x164);
- } else {
- ParameterCount actual(a0);
- __ InvokeFunction(a1, actual, CALL_FUNCTION);
- }
-
- __ LeaveInternalFrame();
-
- __ Jump(ra);
-}
-
-
void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, false);
+ UNIMPLEMENTED_MIPS();
}
void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
- Generate_JSEntryTrampolineHelper(masm, true);
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
}
@@ -194,7 +138,6 @@
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
- __ break_(0x201);
}
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
new file mode 100644
index 0000000..6cc272c
--- /dev/null
+++ b/src/mips/code-stubs-mips.cc
@@ -0,0 +1,752 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "bootstrapper.h"
+#include "code-stubs.h"
+#include "codegen-inl.h"
+#include "regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void ToNumberStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Takes a Smi and converts it to an IEEE 64-bit floating point value in two
+// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
+// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
+// scratch register. Destroys the source register. No GC occurs during this
+// stub so you don't have to set up the frame.
+class ConvertToDoubleStub : public CodeStub {
+ public:
+ ConvertToDoubleStub(Register result_reg_1,
+ Register result_reg_2,
+ Register source_reg,
+ Register scratch_reg)
+ : result1_(result_reg_1),
+ result2_(result_reg_2),
+ source_(source_reg),
+ zeros_(scratch_reg) { }
+
+ private:
+ Register result1_;
+ Register result2_;
+ Register source_;
+ Register zeros_;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 14> {};
+
+ Major MajorKey() { return ConvertToDouble; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return result1_.code() +
+ (result2_.code() << 4) +
+ (source_.code() << 8) +
+ (zeros_.code() << 12);
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "ConvertToDoubleStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("ConvertToDoubleStub\n"); }
+#endif
+};
+
+
+void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class FloatingPointHelper : public AllStatic {
+ public:
+
+ enum Destination {
+ kFPURegisters,
+ kCoreRegisters
+ };
+
+
+ // Loads smis from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination, the values end up
+ // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
+ // is floating point registers FPU must be supported. If core registers are
+ // requested when FPU is supported f12 and f14 will be scratched.
+ static void LoadSmis(MacroAssembler* masm,
+ Destination destination,
+ Register scratch1,
+ Register scratch2);
+
+ // Loads objects from a0 and a1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination, the values end up
+ // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination
+ // is floating point registers FPU must be supported. If core registers are
+ // requested when FPU is supported f12 and f14 will still be scratched. If
+ // either a0 or a1 is not a number (not smi and not heap number object) the
+ // not_number label is jumped to with a0 and a1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+ // Loads the number from object into dst as a 32-bit integer if possible. If
+ // the object is not a 32-bit integer control continues at the label
+ // not_int32. If FPU is supported double_scratch is used but not scratch2.
+ static void LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+ private:
+ static void LoadNumber(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register object,
+ FPURegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+};
+
+
+void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register scratch1,
+ Register scratch2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
+ Destination destination,
+ Register object,
+ FPURegister dst,
+ Register dst1,
+ Register dst2,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
+ Register object,
+ Register dst,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// See the class comment; this does NOT work for int32s that are in Smi range.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void EmitNanCheck(MacroAssembler* masm, Condition cc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void NumberToStringStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
+// On exit, v0 is 0, positive, or negative (smi) to indicate the result
+// of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// This stub does not handle the inlined cases (Smis, Booleans, undefined).
+// The stub returns zero for false, and a non-zero value for true.
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// We fall into this code if the operands were Smis, but the result was
+// not (e.g. overflow). We branch into this code (to the not_smi label) if
+// the operands were not both Smi. The operands are in lhs and rhs.
+// To call the C-implemented binary fp operation routines we need to end up
+// with the double precision floating point operands in a0 and a1 (for the
+// value in a1) and a2 and a3 (for the value in a0).
+void GenericBinaryOpStub::HandleBinaryOpSlowCases(MacroAssembler* masm,
+ Label* not_smi,
+ Register lhs,
+ Register rhs,
+ const Builtins::JavaScript& builtin) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// For bitwise ops where the inputs are not both Smis we here try to determine
+// whether both inputs are either Smis or at least heap numbers that can be
+// represented by a 32 bit signed value. We truncate towards zero as required
+// by the ES spec. If this is the case we do the bitwise op and see if the
+// result is a Smi. If so, great, otherwise we try to find a heap number to
+// write the answer into (either by allocating or by overwriting).
+// On entry the operands are in lhs (x) and rhs (y). (Result = x op y).
+// On exit the result is in v0.
+void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+ GenericBinaryOpStub stub(key, type_info);
+ return stub.GetCode();
+}
+
+
+Handle<Code> GetTypeRecordingBinaryOpStub(int key,
+ TRBinaryOpIC::TypeInfo type_info,
+ TRBinaryOpIC::TypeInfo result_type_info) {
+ TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
+ return stub.GetCode();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED();
+}
+
+
+void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+const char* TypeRecordingBinaryOpStub::GetName() {
+ UNIMPLEMENTED_MIPS();
+ return name_;
+}
+
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation(
+ MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Generate the smi code. If the operation on smis is successful this return is
+// generated. If the result is not a smi and heap number allocation is not
+// requested the code falls through. If number allocation is requested but a
+// heap number cannot be allocated the code jumps to the label gc_required.
+void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+
+void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
+ UNIMPLEMENTED_MIPS();
+ return Runtime::kAbort;
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+ return true;
+}
+
+
+void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
+ UncatchableExceptionType type) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CEntryStub::GenerateCore(MacroAssembler* masm,
+ Label* throw_normal_exception,
+ Label* throw_termination_exception,
+ Label* throw_out_of_memory_exception,
+ bool do_gc,
+ bool always_allocate) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Uses registers a0 to t0. Expected input is
+// object in a0 (or at sp+1*kPointerSize) and function in
+// a1 (or at sp), depending on whether or not
+// args_in_registers() is true.
+void InstanceofStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ UNIMPLEMENTED_MIPS();
+ return name_;
+}
+
+
+int CompareStub::MinorKey() {
+ UNIMPLEMENTED_MIPS();
+ return 0;
+}
+
+
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class StringHelper : public AllStatic {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersLong adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying a large number of characters. This function
+ // is allowed to spend extra time setting up conditions to make copying
+ // faster. Copying of overlapping regions is not supported.
+ // Dest register ends at the position after the last character written.
+ static void GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags);
+
+
+ // Probe the symbol table for a two character string. If the string is
+ // not found by probing a jump to the label not_found is performed. This jump
+ // does not guarantee that the string is not in the symbol table. If the
+ // string is found the code falls through with the string in register r0.
+ // Contents of both c1 and c2 registers are modified. At the exit c1 is
+ // guaranteed to contain halfword with low and high bytes equal to
+ // initial contents of c1 and c2 respectively.
+ static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found);
+
+ // Generate string hash.
+ static void GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character);
+
+ static void GenerateHashGetHash(MacroAssembler* masm,
+ Register hash);
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+enum CopyCharactersFlags {
+ COPY_ASCII = 1,
+ DEST_ALWAYS_ALIGNED = 2
+};
+
+
+void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ int flags) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
+ Register c1,
+ Register c2,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4,
+ Register scratch5,
+ Label* not_found) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateHashInit(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
+ Register hash,
+ Register character) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
+ Register hash) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register right,
+ Register left,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StringAddStub::Generate(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+} } // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
+
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
new file mode 100644
index 0000000..675730a
--- /dev/null
+++ b/src/mips/code-stubs-mips.h
@@ -0,0 +1,511 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_CODE_STUBS_MIPS_H_
+#define V8_MIPS_CODE_STUBS_MIPS_H_
+
+#include "ic-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+// Compute a transcendental math function natively, or call the
+// TranscendentalCache runtime function.
+class TranscendentalCacheStub: public CodeStub {
+ public:
+ explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+ : type_(type) {}
+ void Generate(MacroAssembler* masm);
+ private:
+ TranscendentalCache::Type type_;
+ Major MajorKey() { return TranscendentalCache; }
+ int MinorKey() { return type_; }
+ Runtime::FunctionId RuntimeFunction();
+};
+
+
+class ToBooleanStub: public CodeStub {
+ public:
+ explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Register tos_;
+ Major MajorKey() { return ToBoolean; }
+ int MinorKey() { return tos_.code(); }
+};
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+ static const int kUnknownIntValue = -1;
+
+ GenericBinaryOpStub(Token::Value op,
+ OverwriteMode mode,
+ Register lhs,
+ Register rhs,
+ int constant_rhs = kUnknownIntValue)
+ : op_(op),
+ mode_(mode),
+ lhs_(lhs),
+ rhs_(rhs),
+ constant_rhs_(constant_rhs),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+ runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
+ name_(NULL) { }
+
+ GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ lhs_(LhsRegister(RegisterBits::decode(key))),
+ rhs_(RhsRegister(RegisterBits::decode(key))),
+ constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
+ specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
+ runtime_operands_type_(type_info),
+ name_(NULL) { }
+
+ private:
+ Token::Value op_;
+ OverwriteMode mode_;
+ Register lhs_;
+ Register rhs_;
+ int constant_rhs_;
+ bool specialized_on_rhs_;
+ BinaryOpIC::TypeInfo runtime_operands_type_;
+ char* name_;
+
+ static const int kMaxKnownRhs = 0x40000000;
+ static const int kKnownRhsKeyBits = 6;
+
+ // Minor key encoding in 16 bits.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 6> {};
+ class TypeInfoBits: public BitField<int, 8, 3> {};
+ class RegisterBits: public BitField<bool, 11, 1> {};
+ class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
+
+ Major MajorKey() { return GenericBinaryOp; }
+ int MinorKey() {
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ // Encode the parameters in a unique 16 bit value.
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | KnownIntBits::encode(MinorKeyForKnownInt())
+ | TypeInfoBits::encode(runtime_operands_type_)
+ | RegisterBits::encode(lhs_.is(a0));
+ }
+
+ void Generate(MacroAssembler* masm);
+ void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+ Register lhs,
+ Register rhs);
+ void HandleBinaryOpSlowCases(MacroAssembler* masm,
+ Label* not_smi,
+ Register lhs,
+ Register rhs,
+ const Builtins::JavaScript& builtin);
+ void GenerateTypeTransition(MacroAssembler* masm);
+
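+  // Returns true when the stub should be specialized on the known right-hand
+  // side: constants 2..3 for DIV, and constants 2..10 or powers of two up to
+  // kMaxKnownRhs for MOD.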
+ static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+ if (constant_rhs == kUnknownIntValue) return false;
+ if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+ if (op == Token::MOD) {
+ if (constant_rhs <= 1) return false;
+ if (constant_rhs <= 10) return true;
+ if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+ return false;
+ }
+ return false;
+ }
+
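+  // Encodes the known right-hand side into the minor key: 0 when not
+  // specialized, constant + 1 for constants up to 10, and 12 + log2(constant)
+  // for larger powers of two.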
+ int MinorKeyForKnownInt() {
+ if (!specialized_on_rhs_) return 0;
+ if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+ ASSERT(IsPowerOf2(constant_rhs_));
+ int key = 12;
+ int d = constant_rhs_;
+ while ((d & 1) == 0) {
+ key++;
+ d >>= 1;
+ }
+ ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
+ return key;
+ }
+
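+  // Inverse of MinorKeyForKnownInt: key 0 decodes to 0, keys 1..11 decode to
+  // 0..10, and keys of 12 or more decode to 1 << (key - 12).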
+ int KnownBitsForMinorKey(int key) {
+ if (!key) return 0;
+ if (key <= 11) return key - 1;
+ int d = 1;
+ while (key != 12) {
+ key--;
+ d <<= 1;
+ }
+ return d;
+ }
+
+ Register LhsRegister(bool lhs_is_a0) {
+ return lhs_is_a0 ? a0 : a1;
+ }
+
+ Register RhsRegister(bool lhs_is_a0) {
+ return lhs_is_a0 ? a1 : a0;
+ }
+
+ bool HasSmiSmiFastPath() {
+ return op_ != Token::DIV;
+ }
+
+ bool ShouldGenerateSmiCode() {
+ return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
+ runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+ runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ bool ShouldGenerateFPCode() {
+ return runtime_operands_type_ != BinaryOpIC::STRINGS;
+ }
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(runtime_operands_type_);
+ }
+
+ const char* GetName();
+
+ virtual void FinishCode(Code* code) {
+ code->set_binary_op_type(runtime_operands_type_);
+ }
+
+#ifdef DEBUG
+ void Print() {
+ if (!specialized_on_rhs_) {
+ PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+ } else {
+ PrintF("GenericBinaryOpStub (%s by %d)\n",
+ Token::String(op_),
+ constant_rhs_);
+ }
+ }
+#endif
+};
+
+class TypeRecordingBinaryOpStub: public CodeStub {
+ public:
+ TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(TRBinaryOpIC::UNINITIALIZED),
+ result_type_(TRBinaryOpIC::UNINITIALIZED),
+ name_(NULL) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ TypeRecordingBinaryOpStub(
+ int key,
+ TRBinaryOpIC::TypeInfo operands_type,
+ TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_fpu_(FPUBits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type),
+ name_(NULL) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_fpu_;
+
+ // Operand type information determined at runtime.
+ TRBinaryOpIC::TypeInfo operands_type_;
+ TRBinaryOpIC::TypeInfo result_type_;
+
+ char* name_;
+
+ const char* GetName();
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("TypeRecordingBinaryOpStub %d (op %s), "
+ "(mode %d, runtime_type_info %s)\n",
+ MinorKey(),
+ Token::String(op_),
+ static_cast<int>(mode_),
+ TRBinaryOpIC::GetName(operands_type_));
+ }
+#endif
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class FPUBits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return TypeRecordingBinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FPUBits::encode(use_fpu_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return TRBinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Code* code) {
+ code->set_type_recording_binary_op_type(operands_type_);
+ code->set_type_recording_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
+// Flag that indicates how to generate code for the stub StringAddStub.
+enum StringAddFlags {
+ NO_STRING_ADD_FLAGS = 0,
+ NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
+};
+
+
+class StringAddStub: public CodeStub {
+ public:
+ explicit StringAddStub(StringAddFlags flags) {
+ string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
+ }
+
+ private:
+ Major MajorKey() { return StringAdd; }
+ int MinorKey() { return string_check_ ? 0 : 1; }
+
+ void Generate(MacroAssembler* masm);
+
+ // Should the stub check whether arguments are strings?
+ bool string_check_;
+};
+
+
+class SubStringStub: public CodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public CodeStub {
+ public:
+ StringCompareStub() { }
+
+  // Compares two flat ASCII strings and returns the result in v0.
+ // Does not use the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+// This stub can convert a signed int32 to a heap number (double). It does
+// not work for int32s that are in Smi range! No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public CodeStub {
+ public:
+ WriteInt32ToHeapNumberStub(Register the_int,
+ Register the_heap_number,
+ Register scratch,
+ Register scratch2)
+ : the_int_(the_int),
+ the_heap_number_(the_heap_number),
+ scratch_(scratch),
+ sign_(scratch2) { }
+
+ private:
+ Register the_int_;
+ Register the_heap_number_;
+ Register scratch_;
+ Register sign_;
+
+ // Minor key encoding in 16 bits.
+ class IntRegisterBits: public BitField<int, 0, 4> {};
+ class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+ class ScratchRegisterBits: public BitField<int, 8, 4> {};
+
+ Major MajorKey() { return WriteInt32ToHeapNumber; }
+ int MinorKey() {
+ // Encode the parameters in a unique 16 bit value.
+ return IntRegisterBits::encode(the_int_.code())
+ | HeapNumberRegisterBits::encode(the_heap_number_.code())
+ | ScratchRegisterBits::encode(scratch_.code());
+ }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
+
+#ifdef DEBUG
+ void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
+#endif
+};
+
+
+class NumberToStringStub: public CodeStub {
+ public:
+ NumberToStringStub() { }
+
+ // Generate code to do a lookup in the number string cache. If the number in
+ // the register object is found in the cache the generated code falls through
+ // with the result in the result register. The object and the result register
+ // can be the same. If the number is not found in the cache the code jumps to
+ // the label not_found with only the content of register object unchanged.
+ static void GenerateLookupNumberStringCache(MacroAssembler* masm,
+ Register object,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ bool object_is_smi,
+ Label* not_found);
+
+ private:
+ Major MajorKey() { return NumberToString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "NumberToStringStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("NumberToStringStub\n");
+ }
+#endif
+};
+
+
+// Enter C code from generated RegExp code in a way that allows
+// the C code to fix the return address in case of a GC.
+// Currently only needed on ARM and MIPS.
+class RegExpCEntryStub: public CodeStub {
+ public:
+ RegExpCEntryStub() {}
+ virtual ~RegExpCEntryStub() {}
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Major MajorKey() { return RegExpCEntry; }
+ int MinorKey() { return 0; }
+
+ bool NeedsImmovableCode() { return true; }
+
+ const char* GetName() { return "RegExpCEntryStub"; }
+};
+
+
+// Generate code to load an element from a pixel array. The receiver is
+// assumed not to be a smi and to have elements; the caller must guarantee this
+// precondition. If the receiver does not have elements that are pixel arrays,
+// the generated code jumps to not_pixel_array. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+ Register receiver,
+ Register key,
+ Register elements_map,
+ Register elements,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* not_pixel_array,
+ Label* key_not_smi,
+ Label* out_of_range);
+
+
+} } // namespace v8::internal
+
+#endif  // V8_MIPS_CODE_STUBS_MIPS_H_
diff --git a/src/mips/codegen-mips-inl.h b/src/mips/codegen-mips-inl.h
index 3a511b8..be9ae9e 100644
--- a/src/mips/codegen-mips-inl.h
+++ b/src/mips/codegen-mips-inl.h
@@ -29,6 +29,8 @@
#ifndef V8_MIPS_CODEGEN_MIPS_INL_H_
#define V8_MIPS_CODEGEN_MIPS_INL_H_
+#include "virtual-frame-mips.h"
+
namespace v8 {
namespace internal {
@@ -42,26 +44,18 @@
}
+// Note: this has been hacked for submission. MIPS branches require two
+// additional operands: Register src1, const Operand& src2.
+void DeferredCode::Branch(Condition cond) {
+ __ Branch(&entry_label_, cond, zero_reg, Operand(0));
+}
+
+
void Reference::GetValueAndSpill() {
GetValue();
}
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- Visit(statement);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- VisitStatements(statements);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- Load(expression);
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index b522616..c1149df 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -31,36 +31,62 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
+#include "jsregexp.h"
+#include "jump-target-inl.h"
#include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
+#include "stub-cache.h"
#include "virtual-frame-inl.h"
-
-
+#include "virtual-frame-mips-inl.h"
namespace v8 {
namespace internal {
+
#define __ ACCESS_MASM(masm_)
-
-
-// -----------------------------------------------------------------------------
+// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
-
void DeferredCode::SaveRegisters() {
- UNIMPLEMENTED_MIPS();
+ // On MIPS you either have a completely spilled frame or you
+ // handle it yourself, but at the moment there's no automation
+ // of registers and deferred code.
}
void DeferredCode::RestoreRegisters() {
- UNIMPLEMENTED_MIPS();
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ frame_state_->frame()->AssertIsSpilled();
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+}
+
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
}
@@ -69,21 +95,28 @@
CodeGenState::CodeGenState(CodeGenerator* owner)
: owner_(owner),
- true_target_(NULL),
- false_target_(NULL),
- previous_(NULL) {
- owner_->set_state(this);
+ previous_(owner->state()) {
+ owner->set_state(this);
}
-CodeGenState::CodeGenState(CodeGenerator* owner,
+ConditionCodeGenState::ConditionCodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target)
- : owner_(owner),
+ : CodeGenState(owner),
true_target_(true_target),
- false_target_(false_target),
- previous_(owner->state()) {
- owner_->set_state(this);
+ false_target_(false_target) {
+ owner->set_state(this);
+}
+
+
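+// Records the given type info for the slot while this state is active; the
+// previous info is restored by the destructor.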
+TypeInfoCodeGenState::TypeInfoCodeGenState(CodeGenerator* owner,
+ Slot* slot,
+ TypeInfo type_info)
+ : CodeGenState(owner),
+ slot_(slot) {
+ owner->set_state(this);
+ old_type_info_ = owner->set_type_info(slot, type_info);
}
@@ -93,16 +126,25 @@
}
+TypeInfoCodeGenState::~TypeInfoCodeGenState() {
+ owner()->set_type_info(slot_, old_type_info_);
+}
+
+
// -----------------------------------------------------------------------------
-// CodeGenerator implementation
+// CodeGenerator implementation.
CodeGenerator::CodeGenerator(MacroAssembler* masm)
: deferred_(8),
masm_(masm),
+ info_(NULL),
frame_(NULL),
allocator_(NULL),
cc_reg_(cc_always),
state_(NULL),
+ loop_nesting_(0),
+ type_info_(NULL),
+ function_return_(JumpTarget::BIDIRECTIONAL),
function_return_is_shadowed_(false) {
}
@@ -114,356 +156,249 @@
// cp: callee's context
void CodeGenerator::Generate(CompilationInfo* info) {
- // Record the position for debugging purposes.
- CodeForFunctionPosition(info->function());
-
- // Initialize state.
- info_ = info;
- ASSERT(allocator_ == NULL);
- RegisterAllocator register_allocator(this);
-  allocator_ = &register_allocator;
- ASSERT(frame_ == NULL);
- frame_ = new VirtualFrame();
- cc_reg_ = cc_always;
-
- {
- CodeGenState state(this);
-
- // Registers:
- // a1: called JS function
- // ra: return address
- // fp: caller's frame pointer
- // sp: stack pointer
- // cp: callee's context
- //
- // Stack:
- // arguments
- // receiver
-
- frame_->Enter();
-
- // Allocate space for locals and initialize them.
- frame_->AllocateStackSlots();
-
- // Initialize the function return target.
- function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
- function_return_is_shadowed_ = false;
-
- VirtualFrame::SpilledScope spilled_scope;
- if (scope()->num_heap_slots() > 0) {
- UNIMPLEMENTED_MIPS();
- }
-
- {
- Comment cmnt2(masm_, "[ copy context parameters into .context");
-
- // Note that iteration order is relevant here! If we have the same
- // parameter twice (e.g., function (x, y, x)), and that parameter
- // needs to be copied into the context, it must be the last argument
- // passed to the parameter that needs to be copied. This is a rare
- // case so we don't check for it, instead we rely on the copying
- // order: such a parameter is copied repeatedly into the same
- // context location and thus the last value is what is seen inside
- // the function.
- for (int i = 0; i < scope()->num_parameters(); i++) {
- UNIMPLEMENTED_MIPS();
- }
- }
-
- // Store the arguments object. This must happen after context
- // initialization because the arguments object may be stored in the
- // context.
- if (scope()->arguments() != NULL) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Generate code to 'execute' declarations and initialize functions
- // (source elements). In case of an illegal redeclaration we need to
- // handle that instead of processing the declarations.
- if (scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ illegal redeclarations");
- scope()->VisitIllegalRedeclaration(this);
- } else {
- Comment cmnt(masm_, "[ declarations");
- ProcessDeclarations(scope()->declarations());
- // Bail out if a stack-overflow exception occurred when processing
- // declarations.
- if (HasStackOverflow()) return;
- }
-
- if (FLAG_trace) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Compile the body of the function in a vanilla state. Don't
- // bother compiling all the code if the scope has an illegal
- // redeclaration.
- if (!scope()->HasIllegalRedeclaration()) {
- Comment cmnt(masm_, "[ function body");
-#ifdef DEBUG
- bool is_builtin = Isolate::Current()->bootstrapper()->IsActive();
- bool should_trace =
- is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls;
- if (should_trace) {
- UNIMPLEMENTED_MIPS();
- }
-#endif
- VisitStatementsAndSpill(info->function()->body());
- }
- }
-
- if (has_valid_frame() || function_return_.is_linked()) {
- if (!function_return_.is_linked()) {
- CodeForReturnPosition(info->function());
- }
- // Registers:
- // v0: result
- // sp: stack pointer
- // fp: frame pointer
- // cp: callee's context
-
- __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-
- function_return_.Bind();
- if (FLAG_trace) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Add a label for checking the size of the code used for returning.
- Label check_exit_codesize;
- masm_->bind(&check_exit_codesize);
-
- masm_->mov(sp, fp);
- masm_->lw(fp, MemOperand(sp, 0));
- masm_->lw(ra, MemOperand(sp, 4));
- masm_->addiu(sp, sp, 8);
-
- // Here we use masm_-> instead of the __ macro to avoid the code coverage
- // tool from instrumenting as we rely on the code size here.
- // TODO(MIPS): Should we be able to use more than 0x1ffe parameters?
- masm_->addiu(sp, sp, (scope()->num_parameters() + 1) * kPointerSize);
- masm_->Jump(ra);
- // The Jump automatically generates a nop in the branch delay slot.
-
- // Check that the size of the code used for returning matches what is
- // expected by the debugger.
- ASSERT_EQ(kJSReturnSequenceLength,
- masm_->InstructionsGeneratedSince(&check_exit_codesize));
- }
-
- // Code generation state must be reset.
- ASSERT(!has_cc());
- ASSERT(state_ == NULL);
- ASSERT(!function_return_is_shadowed_);
- function_return_.Unuse();
- DeleteFrame();
-
- // Process any deferred code using the register allocator.
- if (!HasStackOverflow()) {
- ProcessDeferred();
- }
-
- allocator_ = NULL;
+ UNIMPLEMENTED_MIPS();
}
-void CodeGenerator::LoadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ LoadReference");
- Expression* e = ref->expression();
- Property* property = e->AsProperty();
- Variable* var = e->AsVariableProxy()->AsVariable();
-
- if (property != NULL) {
- UNIMPLEMENTED_MIPS();
- } else if (var != NULL) {
- // The expression is a variable proxy that does not rewrite to a
- // property. Global variables are treated as named property references.
- if (var->is_global()) {
- LoadGlobal();
- ref->set_type(Reference::NAMED);
- } else {
- ASSERT(var->slot() != NULL);
- ref->set_type(Reference::SLOT);
- }
- } else {
- UNIMPLEMENTED_MIPS();
- }
-}
-
-
-void CodeGenerator::UnloadReference(Reference* ref) {
- VirtualFrame::SpilledScope spilled_scope;
- // Pop a reference from the stack while preserving TOS.
- Comment cmnt(masm_, "[ UnloadReference");
- int size = ref->size();
- if (size > 0) {
- frame_->EmitPop(a0);
- frame_->Drop(size);
- frame_->EmitPush(a0);
- }
- ref->set_unloaded();
+int CodeGenerator::NumberOfSlot(Slot* slot) {
+ UNIMPLEMENTED_MIPS();
+ return 0;
}
MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER:
- UNIMPLEMENTED_MIPS();
- return MemOperand(no_reg, 0);
-
- case Slot::LOCAL:
- return frame_->LocalAt(index);
-
- case Slot::CONTEXT: {
- UNIMPLEMENTED_MIPS();
- return MemOperand(no_reg, 0);
- }
-
- default:
- UNREACHABLE();
- return MemOperand(no_reg, 0);
- }
+ UNIMPLEMENTED_MIPS();
+ return MemOperand(zero_reg, 0);
}
-// Loads a value on TOS. If it is a boolean value, the result may have been
-// (partially) translated into branches, or it may have set the condition
-// code register. If force_cc is set, the value is forced to set the
-// condition code register and no value is pushed. If the condition code
-// register was set, has_cc() is true and cc_reg_ contains the condition to
-// test for 'true'.
+MemOperand CodeGenerator::ContextSlotOperandCheckExtensions(
+ Slot* slot,
+ Register tmp,
+ Register tmp2,
+ JumpTarget* slow) {
+ UNIMPLEMENTED_MIPS();
+ return MemOperand(zero_reg, 0);
+}
+
+
void CodeGenerator::LoadCondition(Expression* x,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc) {
- ASSERT(!has_cc());
- int original_height = frame_->height();
-
- { CodeGenState new_state(this, true_target, false_target);
- Visit(x);
-
- // If we hit a stack overflow, we may not have actually visited
- // the expression. In that case, we ensure that we have a
- // valid-looking frame state because we will continue to generate
- // code as we unwind the C++ stack.
- //
- // It's possible to have both a stack overflow and a valid frame
- // state (eg, a subexpression overflowed, visiting it returned
- // with a dummied frame state, and visiting this expression
- // returned with a normal-looking state).
- if (HasStackOverflow() &&
- has_valid_frame() &&
- !has_cc() &&
- frame_->height() == original_height) {
- true_target->Jump();
- }
- }
- if (force_cc && frame_ != NULL && !has_cc()) {
- // Convert the TOS value to a boolean in the condition code register.
- UNIMPLEMENTED_MIPS();
- }
- ASSERT(!force_cc || !has_valid_frame() || has_cc());
- ASSERT(!has_valid_frame() ||
- (has_cc() && frame_->height() == original_height) ||
- (!has_cc() && frame_->height() == original_height + 1));
+ UNIMPLEMENTED_MIPS();
}
void CodeGenerator::Load(Expression* x) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- JumpTarget true_target;
- JumpTarget false_target;
- LoadCondition(x, &true_target, &false_target, false);
-
- if (has_cc()) {
- UNIMPLEMENTED_MIPS();
- }
-
- if (true_target.is_linked() || false_target.is_linked()) {
- UNIMPLEMENTED_MIPS();
- }
- ASSERT(has_valid_frame());
- ASSERT(!has_cc());
- ASSERT(frame_->height() == original_height + 1);
+ UNIMPLEMENTED_MIPS();
}
void CodeGenerator::LoadGlobal() {
- VirtualFrame::SpilledScope spilled_scope;
- __ lw(a0, GlobalObject());
- frame_->EmitPush(a0);
+ UNIMPLEMENTED_MIPS();
}
-void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
- VirtualFrame::SpilledScope spilled_scope;
- if (slot->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else {
- __ lw(a0, SlotOperand(slot, a2));
- frame_->EmitPush(a0);
- if (slot->var()->mode() == Variable::CONST) {
- UNIMPLEMENTED_MIPS();
- }
- }
+void CodeGenerator::LoadGlobalReceiver(Register scratch) {
+ UNIMPLEMENTED_MIPS();
}
-void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else {
- ASSERT(!slot->var()->is_dynamic());
+ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
+ UNIMPLEMENTED_MIPS();
+ return EAGER_ARGUMENTS_ALLOCATION;
+}
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- UNIMPLEMENTED_MIPS();
- }
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. a2 may be loaded with context; used below in
- // RecordWrite.
- frame_->EmitPop(a0);
- __ sw(a0, SlotOperand(slot, a2));
- frame_->EmitPush(a0);
- if (slot->type() == Slot::CONTEXT) {
- UNIMPLEMENTED_MIPS();
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
+void CodeGenerator::StoreArgumentsObject(bool initial) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadTypeofExpression(Expression* x) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Reference::Reference(CodeGenerator* cgen,
+ Expression* expression,
+ bool persist_after_get)
+ : cgen_(cgen),
+ expression_(expression),
+ type_(ILLEGAL),
+ persist_after_get_(persist_after_get) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Reference::~Reference() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadReference(Reference* ref) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::UnloadReference(Reference* ref) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given
+// register to a boolean in the condition code register. The code
+// may jump to 'false_target' in case the register converts to 'false'.
+void CodeGenerator::ToBoolean(JumpTarget* true_target,
+ JumpTarget* false_target) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenericBinaryOperation(Token::Value op,
+ OverwriteMode overwrite_mode,
+ GenerateInlineSmi inline_smi,
+ int constant_rhs) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredInlineSmiOperation: public DeferredCode {
+ public:
+ DeferredInlineSmiOperation(Token::Value op,
+ int value,
+ bool reversed,
+ OverwriteMode overwrite_mode,
+ Register tos)
+ : op_(op),
+ value_(value),
+ reversed_(reversed),
+ overwrite_mode_(overwrite_mode),
+ tos_register_(tos) {
+ set_comment("[ DeferredInlinedSmiOperation");
}
+
+ virtual void Generate();
+  // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and
+  // Exit(). Currently on MIPS SaveRegisters() and RestoreRegisters() are empty
+  // methods, so it is the responsibility of the deferred code to save and
+  // restore registers.
+ virtual bool AutoSaveAndRestore() { return false; }
+
+ void JumpToNonSmiInput(Condition cond, Register cmp1, const Operand& cmp2);
+ void JumpToAnswerOutOfRange(Condition cond,
+ Register cmp1,
+ const Operand& cmp2);
+
+ private:
+ void GenerateNonSmiInput();
+ void GenerateAnswerOutOfRange();
+ void WriteNonSmiAnswer(Register answer,
+ Register heap_number,
+ Register scratch);
+
+ Token::Value op_;
+ int value_;
+ bool reversed_;
+ OverwriteMode overwrite_mode_;
+ Register tos_register_;
+ Label non_smi_input_;
+ Label answer_out_of_range_;
+};
+
+
+// For bit operations we try harder and handle the case where the input is not
+// a Smi but a 32-bit integer, without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond,
+ Register cmp1,
+ const Operand& cmp2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// For bit operations the result is always 32 bits, so we handle the case where
+// the result does not fit in a Smi without calling the generic stub.
+void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond,
+ Register cmp1,
+ const Operand& cmp2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// On entry the non-constant side of the binary operation is in tos_register_
+// and the constant smi side is not in any register (it is held in value_). The
+// tos_register_ is not used by the virtual frame. On exit the answer is in
+// the tos_register_ and the virtual frame is unchanged.
+void DeferredInlineSmiOperation::Generate() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Convert and write the integer answer into heap_number.
+void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer,
+ Register heap_number,
+ Register scratch) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredInlineSmiOperation::GenerateNonSmiInput() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// On MIPS we load registers condReg1 and condReg2 with the values which should
+// be compared. Together with the CodeGenerator::cc_reg_ condition, functions
+// (e.g. CodeGenerator::Branch) can then evaluate the condition correctly.
+void CodeGenerator::Comparison(Condition cc,
+ Expression* left,
+ Expression* right,
+ bool strict) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
+ CallFunctionFlags flags,
+ int position) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::CallApplyLazy(Expression* applicand,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::Branch(bool if_true, JumpTarget* target) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::CheckStack() {
+ UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
- VirtualFrame::SpilledScope spilled_scope;
- for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- VisitAndSpill(statements->at(i));
- }
+ UNIMPLEMENTED_MIPS();
}
@@ -473,14 +408,7 @@
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
- VirtualFrame::SpilledScope spilled_scope;
- frame_->EmitPush(cp);
- __ li(t0, Operand(pairs));
- frame_->EmitPush(t0);
- __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0)));
- frame_->EmitPush(t0);
- frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
- // The result is discarded.
+ UNIMPLEMENTED_MIPS();
}
@@ -490,17 +418,7 @@
void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ExpressionStatement");
- CodeForStatementPosition(node);
- Expression* expression = node->expression();
- expression->MarkAsStatement();
- LoadAndSpill(expression);
- frame_->Drop();
- ASSERT(frame_->height() == original_height);
+ UNIMPLEMENTED_MIPS();
}
@@ -525,22 +443,12 @@
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ ReturnStatement");
+ UNIMPLEMENTED_MIPS();
+}
- CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
- if (function_return_is_shadowed_) {
- frame_->EmitPop(v0);
- function_return_.Jump();
- } else {
- // Pop the result from the frame and prepare the frame for
- // returning thus making it easier to merge.
- frame_->EmitPop(v0);
- frame_->PrepareForReturn();
- function_return_.Jump();
- }
+void CodeGenerator::GenerateReturnSequence() {
+ UNIMPLEMENTED_MIPS();
}
@@ -594,6 +502,13 @@
}
+void CodeGenerator::InstantiateFunction(
+ Handle<SharedFunctionInfo> function_info,
+ bool pretenure) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
UNIMPLEMENTED_MIPS();
}
@@ -610,46 +525,49 @@
}
+void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
+ TypeofState state) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow,
+ JumpTarget* done) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::VisitSlot(Slot* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Slot");
- LoadFromSlot(node, typeof_state());
- ASSERT(frame_->height() == original_height + 1);
+ UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ VariableProxy");
-
- Variable* var = node->var();
- Expression* expr = var->rewrite();
- if (expr != NULL) {
- Visit(expr);
- } else {
- ASSERT(var->is_global());
- Reference ref(this, node);
- ref.GetValueAndSpill();
- }
- ASSERT(frame_->height() == original_height + 1);
+ UNIMPLEMENTED_MIPS();
}
void CodeGenerator::VisitLiteral(Literal* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Literal");
- __ li(t0, Operand(node->handle()));
- frame_->EmitPush(t0);
- ASSERT(frame_->height() == original_height + 1);
+ UNIMPLEMENTED_MIPS();
}
@@ -673,48 +591,23 @@
}
+void CodeGenerator::EmitSlotAssignment(Assignment* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::VisitAssignment(Assignment* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Assignment");
-
- { Reference target(this, node->target());
- if (target.is_illegal()) {
- // Fool the virtual frame into thinking that we left the assignment's
- // value on the frame.
- frame_->EmitPush(zero_reg);
- ASSERT(frame_->height() == original_height + 1);
- return;
- }
-
- if (node->op() == Token::ASSIGN ||
- node->op() == Token::INIT_VAR ||
- node->op() == Token::INIT_CONST) {
- LoadAndSpill(node->value());
- } else {
- UNIMPLEMENTED_MIPS();
- }
-
- Variable* var = node->target()->AsVariableProxy()->AsVariable();
- if (var != NULL &&
- (var->mode() == Variable::CONST) &&
- node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
- // Assignment ignored - leave the value on the stack.
- } else {
- CodeForSourcePosition(node->position());
- if (node->op() == Token::INIT_CONST) {
- // Dynamic constant initializations must use the function context
- // and initialize the actual constant declared. Dynamic variable
- // initializations are simply assignments and use SetValue.
- target.SetValue(CONST_INIT);
- } else {
- target.SetValue(NOT_CONST_INIT);
- }
- }
- }
- ASSERT(frame_->height() == original_height + 1);
+ UNIMPLEMENTED_MIPS();
}
@@ -729,73 +622,7 @@
void CodeGenerator::VisitCall(Call* node) {
-#ifdef DEBUG
- int original_height = frame_->height();
-#endif
- VirtualFrame::SpilledScope spilled_scope;
- Comment cmnt(masm_, "[ Call");
-
- Expression* function = node->expression();
- ZoneList<Expression*>* args = node->arguments();
-
- // Standard function call.
- // Check if the function is a variable or a property.
- Variable* var = function->AsVariableProxy()->AsVariable();
- Property* property = function->AsProperty();
-
- // ------------------------------------------------------------------------
- // Fast-case: Use inline caching.
- // ---
- // According to ECMA-262, section 11.2.3, page 44, the function to call
- // must be resolved after the arguments have been evaluated. The IC code
- // automatically handles this by loading the arguments before the function
- // is resolved in cache misses (this also holds for megamorphic calls).
- // ------------------------------------------------------------------------
-
- if (var != NULL && var->is_possibly_eval()) {
- UNIMPLEMENTED_MIPS();
- } else if (var != NULL && !var->is_this() && var->is_global()) {
- // ----------------------------------
- // JavaScript example: 'foo(1, 2, 3)' // foo is global
- // ----------------------------------
-
- int arg_count = args->length();
-
- // We need sp to be 8 bytes aligned when calling the stub.
- __ SetupAlignedCall(t0, arg_count);
-
- // Pass the global object as the receiver and let the IC stub
- // patch the stack to use the global proxy as 'this' in the
- // invoked function.
- LoadGlobal();
-
- // Load the arguments.
- for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
- }
-
- // Setup the receiver register and call the IC initialization code.
- __ li(a2, Operand(var->name()));
- InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
- Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop);
- CodeForSourcePosition(node->position());
- frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT,
- arg_count + 1);
- __ ReturnFromAlignedCall();
- __ lw(cp, frame_->Context());
- // Remove the function from the stack.
- frame_->EmitPush(v0);
-
- } else if (var != NULL && var->slot() != NULL &&
- var->slot()->type() == Slot::LOOKUP) {
- UNIMPLEMENTED_MIPS();
- } else if (property != NULL) {
- UNIMPLEMENTED_MIPS();
- } else {
- UNIMPLEMENTED_MIPS();
- }
-
- ASSERT(frame_->height() == original_height + 1);
+ UNIMPLEMENTED_MIPS();
}
@@ -839,30 +666,112 @@
}
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
-// This should generate code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It is not yet implemented on ARM, so it always goes to the slow case.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
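+// Deferred slow path for GenerateStringCharCodeAt below: the fast-case
+// generator is wired to branch here for conversions and out-of-range indices
+// (the MIPS slow path itself is still unimplemented).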
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ UNIMPLEMENTED_MIPS();
+  }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@@ -877,6 +786,55 @@
}
+void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {
+ public:
+ DeferredIsStringWrapperSafeForDefaultValueOf(Register object,
+ Register map_result,
+ Register scratch1,
+ Register scratch2)
+ : object_(object),
+ map_result_(map_result),
+ scratch1_(scratch1),
+ scratch2_(scratch2) { }
+
+ virtual void Generate() {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ private:
+ Register object_;
+ Register map_result_;
+ Register scratch1_;
+ Register scratch2_;
+};
+
+
+void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@@ -892,32 +850,8 @@
}
-void CodeGenerator::GenerateRandomHeapNumber(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateRandomHeapNumber(
+ ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
@@ -942,11 +876,109 @@
}
+void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredSearchCache: public DeferredCode {
+ public:
+ DeferredSearchCache(Register dst, Register cache, Register key)
+ : dst_(dst), cache_(cache), key_(key) {
+ set_comment("[ DeferredSearchCache");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register dst_, cache_, key_;
+};
+
+
+void DeferredSearchCache::Generate() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
UNIMPLEMENTED_MIPS();
}
+class DeferredSwapElements: public DeferredCode {
+ public:
+ DeferredSwapElements(Register object, Register index1, Register index2)
+ : object_(object), index1_(index1), index2_(index2) {
+ set_comment("[ DeferredSwapElements");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register object_, index1_, index2_;
+};
+
+
+void DeferredSwapElements::Generate() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
UNIMPLEMENTED_MIPS();
}
@@ -957,11 +989,39 @@
}
+class DeferredCountOperation: public DeferredCode {
+ public:
+ DeferredCountOperation(Register value,
+ bool is_increment,
+ bool is_postfix,
+ int target_size)
+ : value_(value),
+ is_increment_(is_increment),
+ is_postfix_(is_postfix),
+ target_size_(target_size) {}
+
+ virtual void Generate() {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ private:
+ Register value_;
+ bool is_increment_;
+ bool is_postfix_;
+ int target_size_;
+};
+
+
void CodeGenerator::VisitCountOperation(CountOperation* node) {
UNIMPLEMENTED_MIPS();
}
+void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
UNIMPLEMENTED_MIPS();
}
@@ -977,8 +1037,138 @@
}
+void CodeGenerator::VisitCompareToNull(CompareToNull* node) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceGetNamedValue: public DeferredCode {
+ public:
+ explicit DeferredReferenceGetNamedValue(Register receiver,
+ Handle<String> name,
+ bool is_contextual)
+ : receiver_(receiver),
+ name_(name),
+ is_contextual_(is_contextual),
+ is_dont_delete_(false) {
+ set_comment(is_contextual
+ ? "[ DeferredReferenceGetNamedValue (contextual)"
+ : "[ DeferredReferenceGetNamedValue");
+ }
+
+ virtual void Generate();
+
+ void set_is_dont_delete(bool value) {
+ ASSERT(is_contextual_);
+ is_dont_delete_ = value;
+ }
+
+ private:
+ Register receiver_;
+ Handle<String> name_;
+ bool is_contextual_;
+ bool is_dont_delete_;
+};
+
+
+
+void DeferredReferenceGetNamedValue::Generate() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceGetKeyedValue(Register key, Register receiver)
+ : key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceGetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register key_;
+ Register receiver_;
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetKeyedValue(Register value,
+ Register key,
+ Register receiver)
+ : value_(value), key_(key), receiver_(receiver) {
+ set_comment("[ DeferredReferenceSetKeyedValue");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register value_;
+ Register key_;
+ Register receiver_;
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+class DeferredReferenceSetNamedValue: public DeferredCode {
+ public:
+ DeferredReferenceSetNamedValue(Register value,
+ Register receiver,
+ Handle<String> name)
+ : value_(value), receiver_(receiver), name_(name) {
+ set_comment("[ DeferredReferenceSetNamedValue");
+ }
+
+ virtual void Generate();
+
+ private:
+ Register value_;
+ Register receiver_;
+ Handle<String> name_;
+};
+
+
+void DeferredReferenceSetNamedValue::Generate() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitKeyedLoad() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void CodeGenerator::EmitKeyedStore(StaticType* key_type,
+ WriteBarrierCharacter wb_info) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
#ifdef DEBUG
-bool CodeGenerator::HasValidEntryRegisters() { return true; }
+bool CodeGenerator::HasValidEntryRegisters() {
+ UNIMPLEMENTED_MIPS();
+ return false;
+}
#endif
@@ -986,449 +1176,33 @@
#define __ ACCESS_MASM(masm)
// -----------------------------------------------------------------------------
-// Reference support
-
-Reference::Reference(CodeGenerator* cgen,
- Expression* expression,
- bool persist_after_get)
- : cgen_(cgen),
- expression_(expression),
- type_(ILLEGAL),
- persist_after_get_(persist_after_get) {
- cgen->LoadReference(this);
-}
-
-
-Reference::~Reference() {
- ASSERT(is_unloaded() || is_illegal());
-}
+// Reference support.
Handle<String> Reference::GetName() {
- ASSERT(type_ == NAMED);
- Property* property = expression_->AsProperty();
- if (property == NULL) {
- // Global variable reference treated as a named property reference.
- VariableProxy* proxy = expression_->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- return proxy->name();
- } else {
- Literal* raw_name = property->key()->AsLiteral();
- ASSERT(raw_name != NULL);
- return Handle<String>(String::cast(*raw_name->handle()));
- }
+ UNIMPLEMENTED_MIPS();
+ return Handle<String>();
+}
+
+
+void Reference::DupIfPersist() {
+ UNIMPLEMENTED_MIPS();
}
void Reference::GetValue() {
- ASSERT(cgen_->HasValidEntryRegisters());
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case NAMED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case KEYED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-void Reference::SetValue(InitState init_state) {
- ASSERT(!is_illegal());
- ASSERT(!cgen_->has_cc());
- MacroAssembler* masm = cgen_->masm();
- Property* property = expression_->AsProperty();
- if (property != NULL) {
- cgen_->CodeForSourcePosition(property->position());
- }
-
- switch (type_) {
- case SLOT: {
- Comment cmnt(masm, "[ Store to Slot");
- Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- cgen_->StoreToSlot(slot, init_state);
- cgen_->UnloadReference(this);
- break;
- }
-
- case NAMED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- case KEYED: {
- UNIMPLEMENTED_MIPS();
- break;
- }
-
- default:
- UNREACHABLE();
- }
-}
-
-
-// On entry a0 and a1 are the things to be compared. On exit v0 is 0,
-// positive or negative to indicate the result of the comparison.
-void CompareStub::Generate(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
- __ break_(0x765);
}
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+void Reference::SetValue(InitState init_state, WriteBarrierCharacter wb_info) {
UNIMPLEMENTED_MIPS();
- return Handle<Code>::null();
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
+const char* GenericBinaryOpStub::GetName() {
UNIMPLEMENTED_MIPS();
- __ break_(0x790);
-}
-
-
-void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x808);
-}
-
-
-void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
- UncatchableExceptionType type) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x815);
-}
-
-void CEntryStub::GenerateCore(MacroAssembler* masm,
- Label* throw_normal_exception,
- Label* throw_termination_exception,
- Label* throw_out_of_memory_exception,
- bool do_gc,
- bool always_allocate) {
- // s0: number of arguments including receiver (C callee-saved)
- // s1: pointer to the first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- if (do_gc) {
- UNIMPLEMENTED_MIPS();
- }
-
- ExternalReference scope_depth =
- ExternalReference::heap_always_allocate_scope_depth();
- if (always_allocate) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Call C built-in.
- // a0 = argc, a1 = argv
- __ mov(a0, s0);
- __ mov(a1, s1);
-
- __ CallBuiltin(s2);
-
- if (always_allocate) {
- UNIMPLEMENTED_MIPS();
- }
-
- // Check for failure result.
- Label failure_returned;
- ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
- __ addiu(a2, v0, 1);
- __ andi(t0, a2, kFailureTagMask);
- __ Branch(eq, &failure_returned, t0, Operand(zero_reg));
-
- // Exit C frame and return.
- // v0:v1: result
- // sp: stack pointer
- // fp: frame pointer
- __ LeaveExitFrame(mode_);
-
- // Check if we should retry or throw exception.
- Label retry;
- __ bind(&failure_returned);
- ASSERT(Failure::RETRY_AFTER_GC == 0);
- __ andi(t0, v0, ((1 << kFailureTypeTagSize) - 1) << kFailureTagSize);
- __ Branch(eq, &retry, t0, Operand(zero_reg));
-
- // Special handling of out of memory exceptions.
- Failure* out_of_memory = Failure::OutOfMemoryException();
- __ Branch(eq, throw_out_of_memory_exception,
- v0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
-
- // Retrieve the pending exception and clear the variable.
- __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
- __ lw(a3, MemOperand(t0));
- __ LoadExternalReference(t0,
- ExternalReference(Isolate::k_pending_exception_address));
- __ lw(v0, MemOperand(t0));
- __ sw(a3, MemOperand(t0));
-
- // Special handling of termination exceptions which are uncatchable
- // by javascript code.
- __ Branch(eq, throw_termination_exception,
- v0, Operand(FACTORY->termination_exception()));
-
- // Handle normal exception.
- __ b(throw_normal_exception);
- __ nop(); // Branch delay slot nop.
-
- __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying
-}
-
-void CEntryStub::Generate(MacroAssembler* masm) {
- // Called from JavaScript; parameters are on stack as if calling JS function
- // a0: number of arguments including receiver
- // a1: pointer to builtin function
- // fp: frame pointer (restored after C call)
- // sp: stack pointer (restored as callee's sp after C call)
- // cp: current context (C callee-saved)
-
- // NOTE: Invocations of builtins may return failure objects
- // instead of a proper result. The builtin entry handles
- // this by performing a garbage collection and retrying the
- // builtin once.
-
- // Enter the exit frame that transitions from JavaScript to C++.
- __ EnterExitFrame(mode_, s0, s1, s2);
-
- // s0: number of arguments (C callee-saved)
- // s1: pointer to first argument (C callee-saved)
- // s2: pointer to builtin function (C callee-saved)
-
- Label throw_normal_exception;
- Label throw_termination_exception;
- Label throw_out_of_memory_exception;
-
- // Call into the runtime system.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- false,
- false);
-
- // Do space-specific GC and retry runtime call.
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- false);
-
- // Do full GC and retry runtime call one final time.
- Failure* failure = Failure::InternalError();
- __ li(v0, Operand(reinterpret_cast<int32_t>(failure)));
- GenerateCore(masm,
- &throw_normal_exception,
- &throw_termination_exception,
- &throw_out_of_memory_exception,
- true,
- true);
-
- __ bind(&throw_out_of_memory_exception);
- GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
-
- __ bind(&throw_termination_exception);
- GenerateThrowUncatchable(masm, TERMINATION);
-
- __ bind(&throw_normal_exception);
- GenerateThrowTOS(masm);
-}
-
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
- Label invoke, exit;
-
- // Registers:
- // a0: entry address
- // a1: function
- // a2: reveiver
- // a3: argc
- //
- // Stack:
- // 4 args slots
- // args
-
- // Save callee saved registers on the stack.
- __ MultiPush((kCalleeSaved | ra.bit()) & ~sp.bit());
-
- // We build an EntryFrame.
- __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used.
- int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
- __ li(t2, Operand(Smi::FromInt(marker)));
- __ li(t1, Operand(Smi::FromInt(marker)));
- __ LoadExternalReference(t0,
- ExternalReference(Isolate::k_c_entry_fp_address));
- __ lw(t0, MemOperand(t0));
- __ MultiPush(t0.bit() | t1.bit() | t2.bit() | t3.bit());
-
- // Setup frame pointer for the frame to be pushed.
- __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Load argv in s0 register.
- __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize +
- StandardFrameConstants::kCArgsSlotsSize));
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // caller fp |
- // function slot | entry frame
- // context slot |
- // bad fp (0xff...f) |
- // callee saved registers + ra
- // 4 args slots
- // args
-
- // Call a faked try-block that does the invoke.
- __ bal(&invoke);
- __ nop(); // Branch delay slot nop.
-
- // Caught exception: Store result (exception) in the pending
- // exception field in the JSEnv and return a failure sentinel.
- // Coming in here the fp will be invalid because the PushTryHandler below
- // sets it to 0 to signal the existence of the JSEntry frame.
- __ LoadExternalReference(t0,
- ExternalReference(Isolate::k_pending_exception_address));
- __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0.
- __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception())));
- __ b(&exit);
- __ nop(); // Branch delay slot nop.
-
- // Invoke: Link this frame into the handler chain.
- __ bind(&invoke);
- __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
- // If an exception not caught by another handler occurs, this handler
- // returns control to the code after the bal(&invoke) above, which
- // restores all kCalleeSaved registers (including cp and fp) to their
- // saved values before returning a failure to C.
-
- // Clear any pending exceptions.
- __ LoadExternalReference(t0, ExternalReference::the_hole_value_location());
- __ lw(t1, MemOperand(t0));
- __ LoadExternalReference(t0,
- ExternalReference(Isolate::k_pending_exception_address));
- __ sw(t1, MemOperand(t0));
-
- // Invoke the function by calling through JS entry trampoline builtin.
- // Notice that we cannot store a reference to the trampoline code directly in
- // this stub, because runtime stubs are not traversed when doing GC.
-
- // Registers:
- // a0: entry_address
- // a1: function
- // a2: reveiver_pointer
- // a3: argc
- // s0: argv
- //
- // Stack:
- // handler frame
- // entry frame
- // callee saved registers + ra
- // 4 args slots
- // args
-
- if (is_construct) {
- ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
- __ LoadExternalReference(t0, construct_entry);
- } else {
- ExternalReference entry(Builtins::JSEntryTrampoline);
- __ LoadExternalReference(t0, entry);
- }
- __ lw(t9, MemOperand(t0)); // deref address
-
- // Call JSEntryTrampoline.
- __ addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- __ CallBuiltin(t9);
-
- // Unlink this frame from the handler chain. When reading the
- // address of the next handler, there is no need to use the address
- // displacement since the current stack pointer (sp) points directly
- // to the stack handler.
- __ lw(t1, MemOperand(sp, StackHandlerConstants::kNextOffset));
- __ LoadExternalReference(t0, ExternalReference(Isolate::k_handler_address));
- __ sw(t1, MemOperand(t0));
-
- // This restores sp to its position before PushTryHandler.
- __ addiu(sp, sp, StackHandlerConstants::kSize);
-
- __ bind(&exit); // v0 holds result
- // Restore the top frame descriptors from the stack.
- __ Pop(t1);
- __ LoadExternalReference(t0,
- ExternalReference(Isolate::k_c_entry_fp_address));
- __ sw(t1, MemOperand(t0));
-
- // Reset the stack to the callee saved registers.
- __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
-
- // Restore callee saved registers from the stack.
- __ MultiPop((kCalleeSaved | ra.bit()) & ~sp.bit());
- // Return.
- __ Jump(ra);
-}
-
-
-// This stub performs an instanceof, calling the builtin function if
-// necessary. Uses a1 for the object, a0 for the function that it may
-// be an instance of (these are fetched from the stack).
-void InstanceofStub::Generate(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x845);
-}
-
-
-void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x851);
-}
-
-
-void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x857);
-}
-
-
-void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x863);
-}
-
-
-const char* CompareStub::GetName() {
- UNIMPLEMENTED_MIPS();
- return NULL; // UNIMPLEMENTED RETURN
-}
-
-
-int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
- return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+ return name_;
}
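Reference is reduced to stubs here (GetName, GetValue, SetValue, and the new
DupIfPersist all fall through to UNIMPLEMENTED_MIPS()). For orientation only,
a heavily simplified sketch of how the other ports drive this interface when
compiling a plain assignment, using the InitState and WriteBarrierCharacter
values this patch introduces; the node variable and the exact call sequence
are hypothetical:

    // Somewhere inside a Visit* method of the code generator:
    Reference target(this, node->target(), false /* persist_after_get */);
    Load(node->value());                          // RHS on top of the frame.
    target.SetValue(NOT_CONST_INIT, LIKELY_SMI);  // Store through the reference;
                                                  // LIKELY_SMI is the new
                                                  // write-barrier hint.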
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 66f891b..0a2cd45 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -29,17 +29,37 @@
#ifndef V8_MIPS_CODEGEN_MIPS_H_
#define V8_MIPS_CODEGEN_MIPS_H_
+
+#include "ast.h"
+#include "code-stubs-mips.h"
+#include "ic-inl.h"
+
namespace v8 {
namespace internal {
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// Not using floating-point coprocessor instructions. This flag is raised when
+// -msoft-float is passed to the compiler.
+static const bool IsMipsSoftFloatABI = true;
+#else
+static const bool IsMipsSoftFloatABI = true;
+#endif
+
// Forward declarations
class CompilationInfo;
class DeferredCode;
+class JumpTarget;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
+enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
// -----------------------------------------------------------------------------
@@ -101,7 +121,12 @@
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
- void SetValue(InitState init_state);
+ void SetValue(InitState init_state, WriteBarrierCharacter wb);
+
+ // This is in preparation for something that uses the reference on the stack.
+  // If we need this reference afterwards, then dup it now. Otherwise mark
+ // it as used.
+ inline void DupIfPersist();
private:
CodeGenerator* cgen_;
@@ -126,31 +151,24 @@
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
- // Create a code generator state based on a code generator's current
- // state. The new state has its own typeof state and pair of branch
- // labels.
- CodeGenState(CodeGenerator* owner,
- JumpTarget* true_target,
- JumpTarget* false_target);
+
// Destroy a code generator state and restore the owning code generator's
// previous state.
- ~CodeGenState();
+ virtual ~CodeGenState();
- TypeofState typeof_state() const { return typeof_state_; }
- JumpTarget* true_target() const { return true_target_; }
- JumpTarget* false_target() const { return false_target_; }
+ virtual JumpTarget* true_target() const { return NULL; }
+ virtual JumpTarget* false_target() const { return NULL; }
+
+ protected:
+ inline CodeGenerator* owner() { return owner_; }
+ inline CodeGenState* previous() const { return previous_; }
private:
// The owning code generator.
CodeGenerator* owner_;
- // A flag indicating whether we are compiling the immediate subexpression
- // of a typeof expression.
- TypeofState typeof_state_;
- JumpTarget* true_target_;
- JumpTarget* false_target_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
@@ -158,6 +176,50 @@
};
+class ConditionCodeGenState : public CodeGenState {
+ public:
+ // Create a code generator state based on a code generator's current
+ // state. The new state has its own pair of branch labels.
+ ConditionCodeGenState(CodeGenerator* owner,
+ JumpTarget* true_target,
+ JumpTarget* false_target);
+
+ virtual JumpTarget* true_target() const { return true_target_; }
+ virtual JumpTarget* false_target() const { return false_target_; }
+
+ private:
+ JumpTarget* true_target_;
+ JumpTarget* false_target_;
+};
+
+
+class TypeInfoCodeGenState : public CodeGenState {
+ public:
+ TypeInfoCodeGenState(CodeGenerator* owner,
+ Slot* slot_number,
+ TypeInfo info);
+ virtual ~TypeInfoCodeGenState();
+
+ virtual JumpTarget* true_target() const { return previous()->true_target(); }
+ virtual JumpTarget* false_target() const {
+ return previous()->false_target();
+ }
+
+ private:
+ Slot* slot_;
+ TypeInfo old_type_info_;
+};
+
+
+// -------------------------------------------------------------------------
+// Arguments allocation mode
+
+enum ArgumentsAllocationMode {
+ NO_ARGUMENTS_ALLOCATION,
+ EAGER_ARGUMENTS_ALLOCATION,
+ LAZY_ARGUMENTS_ALLOCATION
+};
+
// -----------------------------------------------------------------------------
// CodeGenerator
@@ -173,9 +235,7 @@
SECONDARY
};
- // Takes a function literal, generates code for it. This function should only
- // be called by compiler.cc.
- static Handle<Code> MakeCode(CompilationInfo* info);
+ static bool MakeCode(CompilationInfo* info);
// Printing of AST, etc. as requested by flags.
static void MakeCodePrologue(CompilationInfo* info);
@@ -185,6 +245,9 @@
Code::Flags flags,
CompilationInfo* info);
+ // Print the code after compiling it.
+ static void PrintCode(Handle<Code> code, CompilationInfo* info);
+
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
@@ -194,7 +257,9 @@
bool is_toplevel,
Handle<Script> script);
- static void RecordPositions(MacroAssembler* masm, int pos);
+ static bool RecordPositions(MacroAssembler* masm,
+ int pos,
+ bool right_here = false);
// Accessors
MacroAssembler* masm() { return masm_; }
@@ -216,73 +281,105 @@
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
+ TypeInfo type_info(Slot* slot) {
+ int index = NumberOfSlot(slot);
+ if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
+ return (*type_info_)[index];
+ }
+
+ TypeInfo set_type_info(Slot* slot, TypeInfo info) {
+ int index = NumberOfSlot(slot);
+ ASSERT(index >= kInvalidSlotNumber);
+ if (index != kInvalidSlotNumber) {
+ TypeInfo previous_value = (*type_info_)[index];
+ (*type_info_)[index] = info;
+ return previous_value;
+ }
+ return TypeInfo::Unknown();
+ }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
- static const int kUnknownIntValue = -1;
-
- // Number of instructions used for the JS return sequence. The constant is
- // used by the debugger to patch the JS return sequence.
- static const int kJSReturnSequenceLength = 7;
-
- // If the name is an inline runtime function call return the number of
- // expected arguments. Otherwise return -1.
- static int InlineRuntimeCallArgumentsCount(Handle<String> name);
+ // Constants related to patching of inlined load/store.
+ static int GetInlinedKeyedLoadInstructionsAfterPatch() {
+ // This is in correlation with the padding in MacroAssembler::Abort.
+ return FLAG_debug_code ? 45 : 20;
+ }
+ static const int kInlinedKeyedStoreInstructionsAfterPatch = 9;
+ static int GetInlinedNamedStoreInstructionsAfterPatch() {
+ ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
+ // Magic number 5: instruction count after patched map load:
+    // li: 2 (lui & ori), branch: 2 (bne & nop), sw: 1
+ return Isolate::Current()->inlined_write_barrier_size() + 5;
+ }
private:
+ // Type of a member function that generates inline code for a native function.
+ typedef void (CodeGenerator::*InlineFunctionGenerator)
+ (ZoneList<Expression*>*);
+
+ static const InlineFunctionGenerator kInlineFunctionGenerators[];
+
+
// Construction/Destruction.
explicit CodeGenerator(MacroAssembler* masm);
// Accessors.
inline bool is_eval();
inline Scope* scope();
+ inline bool is_strict_mode();
+ inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
+ static const int kInvalidSlotNumber = -1;
+
+ int NumberOfSlot(Slot* slot);
// State
bool has_cc() const { return cc_reg_ != cc_always; }
- TypeofState typeof_state() const { return state_->typeof_state(); }
+
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
- // We don't track loop nesting level on mips yet.
- int loop_nesting() const { return 0; }
+ // Track loop nesting level.
+ int loop_nesting() const { return loop_nesting_; }
+ void IncrementLoopNesting() { loop_nesting_++; }
+ void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
+ virtual void VisitSlot(Slot* node);
#define DEF_VISIT(type) \
- void Visit##type(type* node);
+ virtual void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- inline void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
// Main code generation function
void Generate(CompilationInfo* info);
+ // Generate the return sequence code. Should be called no more than
+ // once per compiled function, immediately after binding the return
+ // target (which can not be done more than once). The return value should
+ // be in v0.
+ void GenerateReturnSequence();
+
+ // Returns the arguments allocation mode.
+ ArgumentsAllocationMode ArgumentsMode();
+
+ // Store the arguments object and allocate it if necessary.
+ void StoreArgumentsObject(bool initial);
+
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
- MemOperand ContextOperand(Register context, int index) const {
- return MemOperand(context, Context::SlotOffset(index));
- }
-
MemOperand SlotOperand(Slot* slot, Register tmp);
- // Expressions
- MemOperand GlobalObject() const {
- return ContextOperand(cp, Context::GLOBAL_INDEX);
- }
+ MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
+ Register tmp,
+ Register tmp2,
+ JumpTarget* slow);
void LoadCondition(Expression* x,
JumpTarget* true_target,
@@ -290,35 +387,113 @@
bool force_cc);
void Load(Expression* x);
void LoadGlobal();
+ void LoadGlobalReceiver(Register scratch);
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression);
+
+ // Special code for typeof expressions: Unfortunately, we must
+ // be careful when loading the expression in 'typeof'
+ // expressions. We are not allowed to throw reference errors for
+ // non-existing properties of the global object, so we must make it
+ // look like an explicit property access, instead of an access
+ // through the context chain.
+ void LoadTypeofExpression(Expression* x);
+
+ // Store a keyed property. Key and receiver are on the stack and the value is
+  // in a0. Result is returned in v0.
+ void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ void LoadFromGlobalSlotCheckExtensions(Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow);
+ void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
+
+ // Support for loading from local/global variables and arguments
+ // whose location is known unless they are shadowed by
+ // eval-introduced bindings. Generates no code for unsupported slot
+ // types and therefore expects to fall through to the slow jump target.
+ void EmitDynamicLoadFromSlotFastCase(Slot* slot,
+ TypeofState typeof_state,
+ JumpTarget* slow,
+ JumpTarget* done);
+
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
- struct InlineRuntimeLUT {
- void (CodeGenerator::*method)(ZoneList<Expression*>*);
- const char* name;
- int nargs;
- };
+ // Support for compiling assignment expressions.
+ void EmitSlotAssignment(Assignment* node);
+ void EmitNamedPropertyAssignment(Assignment* node);
+ void EmitKeyedPropertyAssignment(Assignment* node);
- static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
+ // Load a named property, returning it in v0. The receiver is passed on the
+ // stack, and remains there.
+ void EmitNamedLoad(Handle<String> name, bool is_contextual);
+
+ // Store to a named property. If the store is contextual, value is passed on
+ // the frame and consumed. Otherwise, receiver and value are passed on the
+ // frame and consumed. The result is returned in v0.
+ void EmitNamedStore(Handle<String> name, bool is_contextual);
+
+ // Load a keyed property, leaving it in v0. The receiver and key are
+ // passed on the stack, and remain there.
+ void EmitKeyedLoad();
+
+ void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
+
+ // Generate code that computes a shortcutting logical operation.
+ void GenerateLogicalBooleanOperation(BinaryOperation* node);
+
+ void GenericBinaryOperation(Token::Value op,
+ OverwriteMode overwrite_mode,
+ GenerateInlineSmi inline_smi,
+ int known_rhs =
+ GenericBinaryOpStub::kUnknownIntValue);
+
+ void VirtualFrameBinaryOperation(Token::Value op,
+ OverwriteMode overwrite_mode,
+ int known_rhs =
+ GenericBinaryOpStub::kUnknownIntValue);
+
+ void SmiOperation(Token::Value op,
+ Handle<Object> value,
+ bool reversed,
+ OverwriteMode mode);
+
+ void Comparison(Condition cc,
+ Expression* left,
+ Expression* right,
+ bool strict = false);
+
+ void CallWithArguments(ZoneList<Expression*>* arguments,
+ CallFunctionFlags flags,
+ int position);
+
+ // An optimized implementation of expressions of the form
+ // x.apply(y, arguments). We call x the applicand and y the receiver.
+ // The optimization avoids allocating an arguments object if possible.
+ void CallApplyLazy(Expression* applicand,
+ Expression* receiver,
+ VariableProxy* arguments,
+ int position);
+
+ // Control flow
+ void Branch(bool if_true, JumpTarget* target);
+ void CheckStack();
+
bool CheckForInlineRuntimeCall(CallRuntime* node);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
- Handle<Code> ComputeCallInitialize(int argc, InLoopFlag in_loop);
-
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
+ // Instantiate the function based on the shared function info.
+ void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
+ bool pretenure);
+
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
@@ -338,10 +513,13 @@
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
- void GenerateCharFromCode(ZoneList<Expression*>* args);
+ void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -358,14 +536,38 @@
void GenerateStringAdd(ZoneList<Expression*>* args);
void GenerateSubString(ZoneList<Expression*>* args);
void GenerateStringCompare(ZoneList<Expression*>* args);
+ void GenerateIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args);
+
+ // Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
+
+ void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
+
+ // Support for fast native caches.
+ void GenerateGetFromCache(ZoneList<Expression*>* args);
+
+ // Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
+ // Fast swapping of elements.
+ void GenerateSwapElements(ZoneList<Expression*>* args);
+
+ // Fast call for custom callbacks.
+ void GenerateCallFunction(ZoneList<Expression*>* args);
+
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
void GenerateMathSqrt(ZoneList<Expression*>* args);
+ void GenerateMathLog(ZoneList<Expression*>* args);
+
+ void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
+
+ void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
+ void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
@@ -389,9 +591,6 @@
bool HasValidEntryRegisters();
#endif
- bool is_eval_; // Tells whether code is generated for eval.
-
- Handle<Script> script_;
List<DeferredCode*> deferred_;
// Assembler
@@ -404,7 +603,9 @@
RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
+ int loop_nesting_;
+ Vector<TypeInfo>* type_info_;
// Jump targets
BreakTarget function_return_;
@@ -413,14 +614,15 @@
// to some unlinking code).
bool function_return_is_shadowed_;
- static InlineRuntimeLUT kInlineRuntimeLUT[];
-
friend class VirtualFrame;
+ friend class Isolate;
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
+ friend class InlineRuntimeFunctionsTable;
+ friend class LCodeGen;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
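The reworked CodeGenState above behaves as a small RAII stack: each subclass
constructor records the owner's current state and installs itself, and the
now-virtual destructor restores the previous one, so true_target() and
false_target() always resolve against the innermost active state. A sketch of
the usual call site, using the LoadCondition signature declared in this header
(illustrative; not the actual MIPS body):

    void CodeGenerator::LoadCondition(Expression* x,
                                      JumpTarget* true_target,
                                      JumpTarget* false_target,
                                      bool force_cc) {
      // Install the branch targets for the duration of this subexpression.
      ConditionCodeGenState new_state(this, true_target, false_target);
      Visit(x);
      // new_state is destroyed on return, restoring the enclosing targets.
    }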
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index 49502bd..16e49c9 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -31,10 +31,8 @@
#include "constants-mips.h"
-namespace assembler {
-namespace mips {
-
-namespace v8i = v8::internal;
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
@@ -102,20 +100,20 @@
}
-const char* FPURegister::names_[kNumFPURegister] = {
+const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
"f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
};
// List of alias names which can be used when referring to MIPS registers.
-const FPURegister::RegisterAlias FPURegister::aliases_[] = {
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, NULL}
};
-const char* FPURegister::Name(int creg) {
+const char* FPURegisters::Name(int creg) {
const char* result;
- if ((0 <= creg) && (creg < kNumFPURegister)) {
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
@@ -124,9 +122,9 @@
}
-int FPURegister::Number(const char* name) {
+int FPURegisters::Number(const char* name) {
// Look through the canonical names.
- for (int i = 0; i < kNumSimuRegisters; i++) {
+ for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
@@ -149,8 +147,8 @@
// -----------------------------------------------------------------------------
// Instruction
-bool Instruction::IsForbiddenInBranchDelay() {
- int op = OpcodeFieldRaw();
+bool Instruction::IsForbiddenInBranchDelay() const {
+ const int op = OpcodeFieldRaw();
switch (op) {
case J:
case JAL:
@@ -189,13 +187,18 @@
}
-bool Instruction::IsLinkingInstruction() {
- int op = OpcodeFieldRaw();
+bool Instruction::IsLinkingInstruction() const {
+ const int op = OpcodeFieldRaw();
switch (op) {
case JAL:
- case BGEZAL:
- case BLTZAL:
- return true;
+ case REGIMM:
+ switch (RtFieldRaw()) {
+ case BGEZAL:
+ case BLTZAL:
+ return true;
+ default:
+ return false;
+ };
case SPECIAL:
switch (FunctionFieldRaw()) {
case JALR:
@@ -209,7 +212,7 @@
}
-bool Instruction::IsTrap() {
+bool Instruction::IsTrap() const {
if (OpcodeFieldRaw() != SPECIAL) {
return false;
} else {
@@ -264,6 +267,9 @@
case TLTU:
case TEQ:
case TNE:
+ case MOVZ:
+ case MOVN:
+ case MOVCI:
return kRegisterType;
default:
UNREACHABLE();
@@ -272,13 +278,23 @@
case SPECIAL2:
switch (FunctionFieldRaw()) {
case MUL:
+ case CLZ:
+ return kRegisterType;
+ default:
+ UNREACHABLE();
+ };
+ break;
+ case SPECIAL3:
+ switch (FunctionFieldRaw()) {
+ case INS:
+ case EXT:
return kRegisterType;
default:
UNREACHABLE();
};
break;
case COP1: // Coprocessor instructions
- switch (FunctionFieldRaw()) {
+ switch (RsFieldRawNoAssert()) {
case BC1: // branch on coprocessor condition
return kImmediateType;
default:
@@ -304,10 +320,17 @@
case BLEZL:
case BGTZL:
case LB:
+ case LH:
+ case LWL:
case LW:
case LBU:
+ case LHU:
+ case LWR:
case SB:
+ case SH:
+ case SWL:
case SW:
+ case SWR:
case LWC1:
case LDC1:
case SWC1:
@@ -323,6 +346,7 @@
return kUnsupported;
}
-} } // namespace assembler::mips
+
+} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
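The IsLinkingInstruction() change above deserves a note: BGEZAL and BLTZAL are
rt-field encodings under the REGIMM opcode rather than opcodes in their own
right (a plain bal, for example, assembles as REGIMM with rs = zero_reg and
rt = BGEZAL), so the old top-level cases could never match and linking
branches went unrecognized. The test the new inner switch performs amounts to
the following (illustrative, given some Instruction* instr):

    bool links = (instr->OpcodeFieldRaw() == REGIMM) &&
                 (instr->RtFieldRaw() == BGEZAL ||
                  instr->RtFieldRaw() == BLTZAL);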
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index d0fdf88..b20e9a2 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -28,15 +28,25 @@
#ifndef V8_MIPS_CONSTANTS_H_
#define V8_MIPS_CONSTANTS_H_
-#include "checks.h"
-
// UNIMPLEMENTED_ macro for MIPS.
+#ifdef DEBUG
#define UNIMPLEMENTED_MIPS() \
v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
__FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_MIPS()
+#endif
+
#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+#ifdef _MIPS_ARCH_MIPS32R2
+ #define mips32r2 1
+#else
+ #define mips32r2 0
+#endif
+
+
// Defines constants and accessor classes to assemble, disassemble and
// simulate MIPS32 instructions.
//
@@ -44,8 +54,8 @@
// Volume II: The MIPS32 Instruction Set
// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
-namespace assembler {
-namespace mips {
+namespace v8 {
+namespace internal {
// -----------------------------------------------------------------------------
// Registers and FPURegister.
@@ -61,9 +71,18 @@
static const int kPCRegister = 34;
// Number coprocessor registers.
-static const int kNumFPURegister = 32;
+static const int kNumFPURegisters = 32;
static const int kInvalidFPURegister = -1;
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+static const int kFCSRRegister = 31;
+static const int kInvalidFPUControlRegister = -1;
+static const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1;
+
+// FCSR constants.
+static const uint32_t kFCSRFlagMask = (1 << 6) - 1;
+static const uint32_t kFCSRFlagShift = 2;
+
// Helper functions for converting between register numbers and names.
class Registers {
public:
@@ -88,7 +107,7 @@
};
// Helper functions for converting between register numbers and names.
-class FPURegister {
+class FPURegisters {
public:
// Return the name of the register.
static const char* Name(int reg);
@@ -103,7 +122,7 @@
private:
- static const char* names_[kNumFPURegister];
+ static const char* names_[kNumFPURegisters];
static const RegisterAlias aliases_[];
};
@@ -136,6 +155,7 @@
static const int kSaBits = 5;
static const int kFunctionShift = 0;
static const int kFunctionBits = 6;
+static const int kLuiShift = 16;
static const int kImm16Shift = 0;
static const int kImm16Bits = 16;
@@ -146,6 +166,14 @@
static const int kFsBits = 5;
static const int kFtShift = 16;
static const int kFtBits = 5;
+static const int kFdShift = 6;
+static const int kFdBits = 5;
+static const int kFCccShift = 8;
+static const int kFCccBits = 3;
+static const int kFBccShift = 18;
+static const int kFBccBits = 3;
+static const int kFBtrueShift = 16;
+static const int kFBtrueBits = 1;
// ----- Miscellaneous useful masks.
// Instruction bit masks.
@@ -159,9 +187,9 @@
static const int kFunctionFieldMask =
((1 << kFunctionBits) - 1) << kFunctionShift;
// Misc masks.
-static const int HIMask = 0xffff << 16;
-static const int LOMask = 0xffff;
-static const int signMask = 0x80000000;
+static const int kHiMask = 0xffff << 16;
+static const int kLoMask = 0xffff;
+static const int kSignMask = 0x80000000;
// ----- MIPS Opcodes and Function Fields.
@@ -194,12 +222,20 @@
BGTZL = ((2 << 3) + 7) << kOpcodeShift,
SPECIAL2 = ((3 << 3) + 4) << kOpcodeShift,
+ SPECIAL3 = ((3 << 3) + 7) << kOpcodeShift,
LB = ((4 << 3) + 0) << kOpcodeShift,
+ LH = ((4 << 3) + 1) << kOpcodeShift,
+ LWL = ((4 << 3) + 2) << kOpcodeShift,
LW = ((4 << 3) + 3) << kOpcodeShift,
LBU = ((4 << 3) + 4) << kOpcodeShift,
+ LHU = ((4 << 3) + 5) << kOpcodeShift,
+ LWR = ((4 << 3) + 6) << kOpcodeShift,
SB = ((5 << 3) + 0) << kOpcodeShift,
+ SH = ((5 << 3) + 1) << kOpcodeShift,
+ SWL = ((5 << 3) + 2) << kOpcodeShift,
SW = ((5 << 3) + 3) << kOpcodeShift,
+ SWR = ((5 << 3) + 6) << kOpcodeShift,
LWC1 = ((6 << 3) + 1) << kOpcodeShift,
LDC1 = ((6 << 3) + 5) << kOpcodeShift,
@@ -216,9 +252,12 @@
SLLV = ((0 << 3) + 4),
SRLV = ((0 << 3) + 6),
SRAV = ((0 << 3) + 7),
+ MOVCI = ((0 << 3) + 1),
JR = ((1 << 3) + 0),
JALR = ((1 << 3) + 1),
+ MOVZ = ((1 << 3) + 2),
+ MOVN = ((1 << 3) + 3),
BREAK = ((1 << 3) + 5),
MFHI = ((2 << 3) + 0),
@@ -250,6 +289,12 @@
// SPECIAL2 Encoding of Function Field.
MUL = ((0 << 3) + 2),
+ CLZ = ((4 << 3) + 0),
+ CLO = ((4 << 3) + 1),
+
+ // SPECIAL3 Encoding of Function Field.
+ EXT = ((0 << 3) + 0),
+ INS = ((0 << 3) + 4),
// REGIMM encoding of rt Field.
BLTZ = ((0 << 3) + 0) << 16,
@@ -259,8 +304,10 @@
// COP1 Encoding of rs Field.
MFC1 = ((0 << 3) + 0) << 21,
+ CFC1 = ((0 << 3) + 2) << 21,
MFHC1 = ((0 << 3) + 3) << 21,
MTC1 = ((0 << 3) + 4) << 21,
+ CTC1 = ((0 << 3) + 6) << 21,
MTHC1 = ((0 << 3) + 7) << 21,
BC1 = ((1 << 3) + 0) << 21,
S = ((2 << 3) + 0) << 21,
@@ -269,14 +316,46 @@
L = ((2 << 3) + 5) << 21,
PS = ((2 << 3) + 6) << 21,
// COP1 Encoding of Function Field When rs=S.
+ ROUND_L_S = ((1 << 3) + 0),
+ TRUNC_L_S = ((1 << 3) + 1),
+ CEIL_L_S = ((1 << 3) + 2),
+ FLOOR_L_S = ((1 << 3) + 3),
+ ROUND_W_S = ((1 << 3) + 4),
+ TRUNC_W_S = ((1 << 3) + 5),
+ CEIL_W_S = ((1 << 3) + 6),
+ FLOOR_W_S = ((1 << 3) + 7),
CVT_D_S = ((4 << 3) + 1),
CVT_W_S = ((4 << 3) + 4),
CVT_L_S = ((4 << 3) + 5),
CVT_PS_S = ((4 << 3) + 6),
// COP1 Encoding of Function Field When rs=D.
+ ADD_D = ((0 << 3) + 0),
+ SUB_D = ((0 << 3) + 1),
+ MUL_D = ((0 << 3) + 2),
+ DIV_D = ((0 << 3) + 3),
+ SQRT_D = ((0 << 3) + 4),
+ ABS_D = ((0 << 3) + 5),
+ MOV_D = ((0 << 3) + 6),
+ NEG_D = ((0 << 3) + 7),
+ ROUND_L_D = ((1 << 3) + 0),
+ TRUNC_L_D = ((1 << 3) + 1),
+ CEIL_L_D = ((1 << 3) + 2),
+ FLOOR_L_D = ((1 << 3) + 3),
+ ROUND_W_D = ((1 << 3) + 4),
+ TRUNC_W_D = ((1 << 3) + 5),
+ CEIL_W_D = ((1 << 3) + 6),
+ FLOOR_W_D = ((1 << 3) + 7),
CVT_S_D = ((4 << 3) + 0),
CVT_W_D = ((4 << 3) + 4),
CVT_L_D = ((4 << 3) + 5),
+ C_F_D = ((6 << 3) + 0),
+ C_UN_D = ((6 << 3) + 1),
+ C_EQ_D = ((6 << 3) + 2),
+ C_UEQ_D = ((6 << 3) + 3),
+ C_OLT_D = ((6 << 3) + 4),
+ C_ULT_D = ((6 << 3) + 5),
+ C_OLE_D = ((6 << 3) + 6),
+ C_ULE_D = ((6 << 3) + 7),
// COP1 Encoding of Function Field When rs=W or L.
CVT_S_W = ((4 << 3) + 0),
CVT_D_W = ((4 << 3) + 1),
@@ -293,7 +372,7 @@
// the 'U' prefix is used to specify unsigned comparisons.
enum Condition {
// Any value < 0 is considered no_condition.
- no_condition = -1,
+ kNoCondition = -1,
overflow = 0,
no_overflow = 1,
@@ -321,12 +400,59 @@
eq = equal,
not_zero = not_equal,
ne = not_equal,
+ nz = not_equal,
sign = negative,
not_sign = positive,
+ mi = negative,
+ pl = positive,
+ hi = Ugreater,
+ ls = Uless_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ hs = Ugreater_equal,
+ lo = Uless,
+ al = cc_always,
- cc_default = no_condition
+ cc_default = kNoCondition
};
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc) {
+ ASSERT(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
+
+
+inline Condition ReverseCondition(Condition cc) {
+ switch (cc) {
+ case Uless:
+ return Ugreater;
+ case Ugreater:
+ return Uless;
+ case Ugreater_equal:
+ return Uless_equal;
+ case Uless_equal:
+ return Ugreater_equal;
+ case less:
+ return greater;
+ case greater:
+ return less;
+ case greater_equal:
+ return less_equal;
+ case less_equal:
+ return greater_equal;
+ default:
+ return cc;
+ };
+}
+
+
// ----- Coprocessor conditions.
enum FPUCondition {
F, // False
@@ -340,6 +466,46 @@
};
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the MIPS. They are defined so that they can
+// appear in shared function signatures, but will be ignored in MIPS
+// implementations.
+enum Hint {
+ no_hint = 0
+};
+
+
+inline Hint NegateHint(Hint hint) {
+ return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-mips.cc, as they use named
+// registers and other constants.
+
+// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// operations as post-increment of sp.
+extern const Instr kPopInstruction;
+// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
+extern const Instr kPushInstruction;
+// sw(r, MemOperand(sp, 0))
+extern const Instr kPushRegPattern;
+// lw(r, MemOperand(sp, 0))
+extern const Instr kPopRegPattern;
+extern const Instr kLwRegFpOffsetPattern;
+extern const Instr kSwRegFpOffsetPattern;
+extern const Instr kLwRegFpNegOffsetPattern;
+extern const Instr kSwRegFpNegOffsetPattern;
+// A mask for the Rt register for push, pop, lw, sw instructions.
+extern const Instr kRtMask;
+extern const Instr kLwSwInstrTypeMask;
+extern const Instr kLwSwInstrArgumentMask;
+extern const Instr kLwSwOffsetMask;
+
// Break 0xfffff, reserved for redirected real time call.
const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
// A nop instruction. (Encoding of sll 0 0 0).
@@ -348,10 +514,10 @@
class Instruction {
public:
enum {
- kInstructionSize = 4,
- kInstructionSizeLog2 = 2,
+ kInstrSize = 4,
+ kInstrSizeLog2 = 2,
// On MIPS PC cannot actually be directly accessed. We behave as if PC was
- // always the value of the current instruction being exectued.
+ // always the value of the current instruction being executed.
kPCReadOffset = 0
};
@@ -388,45 +554,64 @@
// Accessors for the different named fields used in the MIPS encoding.
- inline Opcode OpcodeField() const {
+ inline Opcode OpcodeValue() const {
return static_cast<Opcode>(
Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
}
- inline int RsField() const {
+ inline int RsValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRsShift + kRsBits - 1, kRsShift);
}
- inline int RtField() const {
+ inline int RtValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kRtShift + kRtBits - 1, kRtShift);
}
- inline int RdField() const {
+ inline int RdValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kRdShift + kRdBits - 1, kRdShift);
}
- inline int SaField() const {
+ inline int SaValue() const {
ASSERT(InstructionType() == kRegisterType);
return Bits(kSaShift + kSaBits - 1, kSaShift);
}
- inline int FunctionField() const {
+ inline int FunctionValue() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
}
- inline int FsField() const {
- return Bits(kFsShift + kRsBits - 1, kFsShift);
+ inline int FdValue() const {
+ return Bits(kFdShift + kFdBits - 1, kFdShift);
}
- inline int FtField() const {
- return Bits(kFtShift + kRsBits - 1, kFtShift);
+ inline int FsValue() const {
+ return Bits(kFsShift + kFsBits - 1, kFsShift);
+ }
+
+ inline int FtValue() const {
+ return Bits(kFtShift + kFtBits - 1, kFtShift);
+ }
+
+ // Float Compare condition code instruction bits.
+ inline int FCccValue() const {
+ return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+ }
+
+ // Float Branch condition code instruction bits.
+ inline int FBccValue() const {
+ return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+ }
+
+ // Float Branch true/false instruction bit.
+ inline int FBtrueValue() const {
+ return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
}
// Return the fields at their original place in the instruction encoding.
@@ -440,6 +625,11 @@
return InstructionBits() & kRsFieldMask;
}
+ // Same as above function, but safe to call within InstructionType().
+ inline int RsFieldRawNoAssert() const {
+ return InstructionBits() & kRsFieldMask;
+ }
+
inline int RtFieldRaw() const {
ASSERT(InstructionType() == kRegisterType ||
InstructionType() == kImmediateType);
@@ -461,37 +651,37 @@
}
// Get the secondary field according to the opcode.
- inline int SecondaryField() const {
+ inline int SecondaryValue() const {
Opcode op = OpcodeFieldRaw();
switch (op) {
case SPECIAL:
case SPECIAL2:
- return FunctionField();
+ return FunctionValue();
case COP1:
- return RsField();
+ return RsValue();
case REGIMM:
- return RtField();
+ return RtValue();
default:
return NULLSF;
}
}
- inline int32_t Imm16Field() const {
+ inline int32_t Imm16Value() const {
ASSERT(InstructionType() == kImmediateType);
return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
}
- inline int32_t Imm26Field() const {
+ inline int32_t Imm26Value() const {
ASSERT(InstructionType() == kJumpType);
return Bits(kImm16Shift + kImm26Bits - 1, kImm26Shift);
}
// Say if the instruction should not be used in a branch delay slot.
- bool IsForbiddenInBranchDelay();
+ bool IsForbiddenInBranchDelay() const;
// Say if the instruction 'links'. eg: jal, bal.
- bool IsLinkingInstruction();
+ bool IsLinkingInstruction() const;
// Say if the instruction is a break or a trap.
- bool IsTrap();
+ bool IsTrap() const;
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
@@ -510,16 +700,24 @@
// -----------------------------------------------------------------------------
// MIPS assembly various constants.
-static const int kArgsSlotsSize = 4 * Instruction::kInstructionSize;
+
+static const int kArgsSlotsSize = 4 * Instruction::kInstrSize;
static const int kArgsSlotsNum = 4;
+// C/C++ argument slots size.
+static const int kCArgsSlotsSize = 4 * Instruction::kInstrSize;
+// JS argument slots size.
+static const int kJSArgsSlotsSize = 0 * Instruction::kInstrSize;
+// Assembly builtins argument slots size.
+static const int kBArgsSlotsSize = 0 * Instruction::kInstrSize;
-static const int kBranchReturnOffset = 2 * Instruction::kInstructionSize;
+static const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
-static const int kDoubleAlignment = 2 * 8;
-static const int kDoubleAlignmentMask = kDoubleAlignmentMask - 1;
+static const int kDoubleAlignmentBits = 3;
+static const int kDoubleAlignment = (1 << kDoubleAlignmentBits);
+static const int kDoubleAlignmentMask = kDoubleAlignment - 1;
-} } // namespace assembler::mips
+} } // namespace v8::internal
#endif // #ifndef V8_MIPS_CONSTANTS_H_
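A quick worked example of the NegateCondition() trick defined above: the
condition codes are laid out as complementary even/odd pairs, so flipping
bit 0 yields the opposite test, and kNoCondition (-1) flips to -2, which still
satisfies the negative-means-no-condition convention described in the comment.
Illustrative checks:

    ASSERT(NegateCondition(overflow) == no_overflow);  // 0 ^ 1 == 1
    ASSERT(NegateCondition(no_overflow) == overflow);  // negation is an involution
    ASSERT(ReverseCondition(less) == greater);         // operand swap, not negation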
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 659fc01..36f577b 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -39,16 +39,25 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "cpu.h"
+#include "macro-assembler.h"
+
+#include "simulator.h" // For cache flushing.
namespace v8 {
namespace internal {
+
void CPU::Setup() {
- // Nothing to do.
+ CpuFeatures* cpu_features = Isolate::Current()->cpu_features();
+ cpu_features->Probe(true);
+ if (!cpu_features->IsSupported(FPU) || Serializer::enabled()) {
+ V8::DisableCrankshaft();
+ }
}
+
void CPU::FlushICache(void* start, size_t size) {
-#ifdef __mips
+#if !defined (USE_SIMULATOR)
int res;
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall
@@ -58,7 +67,14 @@
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
-#endif // #ifdef __mips
+#else // USE_SIMULATOR.
+ // Not generating mips instructions for C-code. This means that we are
+ // building a mips emulator based target. We should notify the simulator
+ // that the Icache was flushed.
+ // None of this code ends up in the snapshot so there are no issues
+ // around whether or not to generate the code when building snapshots.
+ Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#endif // USE_SIMULATOR.
}
@@ -68,6 +84,7 @@
#endif // #ifdef __mips
}
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index b8ae68e..35df69b 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -38,8 +38,10 @@
namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
+
bool BreakLocationIterator::IsDebugBreakAtReturn() {
- return Debug::IsDebugBreakAtReturn(rinfo());
+ UNIMPLEMENTED_MIPS();
+ return false;
}
@@ -54,18 +56,33 @@
}
-// A debug break in the exit code is identified by a call.
+// A debug break in the exit code is identified by the JS frame exit code
+// having been patched with a li/call pseudo-instruction (lui/ori/jalr).
bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
- return rinfo->IsPatchedReturnSequence();
+ UNIMPLEMENTED_MIPS();
+ return false;
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+ UNIMPLEMENTED_MIPS();
+ return false;
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+ UNIMPLEMENTED_MIPS();
}
#define __ ACCESS_MASM(masm)
-
-
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@@ -106,12 +123,23 @@
}
-void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
+void Debug::GenerateSlot(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
}
+
+void Debug::GenerateSlotDebugBreak(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void Debug::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void Debug::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
- masm->Abort("LiveEdit frame dropping is not supported on mips");
+ UNIMPLEMENTED_MIPS();
}
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
new file mode 100644
index 0000000..4b69859
--- /dev/null
+++ b/src/mips/deoptimizer-mips.cc
@@ -0,0 +1,91 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#include "codegen.h"
+#include "deoptimizer.h"
+#include "full-codegen.h"
+#include "safepoint-table.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
+
+namespace v8 {
+namespace internal {
+
+
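+// Size in bytes of each entry in the deoptimization table (value taken
+// over from the x64 port; see the note above).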
+int Deoptimizer::table_entry_size_ = 10;
+
+
+int Deoptimizer::patch_size() {
+ const int kCallInstructionSizeInWords = 3;
+ return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
+ Code* check_code,
+ Code* replacement_code) {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeOsrOutputFrame() {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
+ int frame_index) {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::EntryGenerator::Generate() {
+ UNIMPLEMENTED();
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+ UNIMPLEMENTED();
+}
+
+
+} } // namespace v8::internal
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index 7cb52a6..b7ceb2b 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -34,10 +34,9 @@
// NameConverter converter;
// Disassembler d(converter);
// for (byte_* pc = begin; pc < end;) {
-// char buffer[128];
-// buffer[0] = '\0';
-// byte_* prev_pc = pc;
-// pc += d.InstructionDecode(buffer, sizeof buffer, pc);
+// v8::internal::EmbeddedVector<char, 256> buffer;
+// byte* prev_pc = pc;
+// pc += d.InstructionDecode(buffer, pc);
// printf("%p %08x %s\n",
// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
// }
@@ -59,17 +58,13 @@
#if defined(V8_TARGET_ARCH_MIPS)
-#include "constants-mips.h"
+#include "mips/constants-mips.h"
#include "disasm.h"
#include "macro-assembler.h"
#include "platform.h"
-namespace assembler {
-namespace mips {
-
-
-namespace v8i = v8::internal;
-
+namespace v8 {
+namespace internal {
//------------------------------------------------------------------------------
@@ -99,7 +94,7 @@
// Printing of common values.
void PrintRegister(int reg);
- void PrintCRegister(int creg);
+ void PrintFPURegister(int freg);
void PrintRs(Instruction* instr);
void PrintRt(Instruction* instr);
void PrintRd(Instruction* instr);
@@ -107,6 +102,9 @@
void PrintFt(Instruction* instr);
void PrintFd(Instruction* instr);
void PrintSa(Instruction* instr);
+ void PrintSd(Instruction* instr);
+ void PrintBc(Instruction* instr);
+ void PrintCc(Instruction* instr);
void PrintFunction(Instruction* instr);
void PrintSecondaryField(Instruction* instr);
void PrintUImm16(Instruction* instr);
@@ -119,7 +117,7 @@
// Handle formatting of instructions and their options.
int FormatRegister(Instruction* instr, const char* option);
- int FormatCRegister(Instruction* instr, const char* option);
+ int FormatFPURegister(Instruction* instr, const char* option);
int FormatOption(Instruction* instr, const char* option);
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
@@ -166,84 +164,100 @@
void Decoder::PrintRs(Instruction* instr) {
- int reg = instr->RsField();
+ int reg = instr->RsValue();
PrintRegister(reg);
}
void Decoder::PrintRt(Instruction* instr) {
- int reg = instr->RtField();
+ int reg = instr->RtValue();
PrintRegister(reg);
}
void Decoder::PrintRd(Instruction* instr) {
- int reg = instr->RdField();
+ int reg = instr->RdValue();
PrintRegister(reg);
}
-// Print the Cregister name according to the active name converter.
-void Decoder::PrintCRegister(int creg) {
- Print(converter_.NameOfXMMRegister(creg));
+// Print the FPUregister name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
}
void Decoder::PrintFs(Instruction* instr) {
- int creg = instr->RsField();
- PrintCRegister(creg);
+ int freg = instr->RsValue();
+ PrintFPURegister(freg);
}
void Decoder::PrintFt(Instruction* instr) {
- int creg = instr->RtField();
- PrintCRegister(creg);
+ int freg = instr->RtValue();
+ PrintFPURegister(freg);
}
void Decoder::PrintFd(Instruction* instr) {
- int creg = instr->RdField();
- PrintCRegister(creg);
+ int freg = instr->RdValue();
+ PrintFPURegister(freg);
}
// Print the integer value of the sa field.
void Decoder::PrintSa(Instruction* instr) {
- int sa = instr->SaField();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", sa);
+ int sa = instr->SaValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
+// Print the integer value of the rd field (when it is not used as a register).
+void Decoder::PrintSd(Instruction* instr) {
+ int sd = instr->RdValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+}
+
+
+// Print the integer value of the cc field for the bc1t/f instructions.
+void Decoder::PrintBc(Instruction* instr) {
+ int cc = instr->FBccValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+}
+
+
+// Print the integer value of the cc field for the FP compare instructions.
+void Decoder::PrintCc(Instruction* instr) {
+ int cc = instr->FCccValue();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
}
// Print 16-bit unsigned immediate value.
void Decoder::PrintUImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%u", imm);
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
}
// Print 16-bit signed immediate value.
void Decoder::PrintSImm16(Instruction* instr) {
- int32_t imm = ((instr->Imm16Field())<<16)>>16;
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", imm);
+ int32_t imm = ((instr->Imm16Value())<<16)>>16;
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
// Print 16-bit hexa immediate value.
void Decoder::PrintXImm16(Instruction* instr) {
- int32_t imm = instr->Imm16Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "0x%x", imm);
+ int32_t imm = instr->Imm16Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
}
// Print 26-bit immediate value.
void Decoder::PrintImm26(Instruction* instr) {
- int32_t imm = instr->Imm26Field();
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", imm);
+ int32_t imm = instr->Imm26Value();
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
@@ -254,8 +268,8 @@
switch (instr->FunctionFieldRaw()) {
case BREAK: {
int32_t code = instr->Bits(25, 6);
- out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%05x", code);
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "0x%05x (%d)", code, code);
break;
}
case TGE:
@@ -266,7 +280,7 @@
case TNE: {
int32_t code = instr->Bits(15, 6);
out_buffer_pos_ +=
- v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+ OS::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
break;
}
default: // Not a break or trap instruction.
@@ -285,15 +299,15 @@
int Decoder::FormatRegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'r');
if (format[1] == 's') { // 'rs: Rs register
- int reg = instr->RsField();
+ int reg = instr->RsValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 't') { // 'rt: rt register
- int reg = instr->RtField();
+ int reg = instr->RtValue();
PrintRegister(reg);
return 2;
} else if (format[1] == 'd') { // 'rd: rd register
- int reg = instr->RdField();
+ int reg = instr->RdValue();
PrintRegister(reg);
return 2;
}
@@ -302,21 +316,21 @@
}
-// Handle all Cregister based formatting in this function to reduce the
+// Handle all FPUregister based formatting in this function to reduce the
// complexity of FormatOption.
-int Decoder::FormatCRegister(Instruction* instr, const char* format) {
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
ASSERT(format[0] == 'f');
if (format[1] == 's') { // 'fs: fs register
- int reg = instr->RsField();
- PrintCRegister(reg);
+ int reg = instr->FsValue();
+ PrintFPURegister(reg);
return 2;
} else if (format[1] == 't') { // 'ft: ft register
- int reg = instr->RtField();
- PrintCRegister(reg);
+ int reg = instr->FtValue();
+ PrintFPURegister(reg);
return 2;
} else if (format[1] == 'd') { // 'fd: fd register
- int reg = instr->RdField();
- PrintCRegister(reg);
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
return 2;
}
UNREACHABLE();
@@ -359,12 +373,31 @@
case 'r': { // 'r: registers
return FormatRegister(instr, format);
}
- case 'f': { // 'f: Cregisters
- return FormatCRegister(instr, format);
+ case 'f': { // 'f: FPUregisters
+ return FormatFPURegister(instr, format);
}
case 's': { // 'sa
- ASSERT(STRING_STARTS_WITH(format, "sa"));
- PrintSa(instr);
+ switch (format[1]) {
+ case 'a': {
+ ASSERT(STRING_STARTS_WITH(format, "sa"));
+ PrintSa(instr);
+ return 2;
+ }
+ case 'd': {
+ ASSERT(STRING_STARTS_WITH(format, "sd"));
+ PrintSd(instr);
+ return 2;
+ }
+ }
+ }
+ case 'b': { // 'bc - Special for bc1 cc field.
+ ASSERT(STRING_STARTS_WITH(format, "bc"));
+ PrintBc(instr);
+ return 2;
+ }
+ case 'C': { // 'Cc - Special for c.xx.d cc field.
+ ASSERT(STRING_STARTS_WITH(format, "Cc"));
+ PrintCc(instr);
return 2;
}
};
@@ -401,45 +434,160 @@
switch (instr->OpcodeFieldRaw()) {
case COP1: // Coprocessor instructions
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
break;
case MFC1:
- Format(instr, "mfc1 'rt, 'fs");
+ Format(instr, "mfc1 'rt, 'fs");
break;
case MFHC1:
- Format(instr, "mfhc1 rt, 'fs");
+ Format(instr, "mfhc1 'rt, 'fs");
break;
case MTC1:
- Format(instr, "mtc1 'rt, 'fs");
+ Format(instr, "mtc1 'rt, 'fs");
+ break;
+ // These are called "fs" too, although they are not FPU registers.
+ case CTC1:
+ Format(instr, "ctc1 'rt, 'fs");
+ break;
+ case CFC1:
+ Format(instr, "cfc1 'rt, 'fs");
break;
case MTHC1:
- Format(instr, "mthc1 rt, 'fs");
+ Format(instr, "mthc1 'rt, 'fs");
+ break;
+ case D:
+ switch (instr->FunctionFieldRaw()) {
+ case ADD_D:
+ Format(instr, "add.d 'fd, 'fs, 'ft");
+ break;
+ case SUB_D:
+ Format(instr, "sub.d 'fd, 'fs, 'ft");
+ break;
+ case MUL_D:
+ Format(instr, "mul.d 'fd, 'fs, 'ft");
+ break;
+ case DIV_D:
+ Format(instr, "div.d 'fd, 'fs, 'ft");
+ break;
+ case ABS_D:
+ Format(instr, "abs.d 'fd, 'fs");
+ break;
+ case MOV_D:
+ Format(instr, "mov.d 'fd, 'fs");
+ break;
+ case NEG_D:
+ Format(instr, "neg.d 'fd, 'fs");
+ break;
+ case SQRT_D:
+ Format(instr, "sqrt.d 'fd, 'fs");
+ break;
+ case CVT_W_D:
+ Format(instr, "cvt.w.d 'fd, 'fs");
+ break;
+ case CVT_L_D: {
+ if (mips32r2) {
+ Format(instr, "cvt.l.d 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case TRUNC_W_D:
+ Format(instr, "trunc.w.d 'fd, 'fs");
+ break;
+ case TRUNC_L_D: {
+ if (mips32r2) {
+ Format(instr, "trunc.l.d 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case ROUND_W_D:
+ Format(instr, "round.w.d 'fd, 'fs");
+ break;
+ case FLOOR_W_D:
+ Format(instr, "floor.w.d 'fd, 'fs");
+ break;
+ case CEIL_W_D:
+ Format(instr, "ceil.w.d 'fd, 'fs");
+ break;
+ case CVT_S_D:
+ Format(instr, "cvt.s.d 'fd, 'fs");
+ break;
+ case C_F_D:
+ Format(instr, "c.f.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UN_D:
+ Format(instr, "c.un.d 'fs, 'ft, 'Cc");
+ break;
+ case C_EQ_D:
+ Format(instr, "c.eq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_UEQ_D:
+ Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLT_D:
+ Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULT_D:
+ Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
+ break;
+ case C_OLE_D:
+ Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
+ break;
+ case C_ULE_D:
+ Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
+ break;
+ default:
+ Format(instr, "unknown.cop1.d");
+ break;
+ }
break;
case S:
- case D:
UNIMPLEMENTED_MIPS();
break;
case W:
switch (instr->FunctionFieldRaw()) {
- case CVT_S_W:
- UNIMPLEMENTED_MIPS();
+ case CVT_S_W: // Convert word to float (single).
+ Format(instr, "cvt.s.w 'fd, 'fs");
break;
case CVT_D_W: // Convert word to double.
- Format(instr, "cvt.d.w 'fd, 'fs");
+ Format(instr, "cvt.d.w 'fd, 'fs");
break;
default:
UNREACHABLE();
- };
+ }
break;
case L:
+ switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L: {
+ if (mips32r2) {
+ Format(instr, "cvt.d.l 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case CVT_S_L: {
+ if (mips32r2) {
+ Format(instr, "cvt.s.l 'fd, 'fs");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
case PS:
UNIMPLEMENTED_MIPS();
break;
- break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL:
switch (instr->FunctionFieldRaw()) {
@@ -456,7 +604,15 @@
Format(instr, "sll 'rd, 'rt, 'sa");
break;
case SRL:
- Format(instr, "srl 'rd, 'rt, 'sa");
+ if (instr->RsValue() == 0) {
+ Format(instr, "srl 'rd, 'rt, 'sa");
+ } else {
+ if (mips32r2) {
+ Format(instr, "rotr 'rd, 'rt, 'sa");
+ } else {
+ Unknown(instr);
+ }
+ }
break;
case SRA:
Format(instr, "sra 'rd, 'rt, 'sa");
@@ -465,7 +621,15 @@
Format(instr, "sllv 'rd, 'rt, 'rs");
break;
case SRLV:
- Format(instr, "srlv 'rd, 'rt, 'rs");
+ if (instr->SaValue() == 0) {
+ Format(instr, "srlv 'rd, 'rt, 'rs");
+ } else {
+ if (mips32r2) {
+ Format(instr, "rotrv 'rd, 'rt, 'rs");
+ } else {
+ Unknown(instr);
+ }
+ }
break;
case SRAV:
Format(instr, "srav 'rd, 'rt, 'rs");
@@ -504,9 +668,9 @@
Format(instr, "and 'rd, 'rs, 'rt");
break;
case OR:
- if (0 == instr->RsField()) {
+ if (0 == instr->RsValue()) {
Format(instr, "mov 'rd, 'rt");
- } else if (0 == instr->RtField()) {
+ } else if (0 == instr->RtValue()) {
Format(instr, "mov 'rd, 'rs");
} else {
Format(instr, "or 'rd, 'rs, 'rt");
@@ -545,27 +709,79 @@
case TNE:
Format(instr, "tne 'rs, 'rt, code: 'code");
break;
+ case MOVZ:
+ Format(instr, "movz 'rd, 'rs, 'rt");
+ break;
+ case MOVN:
+ Format(instr, "movn 'rd, 'rs, 'rt");
+ break;
+ case MOVCI:
+ if (instr->Bit(16)) {
+ Format(instr, "movt 'rd, 'rs, 'Cc");
+ } else {
+ Format(instr, "movf 'rd, 'rs, 'Cc");
+ }
+ break;
default:
UNREACHABLE();
- };
+ }
break;
case SPECIAL2:
switch (instr->FunctionFieldRaw()) {
case MUL:
+ Format(instr, "mul 'rd, 'rs, 'rt");
+ break;
+ case CLZ:
+ Format(instr, "clz 'rd, 'rs");
break;
default:
UNREACHABLE();
- };
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: {
+ if (mips32r2) {
+ Format(instr, "ins 'rt, 'rs, 'sd, 'sa");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ case EXT: {
+ if (mips32r2) {
+ Format(instr, "ext 'rt, 'rs, 'sd, 'sa");
+ } else {
+ Unknown(instr);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
break;
default:
UNREACHABLE();
- };
+ }
}
void Decoder::DecodeTypeImmediate(Instruction* instr) {
switch (instr->OpcodeFieldRaw()) {
// ------------- REGIMM class.
+ case COP1:
+ switch (instr->RsFieldRaw()) {
+ case BC1:
+ if (instr->FBtrueValue()) {
+ Format(instr, "bc1t 'bc, 'imm16u");
+ } else {
+ Format(instr, "bc1f 'bc, 'imm16u");
+ }
+ break;
+ default:
+ UNREACHABLE();
+ };
+ break; // Case COP1.
case REGIMM:
switch (instr->RtFieldRaw()) {
case BLTZ:
@@ -582,8 +798,8 @@
break;
default:
UNREACHABLE();
- };
- break; // case REGIMM
+ }
+ break; // Case REGIMM.
// ------------- Branch instructions.
case BEQ:
Format(instr, "beq 'rs, 'rt, 'imm16u");
@@ -626,18 +842,39 @@
case LB:
Format(instr, "lb 'rt, 'imm16s('rs)");
break;
+ case LH:
+ Format(instr, "lh 'rt, 'imm16s('rs)");
+ break;
+ case LWL:
+ Format(instr, "lwl 'rt, 'imm16s('rs)");
+ break;
case LW:
Format(instr, "lw 'rt, 'imm16s('rs)");
break;
case LBU:
Format(instr, "lbu 'rt, 'imm16s('rs)");
break;
+ case LHU:
+ Format(instr, "lhu 'rt, 'imm16s('rs)");
+ break;
+ case LWR:
+ Format(instr, "lwr 'rt, 'imm16s('rs)");
+ break;
case SB:
Format(instr, "sb 'rt, 'imm16s('rs)");
break;
+ case SH:
+ Format(instr, "sh 'rt, 'imm16s('rs)");
+ break;
+ case SWL:
+ Format(instr, "swl 'rt, 'imm16s('rs)");
+ break;
case SW:
Format(instr, "sw 'rt, 'imm16s('rs)");
break;
+ case SWR:
+ Format(instr, "swr 'rt, 'imm16s('rs)");
+ break;
case LWC1:
Format(instr, "lwc1 'ft, 'imm16s('rs)");
break;
@@ -645,10 +882,10 @@
Format(instr, "ldc1 'ft, 'imm16s('rs)");
break;
case SWC1:
- Format(instr, "swc1 'rt, 'imm16s('fs)");
+ Format(instr, "swc1 'ft, 'imm16s('rs)");
break;
case SDC1:
- Format(instr, "sdc1 'rt, 'imm16s('fs)");
+ Format(instr, "sdc1 'ft, 'imm16s('rs)");
break;
default:
UNREACHABLE();
@@ -675,7 +912,7 @@
int Decoder::InstructionDecode(byte_* instr_ptr) {
Instruction* instr = Instruction::At(instr_ptr);
// Print raw instruction bytes.
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%08x ",
instr->InstructionBits());
switch (instr->InstructionType()) {
@@ -695,11 +932,11 @@
UNSUPPORTED_MIPS();
}
}
- return Instruction::kInstructionSize;
+ return Instruction::kInstrSize;
}
-} } // namespace assembler::mips
+} } // namespace v8::internal
@@ -707,8 +944,7 @@
namespace disasm {
-namespace v8i = v8::internal;
-
+using v8::internal::byte_;
const char* NameConverter::NameOfAddress(byte_* addr) const {
v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
@@ -722,12 +958,12 @@
const char* NameConverter::NameOfCPURegister(int reg) const {
- return assembler::mips::Registers::Name(reg);
+ return v8::internal::Registers::Name(reg);
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
- return assembler::mips::FPURegister::Name(reg);
+ return v8::internal::FPURegisters::Name(reg);
}
@@ -755,13 +991,13 @@
int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
byte_* instruction) {
- assembler::mips::Decoder d(converter_, buffer);
+ v8::internal::Decoder d(converter_, buffer);
return d.InstructionDecode(instruction);
}
+// The MIPS assembler does not currently use constant pools.
int Disassembler::ConstantPoolSizeAt(byte_* instruction) {
- UNIMPLEMENTED_MIPS();
return -1;
}
@@ -779,6 +1015,7 @@
}
}
+
#undef UNSUPPORTED
} // namespace disasm
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index d630562..e2e0c91 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -37,57 +37,9 @@
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
Address ExitFrame::ComputeStackPointer(Address fp) {
- Address sp = fp + ExitFrameConstants::kSPDisplacement;
- const int offset = ExitFrameConstants::kCodeOffset;
- Object* code = Memory::Object_at(fp + offset);
- bool is_debug_exit = code->IsSmi();
- if (is_debug_exit) {
- sp -= kNumJSCallerSaved * kPointerSize;
- }
- return sp;
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- // Do nothing
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
UNIMPLEMENTED_MIPS();
- return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- UNIMPLEMENTED_MIPS();
- return static_cast<Address>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- return fp() + StandardFrameConstants::kCallerSPOffset;
+ return fp;
}
diff --git a/src/mips/frames-mips.h b/src/mips/frames-mips.h
index 06e9979..6441470 100644
--- a/src/mips/frames-mips.h
+++ b/src/mips/frames-mips.h
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -40,16 +40,17 @@
static const int kNumRegs = 32;
static const RegList kJSCallerSaved =
+ 1 << 2 | // v0
1 << 4 | // a0
1 << 5 | // a1
1 << 6 | // a2
1 << 7; // a3
-static const int kNumJSCallerSaved = 4;
+static const int kNumJSCallerSaved = 5;
// Return the code of the n-th caller-saved register available to JavaScript
-// e.g. JSCallerSavedReg(0) returns r0.code() == 0.
+// e.g. JSCallerSavedReg(0) returns a0.code() == 4.
int JSCallerSavedCode(int n);
@@ -64,6 +65,18 @@
static const int kNumCalleeSaved = 11;
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(mips): Only 8 registers may actually be sufficient. Revisit.
+static const int kNumSafepointRegisters = 16;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+static const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
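+// kNumJSCallerSaved (5) + kNumCalleeSaved (11) == 16, so the registers saved
+// at safepoints exactly fill the reserved kNumSafepointRegisters slots.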
+static const int kNumSafepointSavedRegisters =
+ kNumJSCallerSaved + kNumCalleeSaved;
+
typedef Object* JSCallerSavedBuffer[kNumJSCallerSaved];
@@ -88,15 +101,14 @@
class ExitFrameConstants : public AllStatic {
public:
- // Exit frames have a debug marker on the stack.
- static const int kSPDisplacement = -1 * kPointerSize;
-
- // The debug marker is just above the frame pointer.
static const int kDebugMarkOffset = -1 * kPointerSize;
// Must be the same as kDebugMarkOffset. Alias introduced when upgrading.
static const int kCodeOffset = -1 * kPointerSize;
+ static const int kSPOffset = -1 * kPointerSize;
- static const int kSavedRegistersOffset = 0 * kPointerSize;
+ // TODO(mips): Use a patched sp value on the stack instead.
+ // A marker of 0 indicates that double registers are saved.
+ static const int kMarkerOffset = -2 * kPointerSize;
// The caller fields are below the frame pointer on the stack.
static const int kCallerFPOffset = +0 * kPointerSize;
@@ -126,6 +138,8 @@
static const int kCArgsSlotsSize = 4 * kPointerSize;
// JS argument slots size.
static const int kJSArgsSlotsSize = 0 * kPointerSize;
+ // Assembly builtins argument slots size.
+ static const int kBArgsSlotsSize = 0 * kPointerSize;
};
@@ -159,6 +173,7 @@
return Memory::Object_at(fp() + offset);
}
+
} } // namespace v8::internal
#endif
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 17ee531..87507ff 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -29,18 +29,55 @@
#if defined(V8_TARGET_ARCH_MIPS)
+// Note on Mips implementation:
+//
+// The result_register() for mips is the 'v0' register, which is defined
+// by the ABI to contain function return values. However, the first
+// parameter to a function is defined to be 'a0'. So there are many
+// places where we have to move a previous result in v0 to a0 for the
+// next call: mov(a0, v0). This is not needed on the other architectures.
+
+#include "code-stubs.h"
#include "codegen-inl.h"
#include "compiler.h"
#include "debug.h"
#include "full-codegen.h"
#include "parser.h"
+#include "scopes.h"
+#include "stub-cache.h"
+
+#include "mips/code-stubs-mips.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
-void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
+// Generate code for a JS function. On entry to the function the receiver
+// and arguments have been pushed on the stack left to right. The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+// o a1: the JS function object being called (ie, ourselves)
+// o cp: our context
+// o fp: our caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds a JS frame. Please see JavaScriptFrameConstants in
+// frames-mips.h for its layout.
+void FullCodeGenerator::Generate(CompilationInfo* info) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
UNIMPLEMENTED_MIPS();
}
@@ -50,47 +87,165 @@
}
-void FullCodeGenerator::Apply(Expression::Context context, Register reg) {
+void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
-void FullCodeGenerator::Apply(Expression::Context context, Slot* slot) {
- UNIMPLEMENTED_MIPS();
-}
-
-void FullCodeGenerator::Apply(Expression::Context context, Literal* lit) {
+void FullCodeGenerator::AccumulatorValueContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
-void FullCodeGenerator::ApplyTOS(Expression::Context context) {
+void FullCodeGenerator::StackValueContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
-void FullCodeGenerator::DropAndApply(int count,
- Expression::Context context,
- Register reg) {
+void FullCodeGenerator::TestContext::Plug(Slot* slot) const {
UNIMPLEMENTED_MIPS();
}
-void FullCodeGenerator::Apply(Expression::Context context,
- Label* materialize_true,
- Label* materialize_false) {
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
UNIMPLEMENTED_MIPS();
}
-void FullCodeGenerator::DoTest(Expression::Context context) {
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Heap::RootListIndex index) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Handle<Object> lit) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+ Register reg) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+ int count,
+ Register reg) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+ Register reg) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+ Register reg) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+ Label* materialize_true,
+ Label* materialize_false) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+ Label* materialize_false) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::DoTest(Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Original prototype for mips, needs arch-indep change. Leave out for now.
+// void FullCodeGenerator::Split(Condition cc,
+// Register lhs,
+// const Operand& rhs,
+// Label* if_true,
+// Label* if_false,
+// Label* fall_through) {
+void FullCodeGenerator::Split(Condition cc,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
UNIMPLEMENTED_MIPS();
}
MemOperand FullCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
UNIMPLEMENTED_MIPS();
- return MemOperand(zero_reg, 0); // UNIMPLEMENTED RETURN
+ return MemOperand(zero_reg, 0);
}
@@ -99,6 +254,14 @@
}
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(State state,
+ bool should_normalize,
+ Label* if_true,
+ Label* if_false) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::Move(Slot* dst,
Register src,
Register scratch1,
@@ -107,6 +270,13 @@
}
+void FullCodeGenerator::EmitDeclaration(Variable* variable,
+ Variable::Mode mode,
+ FunctionLiteral* function) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::VisitDeclaration(Declaration* decl) {
UNIMPLEMENTED_MIPS();
}
@@ -117,7 +287,18 @@
}
-void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+ bool pretenure) {
UNIMPLEMENTED_MIPS();
}
@@ -127,8 +308,32 @@
}
-void FullCodeGenerator::EmitVariableLoad(Variable* var,
- Expression::Context context) {
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
+ Slot* slot,
+ Label* slow) {
+ UNIMPLEMENTED_MIPS();
+ return MemOperand(zero_reg, 0);
+}
+
+
+void FullCodeGenerator::EmitDynamicLoadFromSlotFastCase(
+ Slot* slot,
+ TypeofState typeof_state,
+ Label* slow,
+ Label* done) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalSlotCheckExtensions(
+ Slot* slot,
+ TypeofState typeof_state,
+ Label* slow) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(Variable* var) {
UNIMPLEMENTED_MIPS();
}
@@ -163,14 +368,28 @@
}
+void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
+ Token::Value op,
+ OverwriteMode mode,
+ Expression* left,
+ Expression* right) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::EmitBinaryOp(Token::Value op,
- Expression::Context context) {
+ OverwriteMode mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
UNIMPLEMENTED_MIPS();
}
void FullCodeGenerator::EmitVariableAssignment(Variable* var,
- Expression::Context context) {
+ Token::Value op) {
UNIMPLEMENTED_MIPS();
}
@@ -189,13 +408,21 @@
UNIMPLEMENTED_MIPS();
}
+
void FullCodeGenerator::EmitCallWithIC(Call* expr,
- Handle<Object> ignored,
+ Handle<Object> name,
RelocInfo::Mode mode) {
UNIMPLEMENTED_MIPS();
}
+void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr,
+ Expression* key,
+ RelocInfo::Mode mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::EmitCallWithStub(Call* expr) {
UNIMPLEMENTED_MIPS();
}
@@ -211,6 +438,202 @@
}
+void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+ ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsFunction(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsArray(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitArguments(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitClassOf(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitLog(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitRandomHeapNumber(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitSubString(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitValueOf(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathSqrt(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitCallFunction(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitIsRegExpEquivalent(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
UNIMPLEMENTED_MIPS();
}
@@ -226,25 +649,52 @@
}
-void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
UNIMPLEMENTED_MIPS();
}
+bool FullCodeGenerator::TryLiteralCompare(Token::Value op,
+ Expression* left,
+ Expression* right,
+ Label* if_true,
+ Label* if_false,
+ Label* fall_through) {
+ UNIMPLEMENTED_MIPS();
+ return false;
+}
+
+
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
UNIMPLEMENTED_MIPS();
}
+void FullCodeGenerator::VisitCompareToNull(CompareToNull* expr) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
UNIMPLEMENTED_MIPS();
}
-Register FullCodeGenerator::result_register() { return v0; }
+Register FullCodeGenerator::result_register() {
+ UNIMPLEMENTED_MIPS();
+ return v0;
+}
-Register FullCodeGenerator::context_register() { return cp; }
+Register FullCodeGenerator::context_register() {
+ UNIMPLEMENTED_MIPS();
+ return cp;
+}
+
+
+void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {
+ UNIMPLEMENTED_MIPS();
+}
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index e5c2ad8..fa8a7bb 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
@@ -32,6 +32,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "codegen-inl.h"
+#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"
@@ -52,7 +53,7 @@
}
-void LoadIC::GenerateStringLength(MacroAssembler* masm) {
+void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) {
UNIMPLEMENTED_MIPS();
}
@@ -65,6 +66,12 @@
// Defined in ic.cc.
Object* CallIC_Miss(Arguments args);
+
+void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
}
@@ -74,51 +81,22 @@
UNIMPLEMENTED_MIPS();
}
-void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
+
+void KeyedCallIC::GenerateMiss(MacroAssembler* masm, int argc) {
UNIMPLEMENTED_MIPS();
- // Registers:
- // a2: name
- // ra: return address
-
- // Get the receiver of the function from the stack.
- __ lw(a3, MemOperand(sp, argc*kPointerSize));
-
- __ EnterInternalFrame();
-
- // Push the receiver and the name of the function.
- __ MultiPush(a2.bit() | a3.bit());
-
- // Call the entry.
- __ li(a0, Operand(2));
- __ li(a1, Operand(ExternalReference(IC_Utility(kCallIC_Miss))));
-
- CEntryStub stub(1);
- __ CallStub(&stub);
-
- // Move result to r1 and leave the internal frame.
- __ mov(a1, v0);
- __ LeaveInternalFrame();
-
- // Check if the receiver is a global object of some sort.
- Label invoke, global;
- __ lw(a2, MemOperand(sp, argc * kPointerSize));
- __ andi(t0, a2, kSmiTagMask);
- __ Branch(eq, &invoke, t0, Operand(zero_reg));
- __ GetObjectType(a2, a3, a3);
- __ Branch(eq, &global, a3, Operand(JS_GLOBAL_OBJECT_TYPE));
- __ Branch(ne, &invoke, a3, Operand(JS_BUILTINS_OBJECT_TYPE));
-
- // Patch the receiver on the stack.
- __ bind(&global);
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
- __ sw(a2, MemOperand(sp, argc * kPointerSize));
-
- // Invoke the function.
- ParameterCount actual(argc);
- __ bind(&invoke);
- __ InvokeFunction(a1, actual, JUMP_FUNCTION);
}
+
+void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
// Defined in ic.cc.
Object* LoadIC_Miss(Arguments args);
@@ -137,19 +115,35 @@
}
-void LoadIC::ClearInlinedVersion(Address address) {}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+ UNIMPLEMENTED_MIPS();
return false;
}
-void KeyedLoadIC::ClearInlinedVersion(Address address) {}
+
+bool LoadIC::PatchInlinedContextualLoad(Address address,
+ Object* map,
+ Object* cell,
+ bool is_dont_delete) {
+ UNIMPLEMENTED_MIPS();
+ return false;
+}
+
+
+bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
+ UNIMPLEMENTED_MIPS();
+ return false;
+}
+
+
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+ UNIMPLEMENTED_MIPS();
return false;
}
-void KeyedStoreIC::ClearInlinedVersion(Address address) {}
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+
bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
+ UNIMPLEMENTED_MIPS();
return false;
}
@@ -162,6 +156,11 @@
}
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
UNIMPLEMENTED_MIPS();
}
@@ -172,7 +171,14 @@
}
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
UNIMPLEMENTED_MIPS();
}
@@ -187,7 +193,8 @@
}
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
UNIMPLEMENTED_MIPS();
}
@@ -201,8 +208,37 @@
UNIMPLEMENTED_MIPS();
}
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+ StrictModeFlag strict_mode) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
#undef __
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+ UNIMPLEMENTED_MIPS();
+ return kNoCondition;
+}
+
+
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void PatchInlinedSmiCode(Address address) {
+ // Currently there is no smi inlining in the MIPS full code generator.
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/jump-target-mips.cc b/src/mips/jump-target-mips.cc
index 408f75e..bd6d60b 100644
--- a/src/mips/jump-target-mips.cc
+++ b/src/mips/jump-target-mips.cc
@@ -43,41 +43,19 @@
#define __ ACCESS_MASM(cgen()->masm())
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
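+// An unconditional branch (cc_always) must pass zero_reg for both compare
+// operands; any other condition must name at least one non-zero register.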
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
+ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
+
void JumpTarget::DoJump() {
- ASSERT(cgen()->has_valid_frame());
- // Live non-frame registers are not allowed at unconditional jumps
- // because we have no way of invalidating the corresponding results
- // which are still live in the C++ code.
- ASSERT(cgen()->HasValidEntryRegisters());
-
- if (is_bound()) {
- // Backward jump. There already a frame expectation at the target.
- ASSERT(direction_ == BIDIRECTIONAL);
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- } else {
- // Use the current frame as the expected one at the target if necessary.
- if (entry_frame_ == NULL) {
- entry_frame_ = cgen()->frame();
- RegisterFile empty;
- cgen()->SetFrame(NULL, &empty);
- } else {
- cgen()->frame()->MergeTo(entry_frame_);
- cgen()->DeleteFrame();
- }
-
- // The predicate is_linked() should be made true. Its implementation
- // detects the presence of a frame pointer in the reaching_frames_ list.
- if (!is_linked()) {
- reaching_frames_.Add(NULL);
- ASSERT(is_linked());
- }
- }
- __ b(&entry_label_);
- __ nop(); // Branch delay slot nop.
+ UNIMPLEMENTED_MIPS();
}
-
+// Original prototype for mips, needs arch-indep change. Leave out for now.
+// void JumpTarget::DoBranch(Condition cc, Hint ignored,
+// Register src1, const Operand& src2) {
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
UNIMPLEMENTED_MIPS();
}
@@ -89,85 +67,12 @@
void JumpTarget::DoBind() {
- ASSERT(!is_bound());
-
- // Live non-frame registers are not allowed at the start of a basic
- // block.
- ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
-
- if (cgen()->has_valid_frame()) {
- // If there is a current frame we can use it on the fall through.
- if (entry_frame_ == NULL) {
- entry_frame_ = new VirtualFrame(cgen()->frame());
- } else {
- ASSERT(cgen()->frame()->Equals(entry_frame_));
- }
- } else {
- // If there is no current frame we must have an entry frame which we can
- // copy.
- ASSERT(entry_frame_ != NULL);
- RegisterFile empty;
- cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
- }
-
- // The predicate is_linked() should be made false. Its implementation
- // detects the presence (or absence) of frame pointers in the
- // reaching_frames_ list. If we inserted a bogus frame to make
- // is_linked() true, remove it now.
- if (is_linked()) {
- reaching_frames_.Clear();
- }
-
- __ bind(&entry_label_);
-}
-
-
-void BreakTarget::Jump() {
- // On ARM we do not currently emit merge code for jumps, so we need to do
- // it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- ASSERT(cgen()->has_valid_frame());
- int count = cgen()->frame()->height() - expected_height_;
- cgen()->frame()->Drop(count);
- DoJump();
-}
-
-
-void BreakTarget::Jump(Result* arg) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void BreakTarget::Bind() {
-#ifdef DEBUG
- // All the forward-reaching frames should have been adjusted at the
- // jumps to this target.
- for (int i = 0; i < reaching_frames_.length(); i++) {
- ASSERT(reaching_frames_[i] == NULL ||
- reaching_frames_[i]->height() == expected_height_);
- }
-#endif
- // Drop leftover statement state from the frame before merging, even
- // on the fall through. This is so we can bind the return target
- // with state on the frame.
- if (cgen()->has_valid_frame()) {
- int count = cgen()->frame()->height() - expected_height_;
- // On ARM we do not currently emit merge code at binding sites, so we need
- // to do it explicitly here. The only merging necessary is to drop extra
- // statement state from the stack.
- cgen()->frame()->Drop(count);
- }
-
- DoBind();
-}
-
-
-void BreakTarget::Bind(Result* arg) {
UNIMPLEMENTED_MIPS();
}
#undef __
+#undef BRANCH_ARGS_CHECK
} } // namespace v8::internal
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/lithium-codegen-mips.h
similarity index 63%
copy from src/mips/fast-codegen-mips.cc
copy to src/mips/lithium-codegen-mips.h
index 186f9fa..345d912 100644
--- a/src/mips/fast-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.h
@@ -25,53 +25,41 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#ifndef V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
-#if defined(V8_TARGET_ARCH_MIPS)
+#include "mips/lithium-mips.h"
-#include "codegen-inl.h"
-#include "fast-codegen.h"
+#include "deoptimizer.h"
+#include "safepoint-table.h"
+#include "scopes.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+// Forward declarations.
+class LDeferredCode;
-Register FastCodeGenerator::accumulator0() { return no_reg; }
-Register FastCodeGenerator::accumulator1() { return no_reg; }
-Register FastCodeGenerator::scratch0() { return no_reg; }
-Register FastCodeGenerator::scratch1() { return no_reg; }
-Register FastCodeGenerator::receiver_reg() { return no_reg; }
-Register FastCodeGenerator::context_reg() { return no_reg; }
+class LCodeGen BASE_EMBEDDED {
+ public:
+ LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) { }
+ // Try to generate code for the entire chunk, but it may fail if the
+ // chunk contains constructs we cannot handle. Returns true if the
+ // code generation attempt succeeded.
+ bool GenerateCode() {
+ UNIMPLEMENTED();
+ return false;
+ }
-void FastCodeGenerator::Generate(CompilationInfo* info) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- UNIMPLEMENTED_MIPS();
-}
-
-#undef __
-
+ // Finish the code by setting stack height, safepoint, and bailout
+ // information on it.
+ void FinishCode(Handle<Code> code) { UNIMPLEMENTED(); }
+};
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_MIPS
+#endif // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
new file mode 100644
index 0000000..e11dfab
--- /dev/null
+++ b/src/mips/lithium-mips.h
@@ -0,0 +1,304 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_MIPS_LITHIUM_MIPS_H_
+#define V8_MIPS_LITHIUM_MIPS_H_
+
+#include "hydrogen.h"
+#include "lithium-allocator.h"
+#include "lithium.h"
+#include "safepoint-table.h"
+
+// Note: this file was taken from the X64 version. ARM has a partially working
+// lithium implementation, but for now it is not ported to mips.
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+class LEnvironment;
+class Translation;
+
+class LInstruction: public ZoneObject {
+ public:
+ LInstruction() { }
+ virtual ~LInstruction() { }
+
+ // Predicates should be generated by macro as in lithium-ia32.h.
+ virtual bool IsLabel() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+ virtual bool IsOsrEntry() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ LPointerMap* pointer_map() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool HasPointerMap() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ void set_environment(LEnvironment* env) { UNIMPLEMENTED(); }
+
+ LEnvironment* environment() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool HasEnvironment() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual void PrintTo(StringStream* stream) const { UNIMPLEMENTED(); }
+
+ virtual bool IsControl() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ void MarkAsCall() { UNIMPLEMENTED(); }
+ void MarkAsSaveDoubles() { UNIMPLEMENTED(); }
+
+ // Interface to the register allocator and iterators.
+ bool IsMarkedAsCall() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ bool IsMarkedAsSaveDoubles() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual bool HasResult() const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ virtual LOperand* result() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual int InputCount() {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ virtual LOperand* InputAt(int i) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ virtual int TempCount() {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ virtual LOperand* TempAt(int i) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* FirstInput() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* Output() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+#ifdef DEBUG
+ void VerifyCall() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LGap: public LInstruction {
+ public:
+ explicit LGap(HBasicBlock* block) { }
+
+ HBasicBlock* block() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ enum InnerPosition {
+ BEFORE,
+ START,
+ END,
+ AFTER,
+ FIRST_INNER_POSITION = BEFORE,
+ LAST_INNER_POSITION = AFTER
+ };
+
+ LParallelMove* GetOrCreateParallelMove(InnerPosition pos) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LParallelMove* GetParallelMove(InnerPosition pos) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+};
+
+
+class LLabel: public LGap {
+ public:
+ explicit LLabel(HBasicBlock* block) : LGap(block) { }
+};
+
+
+class LOsrEntry: public LInstruction {
+ public:
+ // Function could be generated by a macro as in lithium-ia32.h.
+ static LOsrEntry* cast(LInstruction* instr) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand** SpilledRegisterArray() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+ LOperand** SpilledDoubleRegisterArray() {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ void MarkSpilledRegister(int allocation_index, LOperand* spill_operand) {
+ UNIMPLEMENTED();
+ }
+ void MarkSpilledDoubleRegister(int allocation_index,
+ LOperand* spill_operand) {
+ UNIMPLEMENTED();
+ }
+};
+
+
+class LChunk: public ZoneObject {
+ public:
+ explicit LChunk(CompilationInfo* info, HGraph* graph) { }
+
+ HGraph* graph() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ CompilationInfo* info() const { return NULL; }
+
+ const ZoneList<LPointerMap*>* pointer_maps() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LOperand* GetNextSpillSlot(bool double_slot) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LConstantOperand* DefineConstantOperand(HConstant* constant) {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ LLabel* GetLabel(int block_id) const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ const ZoneList<LInstruction*>* instructions() const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ int GetParameterStackSlot(int index) const {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ void AddGapMove(int index, LOperand* from, LOperand* to) { UNIMPLEMENTED(); }
+
+ LGap* GetGapAt(int index) const {
+ UNIMPLEMENTED();
+ return NULL;
+ }
+
+ bool IsGapAt(int index) const {
+ UNIMPLEMENTED();
+ return false;
+ }
+
+ int NearestGapPos(int index) const {
+ UNIMPLEMENTED();
+ return 0;
+ }
+
+ void MarkEmptyBlocks() { UNIMPLEMENTED(); }
+
+#ifdef DEBUG
+ void Verify() { UNIMPLEMENTED(); }
+#endif
+};
+
+
+class LChunkBuilder BASE_EMBEDDED {
+ public:
+ LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator) { }
+
+ // Build the sequence for the graph.
+ LChunk* Build() {
+ UNIMPLEMENTED();
+ return NULL;
+  }
+
+ // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node) { \
+ UNIMPLEMENTED(); \
+ return NULL; \
+ }
+ HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_MIPS_LITHIUM_MIPS_H_
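Every method in this header simply calls UNIMPLEMENTED(): the file exists so the MIPS port links against the Crankshaft/lithium interfaces before actual support lands. A minimal sketch of that stub pattern, not V8 source, where UNIMPLEMENTED_SKETCH stands in for V8's macro and is assumed to print a location and abort, so the dummy return value is never observed:

// Sketch only: the UNIMPLEMENTED-stub pattern used throughout the header above.
#include <cstdio>
#include <cstdlib>

#define UNIMPLEMENTED_SKETCH()                                            \
  do {                                                                    \
    std::fprintf(stderr, "%s:%d: unimplemented\n", __FILE__, __LINE__);   \
    std::abort();                                                         \
  } while (false)

class LInstructionStub {
 public:
  virtual ~LInstructionStub() {}
  virtual bool IsControl() const {
    UNIMPLEMENTED_SKETCH();
    return false;  // Unreachable; only satisfies the compiler's return check.
  }
};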
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 02ece8c..bd4ab48 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
+#include <limits.h> // For LONG_MIN, LONG_MAX
#include "v8.h"
@@ -41,68 +41,90 @@
MacroAssembler::MacroAssembler(void* buffer, int size)
: Assembler(buffer, size),
- unresolved_(0),
generating_stub_(false),
allow_stub_calls_(true),
- code_object_(Heap::undefined_value()) {
+ code_object_(HEAP->undefined_value()) {
}
+// Arguments macros
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
-void MacroAssembler::Jump(Register target, Condition cond,
- Register r1, const Operand& r2) {
- Jump(Operand(target), cond, r1, r2);
+#define REGISTER_TARGET_BODY(Name) \
+void MacroAssembler::Name(Register target, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target), bd); \
+} \
+void MacroAssembler::Name(Register target, COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target), COND_ARGS, bd); \
}
-void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(target, rmode), cond, r1, r2);
+#define INT_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(intptr_t target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target, rmode), bd); \
+} \
+void MacroAssembler::Name(intptr_t target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(Operand(target, rmode), COND_ARGS, bd); \
}
-void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
+#define BYTE_PTR_TARGET_BODY(Name) \
+void MacroAssembler::Name(byte* target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target), rmode, bd); \
+} \
+void MacroAssembler::Name(byte* target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target), rmode, COND_ARGS, bd); \
}
-void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+#define CODE_TARGET_BODY(Name) \
+void MacroAssembler::Name(Handle<Code> target, RelocInfo::Mode rmode, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, bd); \
+} \
+void MacroAssembler::Name(Handle<Code> target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd) { \
+ Name(reinterpret_cast<intptr_t>(target.location()), rmode, COND_ARGS, bd); \
}
-void MacroAssembler::Call(Register target,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target), cond, r1, r2);
+REGISTER_TARGET_BODY(Jump)
+REGISTER_TARGET_BODY(Call)
+INT_PTR_TARGET_BODY(Jump)
+INT_PTR_TARGET_BODY(Call)
+BYTE_PTR_TARGET_BODY(Jump)
+BYTE_PTR_TARGET_BODY(Call)
+CODE_TARGET_BODY(Jump)
+CODE_TARGET_BODY(Call)
+
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef REGISTER_TARGET_BODY
+#undef BYTE_PTR_TARGET_BODY
+#undef CODE_TARGET_BODY
+
+
+void MacroAssembler::Ret(BranchDelaySlot bd) {
+ Jump(Operand(ra), bd);
}
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- Call(Operand(target, rmode), cond, r1, r2);
-}
-
-
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register r1, const Operand& r2) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
- Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2);
-}
-
-
-void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) {
- Jump(Operand(ra), cond, r1, r2);
+void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2,
+ BranchDelaySlot bd) {
+ Jump(Operand(ra), cond, r1, r2, bd);
}
@@ -111,45 +133,226 @@
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
+
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond,
Register src1, const Operand& src2) {
- Branch(NegateCondition(cond), 2, src1, src2);
+ Branch(2, NegateCondition(cond), src1, src2);
lw(destination, MemOperand(s6, index << kPointerSizeLog2));
}
-void MacroAssembler::RecordWrite(Register object, Register offset,
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index) {
+ sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond,
+ Register src1, const Operand& src2) {
+ Branch(2, NegateCondition(cond), src1, src2);
+ sw(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::RecordWriteHelper(Register object,
+ Register address,
+ Register scratch) {
+ if (FLAG_debug_code) {
+ // Check that the object is not in new space.
+ Label not_in_new_space;
+ InNewSpace(object, scratch, ne, ¬_in_new_space);
+ Abort("new-space object passed to RecordWriteHelper");
+ bind(¬_in_new_space);
+ }
+
+ // Calculate page address: Clear bits from 0 to kPageSizeBits.
+ if (mips32r2) {
+ Ins(object, zero_reg, 0, kPageSizeBits);
+ } else {
+    // The Ins macro is slow on MIPS32R1, so use shifts instead.
+ srl(object, object, kPageSizeBits);
+ sll(object, object, kPageSizeBits);
+ }
+
+ // Calculate region number.
+ Ext(address, address, Page::kRegionSizeLog2,
+ kPageSizeBits - Page::kRegionSizeLog2);
+
+ // Mark region dirty.
+ lw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+ li(at, Operand(1));
+ sllv(at, at, address);
+ or_(scratch, scratch, at);
+ sw(scratch, MemOperand(object, Page::kDirtyFlagOffset));
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ Label* branch) {
+ ASSERT(cc == eq || cc == ne);
+ And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+ Branch(branch, cc, scratch,
+ Operand(ExternalReference::new_space_start(isolate())));
+}
+
+
+// Will clobber 4 registers: object, scratch0, scratch1, at. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1) {
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch0, eq, &done);
+
+ // Add offset into the object.
+ Addu(scratch0, object, offset);
+
+ // Record the actual write.
+ RecordWriteHelper(object, scratch0, scratch1);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip. The
+// register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object,
+ Register address,
Register scratch) {
- UNIMPLEMENTED_MIPS();
+ // The compiled code assumes that record write doesn't change the
+ // context register, so we check that none of the clobbered
+ // registers are cp.
+ ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp));
+
+ Label done;
+
+ // First, test that the object is not in the new space. We cannot set
+ // region marks for new space pages.
+ InNewSpace(object, scratch, eq, &done);
+
+ // Record the actual write.
+ RecordWriteHelper(object, address, scratch);
+
+ bind(&done);
+
+ // Clobber all input registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (FLAG_debug_code) {
+ li(object, Operand(BitCast<int32_t>(kZapValue)));
+ li(address, Operand(BitCast<int32_t>(kZapValue)));
+ li(scratch, Operand(BitCast<int32_t>(kZapValue)));
+ }
+}
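RecordWriteHelper above implements the write barrier by marking the region (card) containing the written field as dirty in its page header. The same arithmetic in ordinary C++, with made-up constants standing in for kPageSizeBits, Page::kRegionSizeLog2 and the dirty-flag location:

// Sketch of the region-marking arithmetic; constants are illustrative only.
#include <cstdint>

const int kPageSizeBitsSketch = 13;
const int kRegionSizeLog2Sketch = 8;

inline void MarkRegionDirty(uintptr_t field_address, uint32_t* dirty_flags) {
  // Page base: clear the low kPageSizeBits bits (the Ins / srl+sll step).
  uintptr_t page = field_address & ~((uintptr_t(1) << kPageSizeBitsSketch) - 1);
  // Region index: bits [kRegionSizeLog2, kPageSizeBits) of the address
  // (the Ext step).
  uintptr_t region = (field_address - page) >> kRegionSizeLog2Sketch;
  // Set the region's bit in the page's dirty-flag word (the lw/or/sw above).
  *dirty_flags |= uint32_t(1) << region;
}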
+
+
+// -----------------------------------------------------------------------------
+// Allocation support
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss) {
+ Label same_contexts;
+
+ ASSERT(!holder_reg.is(scratch));
+ ASSERT(!holder_reg.is(at));
+ ASSERT(!scratch.is(at));
+
+ // Load current lexical context from the stack frame.
+ lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+ Check(ne, "we should not have an empty lexical context",
+ scratch, Operand(zero_reg));
+#endif
+
+ // Load the global context of the current context.
+ int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
+ lw(scratch, FieldMemOperand(scratch, offset));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check the context is a global context.
+ if (FLAG_debug_code) {
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+ Push(holder_reg); // Temporarily save holder on the stack.
+ // Read the first word and compare to the global_context_map.
+ lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+ Check(eq, "JSGlobalObject::global_context should be a global context.",
+ holder_reg, Operand(at));
+ Pop(holder_reg); // Restore holder.
+ }
+
+ // Check if both contexts are the same.
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ Branch(&same_contexts, eq, scratch, Operand(at));
+
+ // Check the context is a global context.
+ if (FLAG_debug_code) {
+ // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
+ Push(holder_reg); // Temporarily save holder on the stack.
+ mov(holder_reg, at); // Move at to its holding place.
+ LoadRoot(at, Heap::kNullValueRootIndex);
+ Check(ne, "JSGlobalProxy::context() should not be null.",
+ holder_reg, Operand(at));
+
+ lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kGlobalContextMapRootIndex);
+ Check(eq, "JSGlobalObject::global_context should be a global context.",
+ holder_reg, Operand(at));
+ // Restore at is not needed. at is reloaded below.
+ Pop(holder_reg); // Restore holder.
+ // Restore at to holder's context.
+ lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset));
+ }
+
+ // Check that the security token in the calling global object is
+ // compatible with the security token in the receiving global
+ // object.
+ int token_offset = Context::kHeaderSize +
+ Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+ lw(scratch, FieldMemOperand(scratch, token_offset));
+ lw(at, FieldMemOperand(at, token_offset));
+ Branch(miss, ne, scratch, Operand(at));
+
+ bind(&same_contexts);
}
// ---------------------------------------------------------------------------
// Instruction macros
-void MacroAssembler::Add(Register rd, Register rs, const Operand& rt) {
- if (rt.is_reg()) {
- add(rd, rs, rt.rm());
- } else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
- addi(rd, rs, rt.imm32_);
- } else {
- // li handles the relocation.
- ASSERT(!rs.is(at));
- li(at, rt);
- add(rd, rs, at);
- }
- }
-}
-
-
void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
addu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
addiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -161,6 +364,22 @@
}
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
+ if (rt.is_reg()) {
+ subu(rd, rs, rt.rm());
+ } else {
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
+ addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm).
+ } else {
+ // li handles the relocation.
+ ASSERT(!rs.is(at));
+ li(at, rt);
+ subu(rd, rs, at);
+ }
+ }
+}
+
+
void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
if (rt.is_reg()) {
mul(rd, rs, rt.rm());
@@ -225,7 +444,7 @@
if (rt.is_reg()) {
and_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
andi(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -241,7 +460,7 @@
if (rt.is_reg()) {
or_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
ori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -257,7 +476,7 @@
if (rt.is_reg()) {
xor_(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
xori(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -285,7 +504,7 @@
if (rt.is_reg()) {
slt(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
slti(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -301,7 +520,7 @@
if (rt.is_reg()) {
sltu(rd, rs, rt.rm());
} else {
- if (is_int16(rt.imm32_) && !MustUseAt(rt.rmode_)) {
+ if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) {
sltiu(rd, rs, rt.imm32_);
} else {
// li handles the relocation.
@@ -313,31 +532,51 @@
}
-//------------Pseudo-instructions-------------
-
-void MacroAssembler::movn(Register rd, Register rt) {
- addiu(at, zero_reg, -1); // Fill at with ones.
- xor_(rd, rt, at);
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+ if (mips32r2) {
+ if (rt.is_reg()) {
+ rotrv(rd, rs, rt.rm());
+ } else {
+ rotr(rd, rs, rt.imm32_);
+ }
+ } else {
+ if (rt.is_reg()) {
+ subu(at, zero_reg, rt.rm());
+ sllv(at, rs, at);
+ srlv(rd, rs, rt.rm());
+ or_(rd, rd, at);
+ } else {
+ if (rt.imm32_ == 0) {
+ srl(rd, rs, 0);
+ } else {
+ srl(at, rs, rt.imm32_);
+ sll(rd, rs, (0x20 - rt.imm32_) & 0x1f);
+ or_(rd, rd, at);
+ }
+ }
+ }
}
+//------------Pseudo-instructions-------------
+
void MacroAssembler::li(Register rd, Operand j, bool gen2instr) {
ASSERT(!j.is_reg());
-
- if (!MustUseAt(j.rmode_) && !gen2instr) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode_) && !gen2instr) {
// Normal load of an immediate value which does not need Relocation Info.
if (is_int16(j.imm32_)) {
addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & HIMask)) {
+ } else if (!(j.imm32_ & kHiMask)) {
ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & LOMask)) {
- lui(rd, (HIMask & j.imm32_) >> 16);
+ } else if (!(j.imm32_ & kImm16Mask)) {
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
} else {
- lui(rd, (HIMask & j.imm32_) >> 16);
- ori(rd, rd, (LOMask & j.imm32_));
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ ori(rd, rd, (j.imm32_ & kImm16Mask));
}
- } else if (MustUseAt(j.rmode_) || gen2instr) {
- if (MustUseAt(j.rmode_)) {
+ } else if (MustUseReg(j.rmode_) || gen2instr) {
+ if (MustUseReg(j.rmode_)) {
RecordRelocInfo(j.rmode_, j.imm32_);
}
// We need always the same number of instructions as we may need to patch
@@ -345,15 +584,15 @@
if (is_int16(j.imm32_)) {
nop();
addiu(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & HIMask)) {
+ } else if (!(j.imm32_ & kHiMask)) {
nop();
ori(rd, zero_reg, j.imm32_);
- } else if (!(j.imm32_ & LOMask)) {
+ } else if (!(j.imm32_ & kImm16Mask)) {
nop();
- lui(rd, (HIMask & j.imm32_) >> 16);
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
} else {
- lui(rd, (HIMask & j.imm32_) >> 16);
- ori(rd, rd, (LOMask & j.imm32_));
+ lui(rd, (j.imm32_ & kHiMask) >> kLuiShift);
+ ori(rd, rd, (j.imm32_ & kImm16Mask));
}
}
}
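When an immediate does not fit a single instruction, li above materializes it as a lui/ori pair; ignoring the gen2instr and relocation cases, which always force the two-instruction form, the choice between addiu, ori, lui and lui+ori depends only on which halves of the value are set. A plain C++ sketch of that decision, assuming kHiMask == 0xffff0000 and kImm16Mask == 0x0000ffff:

// Sketch of the immediate-splitting decision made by li() above.
#include <cstdint>

enum class LiSequence { kAddiu, kOri, kLui, kLuiOri };

inline LiSequence ChooseLiSequence(int32_t imm) {
  if (imm >= -32768 && imm <= 32767) return LiSequence::kAddiu;  // is_int16
  if ((imm & 0xffff0000) == 0) return LiSequence::kOri;     // high half clear
  if ((imm & 0x0000ffff) == 0) return LiSequence::kLui;     // low half clear
  return LiSequence::kLuiOri;                               // lui + ori pair
}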
@@ -417,230 +656,772 @@
}
-// Emulated condtional branches do not emit a nop in the branch delay slot.
+void MacroAssembler::Ext(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size < 32);
-// Trashes the at register if no scratch register is provided.
-void MacroAssembler::Branch(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
+ if (mips32r2) {
+ ext_(rt, rs, pos, size);
+ } else {
+ // Move rs to rt and shift it left then right to get the
+ // desired bitfield on the right side and zeroes on the left.
+ sll(rt, rs, 32 - (pos + size));
+ srl(rt, rt, 32 - size);
+ }
+}
+
+
+void MacroAssembler::Ins(Register rt,
+ Register rs,
+ uint16_t pos,
+ uint16_t size) {
+ ASSERT(pos < 32);
+ ASSERT(pos + size < 32);
+
+ if (mips32r2) {
+ ins_(rt, rs, pos, size);
+ } else {
+ ASSERT(!rt.is(t8) && !rs.is(t8));
+
+ srl(t8, rt, pos + size);
+ // The left chunk from rt that needs to
+ // be saved is on the right side of t8.
+ sll(at, t8, pos + size);
+ // The 'at' register now contains the left chunk on
+ // the left (proper position) and zeroes.
+ sll(t8, rt, 32 - pos);
+ // t8 now contains the right chunk on the left and zeroes.
+ srl(t8, t8, 32 - pos);
+ // t8 now contains the right chunk on
+ // the right (proper position) and zeroes.
+ or_(rt, at, t8);
+ // rt now contains the left and right chunks from the original rt
+ // in their proper position and zeroes in the middle.
+ sll(t8, rs, 32 - size);
+ // t8 now contains the chunk from rs on the left and zeroes.
+ srl(t8, t8, 32 - size - pos);
+ // t8 now contains the original chunk from rs in
+ // the middle (proper position).
+ or_(rt, rt, t8);
+ // rt now contains the result of the ins instruction in R2 mode.
+ }
+}
+
+
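Ext and Ins emulate the MIPS32R2 ext/ins bitfield instructions on R1 cores. Their intended effect on 32-bit values, expressed as ordinary C++ (a reference sketch assuming 0 < size and pos + size <= 32):

// Reference semantics for the Ext/Ins macros above (sketch, not V8 source).
#include <cstdint>

// rt = bits [pos, pos + size) of rs, right-aligned.
inline uint32_t ExtBits(uint32_t rs, unsigned pos, unsigned size) {
  return (rs << (32 - (pos + size))) >> (32 - size);
}

// Replace bits [pos, pos + size) of rt with the low 'size' bits of rs.
inline uint32_t InsBits(uint32_t rt, uint32_t rs, unsigned pos, unsigned size) {
  uint32_t mask = ((uint32_t(1) << size) - 1) << pos;
  return (rt & ~mask) | ((rs << pos) & mask);
}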
+void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
+ // Move the data from fs to t4.
+ mfc1(t4, fs);
+ return Cvt_d_uw(fd, t4);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) {
+  // Convert rs to an FP value in fd (and fd + 1).
+  // We do this by converting rs with its MSB cleared to avoid a signed
+  // conversion, then, if the MSB was set, adding 2^31-1 and 1 to the result.
+
+ ASSERT(!fd.is(f20));
+ ASSERT(!rs.is(t9));
+ ASSERT(!rs.is(t8));
+
+ // Save rs's MSB to t8
+ And(t8, rs, 0x80000000);
+ // Remove rs's MSB.
+ And(t9, rs, 0x7FFFFFFF);
+ // Move t9 to fd
+ mtc1(t9, fd);
+
+ // Convert fd to a real FP value.
+ cvt_d_w(fd, fd);
+
+ Label conversion_done;
+
+ // If rs's MSB was 0, it's done.
+ // Otherwise we need to add that to the FP register.
+ Branch(&conversion_done, eq, t8, Operand(zero_reg));
+
+ // First load 2^31 - 1 into f20.
+ Or(t9, zero_reg, 0x7FFFFFFF);
+ mtc1(t9, f20);
+
+ // Convert it to FP and add it to fd.
+ cvt_d_w(f20, f20);
+ add_d(fd, fd, f20);
+ // Now add 1.
+ Or(t9, zero_reg, 1);
+ mtc1(t9, f20);
+
+ cvt_d_w(f20, f20);
+ add_d(fd, fd, f20);
+ bind(&conversion_done);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs) {
+ Trunc_uw_d(fs, t4);
+ mtc1(t4, fd);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd, Register rs) {
+ ASSERT(!fd.is(f22));
+ ASSERT(!rs.is(t6));
+
+ // Load 2^31 into f22.
+ Or(t6, zero_reg, 0x80000000);
+ Cvt_d_uw(f22, t6);
+
+ // Test if f22 > fd.
+ c(OLT, D, fd, f22);
+
+ Label simple_convert;
+ // If fd < 2^31 we can convert it normally.
+ bc1t(&simple_convert);
+
+ // First we subtract 2^31 from fd, then trunc it to rs
+ // and add 2^31 to rs.
+
+ sub_d(f22, fd, f22);
+ trunc_w_d(f22, f22);
+ mfc1(rs, f22);
+ or_(rs, rs, t6);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ trunc_w_d(f22, fd);
+ mfc1(rs, f22);
+
+ bind(&done);
+}
+
+
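Cvt_d_uw and Trunc_uw_d work around the lack of unsigned FP conversion instructions by splitting off the top bit: convert the low 31 bits with the signed instruction and add 2^31 back, or subtract 2^31 before truncating and OR the bit back in. The same idea in C++ (a sketch for finite, in-range inputs, not a substitute for the register-level code):

// Sketch of the unsigned <-> double conversions built above from signed ops.
#include <cstdint>

inline double UnsignedToDouble(uint32_t u) {
  double d = static_cast<double>(static_cast<int32_t>(u & 0x7fffffffu));
  if (u & 0x80000000u) d += 2147483647.0 + 1.0;  // Add 2^31 as (2^31-1) + 1.
  return d;
}

inline uint32_t DoubleToUnsigned(double d) {
  if (d < 2147483648.0) {  // Below 2^31: convert directly.
    return static_cast<uint32_t>(static_cast<int32_t>(d));
  }
  // Subtract 2^31, truncate, then OR the top bit back in.
  return static_cast<uint32_t>(static_cast<int32_t>(d - 2147483648.0)) |
         0x80000000u;
}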
+// Tries to get a signed int32 out of a double precision floating point heap
+// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
+// 32-bit signed integer range.
+// This method implementation differs from the ARM version for performance
+// reasons.
+void MacroAssembler::ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label *not_int32) {
+ Label right_exponent, done;
+ // Get exponent word (ENDIAN issues).
+ lw(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ And(scratch2, scratch, Operand(HeapNumber::kExponentMask));
+ // Load dest with zero. We use this either for the final shift or
+ // for the answer.
+ mov(dest, zero_reg);
+ // Check whether the exponent matches a 32 bit signed int that is not a Smi.
+ // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
+ // the exponent that we are fastest at and also the highest exponent we can
+ // handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ // If we have a match of the int32-but-not-Smi exponent then skip some logic.
+ Branch(&right_exponent, eq, scratch2, Operand(non_smi_exponent));
+ // If the exponent is higher than that then go to not_int32 case. This
+ // catches numbers that don't fit in a signed int32, infinities and NaNs.
+ Branch(not_int32, gt, scratch2, Operand(non_smi_exponent));
+
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ Subu(scratch2, scratch2, Operand(zero_exponent));
+ // Dest already has a Smi zero.
+ Branch(&done, lt, scratch2, Operand(zero_reg));
+ if (!Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ srl(dest, scratch2, HeapNumber::kExponentShift);
+ // We now have the exponent in dest. Subtract from 30 to get
+ // how much to shift down.
+ li(at, Operand(30));
+ subu(dest, at, dest);
+ }
+ bind(&right_exponent);
+ if (Isolate::Current()->cpu_features()->IsSupported(FPU)) {
+ CpuFeatures::Scope scope(FPU);
+ // MIPS FPU instructions implementing double precision to integer
+ // conversion using round to zero. Since the FP value was qualified
+ // above, the resulting integer should be a legal int32.
+ // The original 'Exponent' word is still in scratch.
+ lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
+ trunc_w_d(double_scratch, double_scratch);
+ mfc1(dest, double_scratch);
+ } else {
+ // On entry, dest has final downshift, scratch has original sign/exp/mant.
+ // Save sign bit in top bit of dest.
+ And(scratch2, scratch, Operand(0x80000000));
+ Or(dest, dest, Operand(scratch2));
+ // Put back the implicit 1, just above mantissa field.
+ Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
+
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just orred in the implicit bit so that took care of one and
+ // we want to leave the sign bit 0 so we subtract 2 bits from the shift
+ // distance. But we want to clear the sign-bit so shift one more bit
+ // left, then shift right one bit.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ sll(scratch, scratch, shift_distance + 1);
+ srl(scratch, scratch, 1);
+
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+    // Extract the top 10 bits of scratch2 and insert them into the
+    // bottom 10 bits of scratch.
+ // The width of the field here is the same as the shift amount above.
+ const int field_width = shift_distance;
+ Ext(scratch2, scratch2, 32-shift_distance, field_width);
+ Ins(scratch, scratch2, 0, field_width);
+ // Move down according to the exponent.
+ srlv(scratch, scratch, dest);
+ // Prepare the negative version of our integer.
+ subu(scratch2, zero_reg, scratch);
+    // Trick to check the sign bit (MSB) held in dest: count leading zeros.
+    // A count of 0 indicates negative; keep the negated version via a
+    // conditional move.
+ clz(dest, dest);
+ movz(scratch, scratch2, dest);
+ mov(dest, scratch);
+ }
+ bind(&done);
+}
+
+
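The non-FPU path of ConvertToInt32 decodes the sign, exponent and mantissa fields by hand and shifts the mantissa into place. The equivalent truncation over a raw IEEE-754 bit pattern, as portable C++ (a sketch that assumes a finite input with |value| < 2^31; it does not mirror the heap-number word layout used above):

// Sketch: truncate a double with |value| < 2^31 to int32 using only integer
// operations on its IEEE-754 bit pattern.
#include <cstdint>
#include <cstring>

inline int32_t TruncateSmallDoubleToInt32(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint32_t sign = static_cast<uint32_t>(bits >> 63);
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  if (exponent < 0) return 0;  // Magnitude below 1.0 truncates to zero.
  // Implicit leading 1 plus the stored mantissa, aligned by the exponent.
  uint64_t mantissa = (bits & ((uint64_t(1) << 52) - 1)) | (uint64_t(1) << 52);
  uint32_t magnitude = static_cast<uint32_t>(mantissa >> (52 - exponent));
  return sign ? -static_cast<int32_t>(magnitude)
              : static_cast<int32_t>(magnitude);
}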
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
+ (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
+ (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
+
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+ b(offset);
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+ ASSERT(!rs.is(zero_reg));
Register r2 = no_reg;
+ Register scratch = at;
+
if (rt.is_reg()) {
// We don't want any other register but scratch clobbered.
ASSERT(!scratch.is(rs) && !scratch.is(rt.rm_));
r2 = rt.rm_;
- } else if (cond != cc_always) {
- // We don't want any other register but scratch clobbered.
- ASSERT(!scratch.is(rs));
- r2 = scratch;
- li(r2, rt);
- }
-
- switch (cond) {
- case cc_always:
- b(offset);
- break;
- case eq:
- beq(rs, r2, offset);
- break;
- case ne:
- bne(rs, r2, offset);
- break;
-
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ bne(rs, r2, offset);
+ break;
// Signed comparison
- case greater:
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- break;
-
+ case greater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
// Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, offset);
- break;
-
- default:
- UNREACHABLE();
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ b(offset);
+ } else {
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ b(offset);
+ break;
+ case eq:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ // We don't want any other register but scratch clobbered.
+ ASSERT(!scratch.is(rs));
+ r2 = scratch;
+ li(r2, rt);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison
+ case greater:
+ if (rt.imm32_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm32_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm32_ == 0) {
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm32_ == 0) {
+ blez(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm32_ == 0) {
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm32_ == 0) {
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm32_ == 0) {
+ b(offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm32_ == 0) {
+ b(offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
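MIPS has only compare-with-zero branches plus beq/bne, so the conditional cases above synthesize the remaining relations with slt/sltu into the scratch register followed by a beq/bne against zero (or use bgtz/bgez/bltz/blez directly when the right operand is zero). The signed 'greater' case, reduced to its reference semantics in C++:

// Sketch of the 'greater' emulation above: slt(scratch, r2, rs) followed by
// bne(scratch, zero_reg, offset).
#include <cstdint>

inline bool BranchTakenGreaterSigned(int32_t rs, int32_t r2) {
  int32_t scratch = (r2 < rs) ? 1 : 0;  // slt
  return scratch != 0;                  // bne against zero_reg
}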
-void MacroAssembler::Branch(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
- Register r2 = no_reg;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
-
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
// We use branch_offset as an argument for the branch instructions to be sure
// it is called just before generating the branch instruction, as needed.
- switch (cond) {
- case cc_always:
- b(shifted_branch_offset(L, false));
- break;
- case eq:
- beq(rs, r2, shifted_branch_offset(L, false));
- break;
- case ne:
- bne(rs, r2, shifted_branch_offset(L, false));
- break;
+ b(shifted_branch_offset(L, false));
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case less:
- slt(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case less_equal:
- slt(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
-
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Uless:
- sltu(scratch, rs, r2);
- bne(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- beq(scratch, zero_reg, shifted_branch_offset(L, false));
- break;
-
- default:
- UNREACHABLE();
- }
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-// Trashes the at register if no scratch register is provided.
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
+ Register r2 = no_reg;
+ Register scratch = at;
+ if (rt.is_reg()) {
+ r2 = rt.rm_;
+ // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison
+ case greater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else {
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (r2.is(zero_reg)) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching the
+ // target.
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ break;
+ case eq:
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ beq(rs, r2, offset);
+ break;
+ case ne:
+ r2 = scratch;
+ li(r2, rt);
+ offset = shifted_branch_offset(L, false);
+ bne(rs, r2, offset);
+ break;
+ // Signed comparison
+ case greater:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case greater_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case less:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bltz(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ slti(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case less_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ blez(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ slt(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ // Unsigned comparison.
+ case Ugreater:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgtz(rs, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Ugreater_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ bgez(rs, offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else if (is_int16(rt.imm32_)) {
+ sltiu(scratch, rs, rt.imm32_);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, rs, r2);
+ offset = shifted_branch_offset(L, false);
+ bne(scratch, zero_reg, offset);
+ }
+ break;
+ case Uless_equal:
+ if (rt.imm32_ == 0) {
+ offset = shifted_branch_offset(L, false);
+ b(offset);
+ } else {
+ r2 = scratch;
+ li(r2, rt);
+ sltu(scratch, r2, rs);
+ offset = shifted_branch_offset(L, false);
+ beq(scratch, zero_reg, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+  // Check that the offset actually fits in an int16_t.
+ ASSERT(is_int16(offset));
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
// We need to use a bgezal or bltzal, but they can't be used directly with the
// slt instructions. We could use sub or add instead but we would miss overflow
// cases, so we keep slt and add an intermediate third instruction.
-void MacroAssembler::BranchAndLink(Condition cond, int16_t offset, Register rs,
- const Operand& rt, Register scratch) {
- Register r2 = no_reg;
- if (rt.is_reg()) {
- r2 = rt.rm_;
- } else if (cond != cc_always) {
- r2 = scratch;
- li(r2, rt);
- }
+void MacroAssembler::BranchAndLink(int16_t offset,
+ BranchDelaySlot bdslot) {
+ bal(offset);
- switch (cond) {
- case cc_always:
- bal(offset);
- break;
- case eq:
- bne(rs, r2, 2);
- nop();
- bal(offset);
- break;
- case ne:
- beq(rs, r2, 2);
- nop();
- bal(offset);
- break;
-
- // Signed comparison
- case greater:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case greater_equal:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case less:
- slt(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case less_equal:
- slt(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
-
- // Unsigned comparison.
- case Ugreater:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Ugreater_equal:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
- case Uless:
- sltu(scratch, rs, r2);
- addiu(scratch, scratch, -1);
- bgezal(scratch, offset);
- break;
- case Uless_equal:
- sltu(scratch, r2, rs);
- addiu(scratch, scratch, -1);
- bltzal(scratch, offset);
- break;
-
- default:
- UNREACHABLE();
- }
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::BranchAndLink(Condition cond, Label* L, Register rs,
- const Operand& rt, Register scratch) {
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
Register r2 = no_reg;
+ Register scratch = at;
+
if (rt.is_reg()) {
r2 = rt.rm_;
} else if (cond != cc_always) {
@@ -650,157 +1431,372 @@
switch (cond) {
case cc_always:
- bal(shifted_branch_offset(L, false));
+ bal(offset);
break;
case eq:
bne(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ bal(offset);
break;
case ne:
beq(rs, r2, 2);
nop();
- bal(shifted_branch_offset(L, false));
+ bal(offset);
break;
// Signed comparison
case greater:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ bgezal(scratch, offset);
break;
case greater_equal:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ bltzal(scratch, offset);
break;
case less:
slt(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ bgezal(scratch, offset);
break;
case less_equal:
slt(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ bltzal(scratch, offset);
break;
// Unsigned comparison.
case Ugreater:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ bgezal(scratch, offset);
break;
case Ugreater_equal:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ bltzal(scratch, offset);
break;
case Uless:
sltu(scratch, rs, r2);
addiu(scratch, scratch, -1);
- bgezal(scratch, shifted_branch_offset(L, false));
+ bgezal(scratch, offset);
break;
case Uless_equal:
sltu(scratch, r2, rs);
addiu(scratch, scratch, -1);
- bltzal(scratch, shifted_branch_offset(L, false));
+ bltzal(scratch, offset);
break;
default:
UNREACHABLE();
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+ bal(shifted_branch_offset(L, false));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+ const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BRANCH_ARGS_CHECK(cond, rs, rt);
+
+ int32_t offset;
+ Register r2 = no_reg;
+ Register scratch = at;
+ if (rt.is_reg()) {
+ r2 = rt.rm_;
+ } else if (cond != cc_always) {
+ r2 = scratch;
+ li(r2, rt);
+ }
+
+ switch (cond) {
+ case cc_always:
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case eq:
+ bne(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+ case ne:
+ beq(rs, r2, 2);
+ nop();
+ offset = shifted_branch_offset(L, false);
+ bal(offset);
+ break;
+
+ // Signed comparison
+ case greater:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case greater_equal:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case less:
+ slt(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case less_equal:
+ slt(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Ugreater_equal:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+ case Uless:
+ sltu(scratch, rs, r2);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bgezal(scratch, offset);
+ break;
+ case Uless_equal:
+ sltu(scratch, r2, rs);
+ addiu(scratch, scratch, -1);
+ offset = shifted_branch_offset(L, false);
+ bltzal(scratch, offset);
+ break;
+
+ default:
+ UNREACHABLE();
+ }
+
+  // Check that the offset actually fits in an int16_t.
+ ASSERT(is_int16(offset));
+
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+void MacroAssembler::Jump(const Operand& target, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target.is_reg()) {
+ jr(target.rm());
+ } else {
+ if (!MustUseReg(target.rmode_)) {
+ j(target.imm32_);
+ } else {
+ li(t9, target);
+ jr(t9);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
void MacroAssembler::Jump(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
if (cond == cc_always) {
jr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jr(target.rm());
}
- } else { // !target.is_reg()
- if (!MustUseAt(target.rmode_)) {
+ } else { // Not register target.
+ if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
j(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
j(target.imm32_); // Will generate only one instruction.
}
- } else { // MustUseAt(target)
- li(at, target);
+ } else { // MustUseReg(target)
+ li(t9, target);
if (cond == cc_always) {
- jr(at);
+ jr(t9);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
- jr(at); // Will generate only one instruction.
+ Branch(2, NegateCondition(cond), rs, rt);
+ jr(t9); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
+// Note: To call gcc-compiled C code on MIPS, you must call through t9.
+void MacroAssembler::Call(const Operand& target, BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (target.is_reg()) {
+ jalr(target.rm());
+ } else { // !target.is_reg()
+ if (!MustUseReg(target.rmode_)) {
+ jal(target.imm32_);
+ } else { // MustUseReg(target)
+ li(t9, target);
+ jalr(t9);
+ }
+ }
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
+}
+
+
+// Note: To call gcc-compiled C code on MIPS, you must call through t9.
void MacroAssembler::Call(const Operand& target,
- Condition cond, Register rs, const Operand& rt) {
+ Condition cond, Register rs, const Operand& rt,
+ BranchDelaySlot bdslot) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ BRANCH_ARGS_CHECK(cond, rs, rt);
if (target.is_reg()) {
if (cond == cc_always) {
jalr(target.rm());
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jalr(target.rm());
}
} else { // !target.is_reg()
- if (!MustUseAt(target.rmode_)) {
+ if (!MustUseReg(target.rmode_)) {
if (cond == cc_always) {
jal(target.imm32_);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
+ Branch(2, NegateCondition(cond), rs, rt);
jal(target.imm32_); // Will generate only one instruction.
}
- } else { // MustUseAt(target)
- li(at, target);
+ } else { // MustUseReg(target)
+ li(t9, target);
if (cond == cc_always) {
- jalr(at);
+ jalr(t9);
} else {
- Branch(NegateCondition(cond), 2, rs, rt);
- jalr(at); // Will generate only one instruction.
+ Branch(2, NegateCondition(cond), rs, rt);
+ jalr(t9); // Will generate only one instruction.
}
}
}
- // Emit a nop in the branch delay slot.
- nop();
-}
-
-void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
- UNIMPLEMENTED_MIPS();
+ // Emit a nop in the branch delay slot if required.
+ if (bdslot == PROTECT)
+ nop();
}
-void MacroAssembler::Drop(int count, Condition cond) {
- UNIMPLEMENTED_MIPS();
+void MacroAssembler::Drop(int count,
+ Condition cond,
+ Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ if (count > 0) {
+ addiu(sp, sp, count * kPointerSize);
+ }
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::DropAndRet(int drop,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ // This is a workaround to make sure only one branch instruction is
+ // generated. It relies on Drop and Ret not creating branches if
+ // cond == cc_always.
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), r1, r2);
+ }
+
+ Drop(drop);
+ Ret();
+
+ if (cond != cc_always) {
+ bind(&skip);
+ }
+}
+
+
+void MacroAssembler::Swap(Register reg1,
+ Register reg2,
+ Register scratch) {
+ if (scratch.is(no_reg)) {
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mov(scratch, reg1);
+ mov(reg1, reg2);
+ mov(reg2, scratch);
+ }
}
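When no scratch register is available, Swap falls back to the classic three-XOR exchange; note that it assumes reg1 and reg2 are different registers, since XOR-swapping a value with itself zeroes it. The scalar equivalent, as a sketch:

// Sketch of the no-scratch path of Swap; a and b must not alias.
#include <cstdint>

inline void XorSwap(uint32_t& a, uint32_t& b) {
  a ^= b;
  b ^= a;
  a ^= b;
}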
void MacroAssembler::Call(Label* target) {
- UNIMPLEMENTED_MIPS();
+ BranchAndLink(target);
+}
+
+
+void MacroAssembler::Move(Register dst, Register src) {
+ if (!dst.is(src)) {
+ mov(dst, src);
+ }
}
#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
- // Debugger Support
- void MacroAssembler::DebugBreak() {
- UNIMPLEMENTED_MIPS();
- }
-#endif
+void MacroAssembler::DebugBreak() {
+ ASSERT(allow_stub_calls());
+ mov(a0, zero_reg);
+ li(a1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+ CEntryStub ces(1);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+#endif // ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
@@ -822,7 +1818,7 @@
&& StackHandlerConstants::kPCOffset == 3 * kPointerSize
&& StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address));
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -848,7 +1844,7 @@
li(t0, Operand(StackHandler::ENTRY));
// Save the current handler as the next handler.
- LoadExternalReference(t2, ExternalReference(Isolate::k_handler_address));
+ li(t2, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
lw(t1, MemOperand(t2));
addiu(sp, sp, -StackHandlerConstants::kSize);
@@ -864,45 +1860,377 @@
void MacroAssembler::PopTryHandler() {
- UNIMPLEMENTED_MIPS();
+ ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
+ pop(a1);
+ Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+ li(at, Operand(ExternalReference(Isolate::k_handler_address, isolate())));
+ sw(a1, MemOperand(at));
}
-
-// -----------------------------------------------------------------------------
-// Activation frames
-
-void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) {
- Label extra_push, end;
-
- andi(scratch, sp, 7);
-
- // We check for args and receiver size on the stack, all of them word sized.
- // We add one for sp, that we also want to store on the stack.
- if (((arg_count + 1) % kPointerSizeLog2) == 0) {
- Branch(ne, &extra_push, at, Operand(zero_reg));
- } else { // ((arg_count + 1) % 2) == 1
- Branch(eq, &extra_push, at, Operand(zero_reg));
+void MacroAssembler::AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (FLAG_debug_code) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
}
- // Save sp on the stack.
- mov(scratch, sp);
- Push(scratch);
- b(&end);
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(t9));
+ ASSERT(!scratch2.is(t9));
+ ASSERT(!result.is(t9));
- // Align before saving sp on the stack.
- bind(&extra_push);
- mov(scratch, sp);
- addiu(sp, sp, -8);
- sw(scratch, MemOperand(sp));
+ // Make object size into bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ object_size *= kPointerSize;
+ }
+ ASSERT_EQ(0, object_size & kObjectAlignmentMask);
- // The stack is aligned and sp is stored on the top.
- bind(&end);
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+ // used. On MIPS we don't have ldm so we don't need additional checks either.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate());
+ intptr_t top =
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ Register obj_size_reg = scratch2;
+ li(topaddr, Operand(new_space_allocation_top));
+ li(obj_size_reg, Operand(object_size));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ lw(result, MemOperand(topaddr));
+ lw(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below anyway, so clobbering it here does not make
+      // register contents differ between debug and release mode.
+ lw(t9, MemOperand(topaddr));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ lw(t9, MemOperand(topaddr, limit - top));
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top.
+ Addu(scratch2, result, Operand(obj_size_reg));
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
+ sw(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Addu(result, result, Operand(kHeapObjectTag));
+ }
}
-void MacroAssembler::ReturnFromAlignedCall() {
- lw(sp, MemOperand(sp));
+void MacroAssembler::AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (!FLAG_inline_new) {
+ if (FLAG_debug_code) {
+ // Trash the registers to simulate an allocation failure.
+ li(result, 0x7091);
+ li(scratch1, 0x7191);
+ li(scratch2, 0x7291);
+ }
+ jmp(gc_required);
+ return;
+ }
+
+ ASSERT(!result.is(scratch1));
+ ASSERT(!result.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
+ ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+
+ // Check relative positions of allocation top and limit addresses.
+ // ARM adds additional checks to make sure the ldm instruction can be
+ // used. On MIPS we don't have ldm so we don't need additional checks either.
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+ ExternalReference new_space_allocation_limit =
+ ExternalReference::new_space_allocation_limit_address(isolate());
+ intptr_t top =
+ reinterpret_cast<intptr_t>(new_space_allocation_top.address());
+ intptr_t limit =
+ reinterpret_cast<intptr_t>(new_space_allocation_limit.address());
+ ASSERT((limit - top) == kPointerSize);
+
+ // Set up allocation top address and object size registers.
+ Register topaddr = scratch1;
+ li(topaddr, Operand(new_space_allocation_top));
+
+ // This code stores a temporary value in t9.
+ if ((flags & RESULT_CONTAINS_TOP) == 0) {
+ // Load allocation top into result and allocation limit into t9.
+ lw(result, MemOperand(topaddr));
+ lw(t9, MemOperand(topaddr, kPointerSize));
+ } else {
+ if (FLAG_debug_code) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below, so this use of t9 does not cause a difference in
+      // register content between debug and release mode.
+ lw(t9, MemOperand(topaddr));
+ Check(eq, "Unexpected allocation top", result, Operand(t9));
+ }
+ // Load allocation limit into t9. Result already contains allocation top.
+ lw(t9, MemOperand(topaddr, limit - top));
+ }
+
+ // Calculate new top and bail out if new space is exhausted. Use result
+ // to calculate the new top. Object size may be in words so a shift is
+ // required to get the number of bytes.
+ if ((flags & SIZE_IN_WORDS) != 0) {
+ sll(scratch2, object_size, kPointerSizeLog2);
+ Addu(scratch2, result, scratch2);
+ } else {
+ Addu(scratch2, result, Operand(object_size));
+ }
+ Branch(gc_required, Ugreater, scratch2, Operand(t9));
+
+ // Update allocation top. result temporarily holds the new top.
+ if (FLAG_debug_code) {
+ And(t9, scratch2, Operand(kObjectAlignmentMask));
+ Check(eq, "Unaligned allocation in new space", t9, Operand(zero_reg));
+ }
+ sw(scratch2, MemOperand(topaddr));
+
+ // Tag object if requested.
+ if ((flags & TAG_OBJECT) != 0) {
+ Addu(result, result, Operand(kHeapObjectTag));
+ }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+ Register scratch) {
+ ExternalReference new_space_allocation_top =
+ ExternalReference::new_space_allocation_top_address(isolate());
+
+ // Make sure the object has no tag before resetting top.
+ And(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+ // Check that the object un-allocated is below the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ lw(scratch, MemOperand(scratch));
+ Check(less, "Undo allocation of non allocated memory",
+ object, Operand(scratch));
+#endif
+ // Write the address of the object to un-allocate as the current top.
+ li(scratch, Operand(new_space_allocation_top));
+ sw(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+ ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ sll(scratch1, length, 1); // Length in bytes, not chars.
+ addiu(scratch1, scratch1,
+ kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+ // Allocate two-byte string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ // Calculate the number of bytes needed for the characters in the string
+ // while observing object alignment.
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT(kCharSize == 1);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
+ And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
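+  // For example, with 8-byte object alignment a 5-character string needs
+  // 5 + SeqAsciiString::kHeaderSize bytes, rounded up here to the next
+  // multiple of 8.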
+
+ // Allocate ASCII string in new space.
+ AllocateInNewSpace(scratch1,
+ result,
+ scratch2,
+ scratch3,
+ gc_required,
+ TAG_OBJECT);
+
+ // Set the map, length and hash field.
+ InitializeNewString(result,
+ length,
+ Heap::kAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ InitializeNewString(result,
+ length,
+ Heap::kConsStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+void MacroAssembler::AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ AllocateInNewSpace(ConsString::kSize,
+ result,
+ scratch1,
+ scratch2,
+ gc_required,
+ TAG_OBJECT);
+ InitializeNewString(result,
+ length,
+ Heap::kConsAsciiStringMapRootIndex,
+ scratch1,
+ scratch2);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* need_gc) {
+ // Allocate an object in the heap for the heap number and tag it as a heap
+ // object.
+ AllocateInNewSpace(HeapNumber::kSize,
+ result,
+ scratch1,
+ scratch2,
+ need_gc,
+ TAG_OBJECT);
+
+ // Store heap number map in the allocated object.
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
+ AllocateHeapNumber(result, scratch1, scratch2, t6, gc_required);
+ sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst,
+ Register src,
+ RegList temps,
+ int field_count) {
+ ASSERT((temps & dst.bit()) == 0);
+ ASSERT((temps & src.bit()) == 0);
+ // Primitive implementation using only one temporary register.
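+  // For example, CopyFields(a0, a1, t2.bit(), 3) copies three pointer-sized
+  // fields from a1 to a0 using t2 as the only scratch register (registers
+  // chosen for illustration only).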
+
+ Register tmp = no_reg;
+ // Find a temp register in temps list.
+ for (int i = 0; i < kNumRegisters; i++) {
+ if ((temps & (1 << i)) != 0) {
+ tmp.code_ = i;
+ break;
+ }
+ }
+ ASSERT(!tmp.is(no_reg));
+
+ for (int i = 0; i < field_count; i++) {
+ lw(tmp, FieldMemOperand(src, i * kPointerSize));
+ sw(tmp, FieldMemOperand(dst, i * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object) {
+ if (!is_heap_object) {
+ JumpIfSmi(obj, fail);
+ }
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ li(at, Operand(map));
+ Branch(fail, ne, scratch, Operand(at));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ bool is_heap_object) {
+ if (!is_heap_object) {
+ JumpIfSmi(obj, fail);
+ }
+ lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+ LoadRoot(at, index);
+ Branch(fail, ne, scratch, Operand(at));
}
@@ -914,7 +2242,8 @@
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
bool definitely_matches = false;
Label regular_invoke;
@@ -949,11 +2278,13 @@
li(a2, Operand(expected.immediate()));
}
}
- } else if (actual.is_immediate()) {
-    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.immediate()));
- li(a0, Operand(actual.immediate()));
} else {
-    Branch(eq, &regular_invoke, expected.reg(), Operand(actual.reg()));
+ if (actual.is_immediate()) {
+      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+ li(a0, Operand(actual.immediate()));
+ } else {
+      Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
+ }
}
if (!definitely_matches) {
@@ -962,25 +2293,29 @@
addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
}
- ExternalReference adaptor(Builtins::ArgumentsAdaptorTrampoline);
+ Handle<Code> adaptor =
+ isolate()->builtins()->ArgumentsAdaptorTrampoline();
if (flag == CALL_FUNCTION) {
- CallBuiltin(adaptor);
- b(done);
- nop();
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ if (post_call_generator != NULL) post_call_generator->Generate();
+ jmp(done);
} else {
- JumpToBuiltin(adaptor);
+ Jump(adaptor, RelocInfo::CODE_TARGET);
}
    bind(&regular_invoke);
}
}
+
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
Label done;
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
+ InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ post_call_generator);
if (flag == CALL_FUNCTION) {
Call(code);
} else {
@@ -1014,7 +2349,8 @@
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
- InvokeFlag flag) {
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
// Contract with called JS functions requires that function is passed in a1.
ASSERT(function.is(a1));
Register expected_reg = a2;
@@ -1025,68 +2361,120 @@
lw(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
- lw(code_reg,
- MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
- addiu(code_reg, code_reg, Code::kHeaderSize - kHeapObjectTag);
+ sra(expected_reg, expected_reg, kSmiTagSize);
+ lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag);
+ InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+}
+
+
+void MacroAssembler::InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
+ InvokeFlag flag) {
+ ASSERT(function->is_compiled());
+
+ // Get the function and setup the context.
+ li(a1, Operand(Handle<JSFunction>(function)));
+ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ // Invoke the cached code.
+ Handle<Code> code(function->code());
+ ParameterCount expected(function->shared()->formal_parameter_count());
+ if (V8::UseCrankshaft()) {
+ UNIMPLEMENTED_MIPS();
+ } else {
+ InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
+ }
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail) {
+ lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+ IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail) {
+ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Branch(fail, lt, scratch, Operand(FIRST_JS_OBJECT_TYPE));
+ Branch(fail, gt, scratch, Operand(LAST_JS_OBJECT_TYPE));
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail) {
+ ASSERT(kNotStringTag != 0);
+
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ And(scratch, scratch, Operand(kIsNotStringMask));
+ Branch(fail, ne, scratch, Operand(zero_reg));
}
// ---------------------------------------------------------------------------
// Support functions.
- void MacroAssembler::GetObjectType(Register function,
- Register map,
- Register type_reg) {
- lw(map, FieldMemOperand(function, HeapObject::kMapOffset));
- lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- }
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
+
+ // Check that the function really is a function. Load map into result reg.
+ GetObjectType(function, result, scratch);
+ Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+ // Make sure that the function has an instance prototype.
+ Label non_instance;
+ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+ Branch(&non_instance, ne, scratch, Operand(zero_reg));
+
+ // Get the prototype or initial map from the function.
+ lw(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+ // If the prototype or initial map is the hole, don't return it and
+ // simply miss the cache instead. This will allow us to allocate a
+ // prototype object on-demand in the runtime system.
+ LoadRoot(t8, Heap::kTheHoleValueRootIndex);
+ Branch(miss, eq, result, Operand(t8));
+
+ // If the function does not have an initial map, we're done.
+ Label done;
+ GetObjectType(result, scratch, scratch);
+ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+ // Get the prototype from the initial map.
+ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+
+ // All done.
+ bind(&done);
+}
- void MacroAssembler::CallBuiltin(ExternalReference builtin_entry) {
- // Load builtin address.
- LoadExternalReference(t9, builtin_entry);
- lw(t9, MemOperand(t9)); // Deref address.
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- // Call and allocate arguments slots.
- jalr(t9);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
- }
-
-
- void MacroAssembler::CallBuiltin(Register target) {
- // Target already holds target address.
- // Call and allocate arguments slots.
- jalr(target);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize);
- }
-
-
- void MacroAssembler::JumpToBuiltin(ExternalReference builtin_entry) {
- // Load builtin address.
- LoadExternalReference(t9, builtin_entry);
- lw(t9, MemOperand(t9)); // Deref address.
- addiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
- // Call and allocate arguments slots.
- jr(t9);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- }
-
-
- void MacroAssembler::JumpToBuiltin(Register target) {
- // t9 already holds target address.
- // Call and allocate arguments slots.
- jr(t9);
- // Use the branch delay slot to allocated argument slots.
- addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize);
- }
+void MacroAssembler::GetObjectType(Register object,
+ Register map,
+ Register type_reg) {
+ lw(map, FieldMemOperand(object, HeapObject::kMapOffset));
+ lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
// -----------------------------------------------------------------------------
@@ -1099,8 +2487,9 @@
}
-void MacroAssembler::StubReturn(int argc) {
- UNIMPLEMENTED_MIPS();
+void MacroAssembler::TailCallStub(CodeStub* stub) {
+ ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs
+ Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
@@ -1112,7 +2501,71 @@
}
-void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
+void MacroAssembler::IndexFromHash(Register hash,
+ Register index) {
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
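+  // For example, if the cached array-index bits of |hash| hold 42, |index|
+  // ends up holding Smi::FromInt(42) after the extract and shift below.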
+ STATIC_ASSERT(kSmiTag == 0);
+ Ext(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
+ sll(index, hash, kSmiTagSize);
+}
+
+
+void MacroAssembler::ObjectToDoubleFPURegister(Register object,
+ FPURegister result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags) {
+ Label done;
+ if ((flags & OBJECT_NOT_SMI) == 0) {
+ Label not_smi;
+    JumpIfNotSmi(object, &not_smi);
+ // Remove smi tag and convert to double.
+ sra(scratch1, object, kSmiTagSize);
+ mtc1(scratch1, result);
+ cvt_d_w(result, result);
+ Branch(&done);
+    bind(&not_smi);
+ }
+ // Check for heap number and load double value from it.
+ lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ Branch(not_number, ne, scratch1, Operand(heap_number_map));
+
+ if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+ // If exponent is all ones the number is either a NaN or +/-Infinity.
+ Register exponent = scratch1;
+ Register mask_reg = scratch2;
+ lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ li(mask_reg, HeapNumber::kExponentMask);
+
+ And(exponent, exponent, mask_reg);
+ Branch(not_number, eq, exponent, Operand(mask_reg));
+ }
+ ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+ bind(&done);
+}
+
+
+
+void MacroAssembler::SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1) {
+ sra(scratch1, smi, kSmiTagSize);
+ mtc1(scratch1, value);
+ cvt_d_w(value, value);
+}
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+ int num_arguments) {
// All parameters are on the stack. v0 has the return value after call.
// If the expected number of arguments of the runtime function is
@@ -1128,69 +2581,129 @@
// should remove this need and make the runtime routine entry code
// smarter.
li(a0, num_arguments);
- LoadExternalReference(a1, ExternalReference(f));
+ li(a1, Operand(ExternalReference(f, isolate())));
CEntryStub stub(1);
CallStub(&stub);
}
+void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+ const Runtime::Function* function = Runtime::FunctionForId(id);
+ li(a0, Operand(function->nargs));
+ li(a1, Operand(ExternalReference(function, isolate())));
+ CEntryStub stub(1);
+ stub.SaveDoubles();
+ CallStub(&stub);
+}
+
+
void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) {
CallRuntime(Runtime::FunctionForId(fid), num_arguments);
}
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+ int num_arguments) {
+ li(a0, Operand(num_arguments));
+ li(a1, Operand(ext));
+
+ CEntryStub stub(1);
+ CallStub(&stub);
+}
+
+
void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
int num_arguments,
int result_size) {
- UNIMPLEMENTED_MIPS();
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ li(a0, Operand(num_arguments));
+ JumpToExternalReference(ext);
}
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
int num_arguments,
int result_size) {
- TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
+ TailCallExternalReference(ExternalReference(fid, isolate()),
+ num_arguments,
+ result_size);
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
- bool* resolved) {
- UNIMPLEMENTED_MIPS();
- return Handle<Code>(reinterpret_cast<Code*>(NULL)); // UNIMPLEMENTED RETURN
+ li(a1, Operand(builtin));
+ CEntryStub stub(1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
- InvokeJSFlags flags) {
- UNIMPLEMENTED_MIPS();
+ InvokeJSFlags flags,
+ PostCallGenerator* post_call_generator) {
+ GetBuiltinEntry(t9, id);
+ if (flags == CALL_JS) {
+ Call(t9);
+ if (post_call_generator != NULL) post_call_generator->Generate();
+ } else {
+ ASSERT(flags == JUMP_JS);
+ Jump(t9);
+ }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+ Builtins::JavaScript id) {
+ // Load the builtins object into target register.
+ lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+ // Load the JavaScript builtin function from the builtins object.
+ lw(target, FieldMemOperand(target,
+ JSBuiltinsObject::OffsetOfFunctionWithId(id)));
}
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(!target.is(a1));
+ GetBuiltinFunction(a1, id);
+ // Load the code entry point from the builtins object.
+ lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
}
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch1, Operand(value));
+ li(scratch2, Operand(ExternalReference(counter)));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ lw(scratch1, MemOperand(scratch2));
+ Addu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- UNIMPLEMENTED_MIPS();
+ ASSERT(value > 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ li(scratch2, Operand(ExternalReference(counter)));
+ lw(scratch1, MemOperand(scratch2));
+ Subu(scratch1, scratch1, Operand(value));
+ sw(scratch1, MemOperand(scratch2));
+ }
}
@@ -1199,30 +2712,144 @@
void MacroAssembler::Assert(Condition cc, const char* msg,
Register rs, Operand rt) {
- UNIMPLEMENTED_MIPS();
+ if (FLAG_debug_code)
+ Check(cc, msg, rs, rt);
+}
+
+
+void MacroAssembler::AssertRegisterIsRoot(Register reg,
+ Heap::RootListIndex index) {
+ if (FLAG_debug_code) {
+ LoadRoot(at, index);
+ Check(eq, "Register did not match expected root", reg, Operand(at));
+ }
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+ if (FLAG_debug_code) {
+ ASSERT(!elements.is(at));
+ Label ok;
+ Push(elements);
+ lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
+ Branch(&ok, eq, elements, Operand(at));
+ Abort("JSObject with fast elements map has slow elements");
+ bind(&ok);
+ Pop(elements);
+ }
}
void MacroAssembler::Check(Condition cc, const char* msg,
Register rs, Operand rt) {
- UNIMPLEMENTED_MIPS();
+ Label L;
+ Branch(&L, cc, rs, rt);
+ Abort(msg);
+ // will not return here
+ bind(&L);
}
void MacroAssembler::Abort(const char* msg) {
- UNIMPLEMENTED_MIPS();
+ Label abort_start;
+ bind(&abort_start);
+  // We want to pass the msg string like a smi to avoid GC problems; however,
+  // msg is not guaranteed to be properly aligned. Instead, we pass an aligned
+  // pointer that is a proper v8 smi, and also pass the alignment difference
+  // from the real pointer as a smi.
+ intptr_t p1 = reinterpret_cast<intptr_t>(msg);
+ intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
+ ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
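+  // For example, if msg happens to lie at an odd address, p0 is that address
+  // minus one (a valid smi encoding) and Smi::FromInt(1) is pushed as the
+  // difference, letting the runtime reconstruct the original pointer.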
+#ifdef DEBUG
+ if (msg != NULL) {
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+#endif
+ // Disable stub call restrictions to always allow calls to abort.
+ AllowStubCallsScope allow_scope(this, true);
+
+ li(a0, Operand(p0));
+ Push(a0);
+ li(a0, Operand(Smi::FromInt(p1 - p0)));
+ Push(a0);
+ CallRuntime(Runtime::kAbort, 2);
+ // will not return here
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 14, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 14;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ ASSERT(abort_instructions <= kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+ if (context_chain_length > 0) {
+ // Move up the chain of contexts to the context containing the slot.
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ // Load the function context (which is the incoming, outer context).
+ lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ for (int i = 1; i < context_chain_length; i++) {
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ lw(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
+ }
+ // The context may be an intermediate context, not a function context.
+ lw(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ } else { // Slot is in the current function context.
+ // The context may be an intermediate context, not a function context.
+ lw(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ }
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+ // Load the global or builtins object from the current context.
+ lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ // Load the global context from the global or builtins object.
+ lw(function, FieldMemOperand(function,
+ GlobalObject::kGlobalContextOffset));
+ // Load the function from the global context.
+ lw(function, MemOperand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch) {
+ // Load the initial map. The global functions all have initial maps.
+ lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ if (FLAG_debug_code) {
+ Label ok, fail;
+ CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
+ Branch(&ok);
+ bind(&fail);
+ Abort("Global functions must have initial map");
+ bind(&ok);
+ }
}
void MacroAssembler::EnterFrame(StackFrame::Type type) {
addiu(sp, sp, -5 * kPointerSize);
- li(t0, Operand(Smi::FromInt(type)));
- li(t1, Operand(CodeObject()));
+ li(t8, Operand(Smi::FromInt(type)));
+ li(t9, Operand(CodeObject()));
sw(ra, MemOperand(sp, 4 * kPointerSize));
sw(fp, MemOperand(sp, 3 * kPointerSize));
sw(cp, MemOperand(sp, 2 * kPointerSize));
- sw(t0, MemOperand(sp, 1 * kPointerSize));
- sw(t1, MemOperand(sp, 0 * kPointerSize));
+ sw(t8, MemOperand(sp, 1 * kPointerSize));
+ sw(t9, MemOperand(sp, 0 * kPointerSize));
addiu(fp, sp, 3 * kPointerSize);
}
@@ -1235,62 +2862,98 @@
}
-void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode,
- Register hold_argc,
+void MacroAssembler::EnterExitFrame(Register hold_argc,
Register hold_argv,
- Register hold_function) {
- // Compute the argv pointer and keep it in a callee-saved register.
+ Register hold_function,
+ bool save_doubles) {
// a0 is argc.
- sll(t0, a0, kPointerSizeLog2);
- add(hold_argv, sp, t0);
- addi(hold_argv, hold_argv, -kPointerSize);
+ sll(t8, a0, kPointerSizeLog2);
+ addu(hold_argv, sp, t8);
+ addiu(hold_argv, hold_argv, -kPointerSize);
// Compute callee's stack pointer before making changes and save it as
- // t1 register so that it is restored as sp register on exit, thereby
+ // t9 register so that it is restored as sp register on exit, thereby
// popping the args.
- // t1 = sp + kPointerSize * #args
- add(t1, sp, t0);
+ // t9 = sp + kPointerSize * #args
+ addu(t9, sp, t8);
+
+ // Compute the argv pointer and keep it in a callee-saved register.
+ // This only seems to be needed for crankshaft and may cause problems
+ // so it's disabled for now.
+ // Subu(s6, t9, Operand(kPointerSize));
// Align the stack at this point.
AlignStack(0);
// Save registers.
addiu(sp, sp, -12);
- sw(t1, MemOperand(sp, 8));
+ sw(t9, MemOperand(sp, 8));
sw(ra, MemOperand(sp, 4));
sw(fp, MemOperand(sp, 0));
mov(fp, sp); // Setup new frame pointer.
- // Push debug marker.
- if (mode == ExitFrame::MODE_DEBUG) {
- Push(zero_reg);
- } else {
- li(t0, Operand(CodeObject()));
- Push(t0);
- }
+ li(t8, Operand(CodeObject()));
+ Push(t8); // Accessed from ExitFrame::code_slot.
// Save the frame pointer and the context in top.
- LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address));
- sw(fp, MemOperand(t0));
- LoadExternalReference(t0, ExternalReference(Isolate::k_context_address));
- sw(cp, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+ sw(fp, MemOperand(t8));
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+ sw(cp, MemOperand(t8));
// Setup argc and the builtin function in callee-saved registers.
mov(hold_argc, a0);
mov(hold_function, a1);
+
+ // Optionally save all double registers.
+ if (save_doubles) {
+#ifdef DEBUG
+ int frame_alignment = ActivationFrameAlignment();
+#endif
+ // The stack alignment code above made sp unaligned, so add space for one
+ // more double register and use aligned addresses.
+ ASSERT(kDoubleSize == frame_alignment);
+ // Mark the frame as containing doubles by pushing a non-valid return
+ // address, i.e. 0.
+ ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
+ push(zero_reg); // Marker and alignment word.
+ int space = FPURegister::kNumRegisters * kDoubleSize + kPointerSize;
+ Subu(sp, sp, Operand(space));
+ // Remember: we only need to save every 2nd double FPU value.
+ for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ FPURegister reg = FPURegister::from_code(i);
+ sdc1(reg, MemOperand(sp, i * kDoubleSize + kPointerSize));
+ }
+ // Note that f0 will be accessible at fp - 2*kPointerSize -
+ // FPURegister::kNumRegisters * kDoubleSize, since the code slot and the
+ // alignment word were pushed after the fp.
+ }
}
-void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // TODO(regis): Use vldrm instruction.
+ // Remember: we only need to restore every 2nd double FPU value.
+ for (int i = 0; i < FPURegister::kNumRegisters; i+=2) {
+ FPURegister reg = FPURegister::from_code(i);
+ // Register f30-f31 is just below the marker.
+ const int offset = ExitFrameConstants::kMarkerOffset;
+ ldc1(reg, MemOperand(fp,
+ (i - FPURegister::kNumRegisters) * kDoubleSize + offset));
+ }
+ }
+
// Clear top frame.
- LoadExternalReference(t0, ExternalReference(Isolate::k_c_entry_fp_address));
- sw(zero_reg, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_c_entry_fp_address, isolate())));
+ sw(zero_reg, MemOperand(t8));
// Restore current context from top and clear it in debug mode.
- LoadExternalReference(t0, ExternalReference(Isolate::k_context_address));
- lw(cp, MemOperand(t0));
+ li(t8, Operand(ExternalReference(Isolate::k_context_address, isolate())));
+ lw(cp, MemOperand(t8));
#ifdef DEBUG
- sw(a3, MemOperand(t0));
+ sw(a3, MemOperand(t8));
#endif
// Pop the arguments, restore registers, and return.
@@ -1303,24 +2966,362 @@
}
+void MacroAssembler::InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2) {
+ sll(scratch1, length, kSmiTagSize);
+ LoadRoot(scratch2, map_index);
+ sw(scratch1, FieldMemOperand(string, String::kLengthOffset));
+ li(scratch1, Operand(String::kEmptyHashField));
+ sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+ sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if defined(V8_HOST_ARCH_MIPS)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one Mips
+ // platform for another Mips platform with a different alignment.
+ return OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_MIPS)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // defined(V8_HOST_ARCH_MIPS)
+}
+
+
void MacroAssembler::AlignStack(int offset) {
// On MIPS an offset of 0 aligns to 0 modulo 8 bytes,
// and an offset of 1 aligns to 4 modulo 8 bytes.
+#if defined(V8_HOST_ARCH_MIPS)
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one MIPS
+ // platform for another MIPS platform with a different alignment.
int activation_frame_alignment = OS::ActivationFrameAlignment();
+#else // defined(V8_HOST_ARCH_MIPS)
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so we will always align at
+ // this point here.
+ int activation_frame_alignment = 2 * kPointerSize;
+#endif // defined(V8_HOST_ARCH_MIPS)
if (activation_frame_alignment != kPointerSize) {
// This code needs to be made more general if this assert doesn't hold.
ASSERT(activation_frame_alignment == 2 * kPointerSize);
if (offset == 0) {
- andi(t0, sp, activation_frame_alignment - 1);
- Push(zero_reg, eq, t0, zero_reg);
+ andi(t8, sp, activation_frame_alignment - 1);
+ Push(zero_reg, eq, t8, zero_reg);
} else {
- andi(t0, sp, activation_frame_alignment - 1);
- addiu(t0, t0, -4);
- Push(zero_reg, eq, t0, zero_reg);
+ andi(t8, sp, activation_frame_alignment - 1);
+ addiu(t8, t8, -4);
+ Push(zero_reg, eq, t8, zero_reg);
}
}
}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+ Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero) {
+ Subu(scratch, reg, Operand(1));
+ Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
+ scratch, Operand(zero_reg));
+ and_(at, scratch, reg); // In the delay slot.
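+  // reg & (reg - 1) clears the lowest set bit, so it is zero exactly when
+  // reg is zero or a power of two; the zero case was already taken care of
+  // by the signed branch above (reg - 1 is then negative).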
+ Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+ Register reg2,
+ Label* on_not_both_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(1, kSmiTagMask);
+ or_(at, reg1, reg2);
+ andi(at, at, kSmiTagMask);
+ Branch(on_not_both_smi, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+ Register reg2,
+ Label* on_either_smi) {
+ STATIC_ASSERT(kSmiTag == 0);
+ ASSERT_EQ(1, kSmiTagMask);
+  // If either operand is a smi its tag bit is 0, so the AND below has a
+  // cleared tag bit whenever at least one operand is a smi.
+ and_(at, reg1, reg2);
+ andi(at, at, kSmiTagMask);
+ Branch(on_either_smi, eq, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfSmi(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+ Assert(ne, "Operand is a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotSmi(Register object) {
+ STATIC_ASSERT(kSmiTag == 0);
+ andi(at, object, kSmiTagMask);
+  Assert(eq, "Operand is not a smi", at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message) {
+ ASSERT(!src.is(at));
+ LoadRoot(at, root_value_index);
+ Assert(eq, message, src, Operand(at));
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number) {
+ lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+ lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+ JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
+ scratch2,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ // Check that neither is a smi.
+ STATIC_ASSERT(kSmiTag == 0);
+ And(scratch1, first, Operand(second));
+ And(scratch1, scratch1, Operand(kSmiTagMask));
+ Branch(failure, eq, scratch1, Operand(zero_reg));
+ JumpIfNonSmisNotBothSequentialAsciiStrings(first,
+ second,
+ scratch1,
+ scratch2,
+ failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
+ andi(scratch1, first, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
+ andi(scratch2, second, kFlatAsciiStringMask);
+ Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure) {
+ int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+ int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+ And(scratch, type, Operand(kFlatAsciiStringMask));
+ Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+}
+
+
+static const int kRegisterPassedArguments = 4;
+
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+  // Reserve space for the Isolate address, which is always passed as the
+  // last parameter.
+ num_arguments += 1;
+
+ // Up to four simple arguments are passed in registers a0..a3.
+ // Those four arguments must have reserved argument slots on the stack for
+ // mips, even though those argument slots are not normally used.
+ // Remaining arguments are pushed on the stack, above (higher address than)
+ // the argument slots.
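+  // For example, six arguments (including the Isolate slot added above) need
+  // two words above the four reserved argument slots.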
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ (StandardFrameConstants::kCArgsSlotsSize /
+ kPointerSize);
+ if (frame_alignment > kPointerSize) {
+ // Make stack end at alignment and make room for num_arguments - 4 words
+ // and the original value of sp.
+ mov(scratch, sp);
+ Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ ASSERT(IsPowerOf2(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment));
+ sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunctionHelper(no_reg, function, at, num_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+ Register scratch,
+ int num_arguments) {
+ CallCFunctionHelper(function,
+ ExternalReference::the_hole_value_location(isolate()),
+ scratch,
+ num_arguments);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments) {
+ // Push Isolate address as the last argument.
+ if (num_arguments < kRegisterPassedArguments) {
+ Register arg_to_reg[] = {a0, a1, a2, a3};
+ Register r = arg_to_reg[num_arguments];
+ li(r, Operand(ExternalReference::isolate_address()));
+ } else {
+ int stack_passed_arguments = num_arguments - kRegisterPassedArguments +
+ (StandardFrameConstants::kCArgsSlotsSize /
+ kPointerSize);
+ // Push Isolate address on the stack after the arguments.
+ li(scratch, Operand(ExternalReference::isolate_address()));
+ sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ }
+ num_arguments += 1;
+
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+  // The argument slots are presumed to have been set up by
+  // PrepareCallCFunction. The C function must be called via t9, per the
+  // MIPS ABI.
+
+#if defined(V8_HOST_ARCH_MIPS)
+ if (emit_debug_code()) {
+ int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ ASSERT(IsPowerOf2(frame_alignment));
+ Label alignment_as_expected;
+ And(at, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+ // Don't use Check here, as it will call Runtime_Abort possibly
+ // re-entering here.
+ stop("Unexpected alignment in CallCFunction");
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_MIPS
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+  if (function.is(no_reg)) {
+    li(t9, Operand(function_reference));
+    function = t9;
+  } else if (!function.is(t9)) {
+    mov(t9, function);
+    function = t9;
+  }
+
+ Call(function);
+
+ ASSERT(StandardFrameConstants::kCArgsSlotsSize % kPointerSize == 0);
+ int stack_passed_arguments = ((num_arguments <= kRegisterPassedArguments) ?
+ 0 : num_arguments - kRegisterPassedArguments) +
+ (StandardFrameConstants::kCArgsSlotsSize /
+ kPointerSize);
+
+ if (OS::ActivationFrameAlignment() > kPointerSize) {
+ lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+    Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+
+#undef BRANCH_ARGS_CHECK
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+CodePatcher::CodePatcher(byte* address, int instructions)
+ : address_(address),
+ instructions_(instructions),
+ size_(instructions * Assembler::kInstrSize),
+ masm_(address, size_ + Assembler::kGap) {
+ // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+ // bytes of instructions without failing with buffer size constraints.
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+ // Indicate that code has changed.
+ CPU::FlushICache(address_, size_);
+
+ // Check that the code was patched as expected.
+ ASSERT(masm_.pc_ == address_ + size_);
+ ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr x) {
+ masm()->emit(x);
+}
+
+
+void CodePatcher::Emit(Address addr) {
+ masm()->emit(reinterpret_cast<Instr>(addr));
+}
+
+
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 0f0365b..7ff9e17 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -36,69 +36,171 @@
// Forward declaration.
class JumpTarget;
+class PostCallGenerator;
-// Register at is used for instruction generation. So it is not safe to use it
-// unless we know exactly what we do.
+// Reserved Register Usage Summary.
+//
+// Registers t8, t9, and at are reserved for use by the MacroAssembler.
+//
+// The programmer should know that the MacroAssembler may clobber these three,
+// but won't touch other registers except in special cases.
+//
+// Per the MIPS ABI, register t9 must be used for indirect function call
+// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
+// trying to update gp register for position-independent-code. Whenever
+// MIPS generated code calls C code, it must be via t9 register.
// Register aliases
// cp is assumed to be a callee saved register.
+const Register roots = s6; // Roots array pointer.
const Register cp = s7; // JavaScript context pointer
const Register fp = s8_fp; // Alias fp
+// Registers used for condition evaluation.
+const Register condReg1 = s4;
+const Register condReg2 = s5;
enum InvokeJSFlags {
CALL_JS,
JUMP_JS
};
+
+// Flags used for the AllocateInNewSpace functions.
+enum AllocationFlags {
+ // No special flags.
+ NO_ALLOCATION_FLAGS = 0,
+  // Return the pointer to the allocated object already tagged as a heap
+  // object.
+ TAG_OBJECT = 1 << 0,
+ // The content of the result register already contains the allocation top in
+ // new space.
+ RESULT_CONTAINS_TOP = 1 << 1,
+ // Specify that the requested size of the space to allocate is specified in
+ // words instead of bytes.
+ SIZE_IN_WORDS = 1 << 2
+};
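+
+// The flags can be combined; for example, passing
+// static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS) both tags the
+// result and makes the requested size be interpreted as a word count.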
+
+// Flags used for the ObjectToDoubleFPURegister function.
+enum ObjectToDoubleFlags {
+ // No special flags.
+ NO_OBJECT_TO_DOUBLE_FLAGS = 0,
+ // Object is known to be a non smi.
+ OBJECT_NOT_SMI = 1 << 0,
+ // Don't load NaNs or infinities, branch to the non number case instead.
+ AVOID_NANS_AND_INFINITIES = 1 << 1
+};
+
+// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
+enum BranchDelaySlot {
+ USE_DELAY_SLOT,
+ PROTECT
+};
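+
+// For example, Branch(USE_DELAY_SLOT, &done) suppresses the trailing nop so
+// that the caller's next emitted instruction ends up in the branch delay
+// slot; PROTECT (the default) keeps the nop.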
+
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
- // Jump, Call, and Ret pseudo instructions implementing inter-working.
- void Jump(const Operand& target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(const Operand& target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(Register target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(byte* target, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(Register target,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(byte* target, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Ret(Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
- void Branch(Condition cond, int16_t offset, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg), Register scratch = at);
- void Branch(Condition cond, Label* L, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg), Register scratch = at);
- // conditionnal branch and link
- void BranchAndLink(Condition cond, int16_t offset, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg),
- Register scratch = at);
- void BranchAndLink(Condition cond, Label* L, Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg),
- Register scratch = at);
+// Arguments macros
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
+
+// ** Prototypes
+
+// * Prototypes for functions with no target (eg Ret()).
+#define DECLARE_NOTARGET_PROTOTYPE(Name) \
+ void Name(BranchDelaySlot bd = PROTECT); \
+ void Name(COND_TYPED_ARGS, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, COND_TYPED_ARGS) { \
+ Name(COND_ARGS, bd); \
+ }
+
+// * Prototypes for functions with a target.
+
+// Cases when relocation may be needed.
+#define DECLARE_RELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, \
+ RelocInfo::Mode rmode, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ RelocInfo::Mode rmode) { \
+ Name(target, rmode, bd); \
+ } \
+ void Name(target_type target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ RelocInfo::Mode rmode, \
+ COND_TYPED_ARGS) { \
+ Name(target, rmode, COND_ARGS, bd); \
+ }
+
+// Cases when relocation is not needed.
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+ void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, target_type target) { \
+ Name(target, bd); \
+ } \
+ void Name(target_type target, \
+ COND_TYPED_ARGS, \
+ BranchDelaySlot bd = PROTECT); \
+ inline void Name(BranchDelaySlot bd, \
+ target_type target, \
+ COND_TYPED_ARGS) { \
+ Name(target, COND_ARGS, bd); \
+ }
+
+// ** Target prototypes.
+
+#define DECLARE_JUMP_CALL_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Register) \
+ DECLARE_NORELOC_PROTOTYPE(Name, const Operand&) \
+ DECLARE_RELOC_PROTOTYPE(Name, byte*) \
+ DECLARE_RELOC_PROTOTYPE(Name, Handle<Code>)
+
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
+ DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
+ DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+
+
+DECLARE_JUMP_CALL_PROTOTYPES(Jump)
+DECLARE_JUMP_CALL_PROTOTYPES(Call)
+
+DECLARE_BRANCH_PROTOTYPES(Branch)
+DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+
+DECLARE_NOTARGET_PROTOTYPE(Ret)
+
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
+#undef DECLARE_NOTARGET_PROTOTYPE
+#undef DECLARE_NORELOC_PROTOTYPE
+#undef DECLARE_RELOC_PROTOTYPE
+#undef DECLARE_JUMP_CALL_PROTOTYPES
+#undef DECLARE_BRANCH_PROTOTYPES
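+
+// The declarations above expand to overload sets such as (illustrative):
+//   void Branch(Label* target, BranchDelaySlot bd = PROTECT);
+//   void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
+//               BranchDelaySlot bd = PROTECT);
+// so callers may write Branch(&done) or Branch(&done, eq, a0, Operand(zero_reg)).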
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = cc_always);
+ void Drop(int count,
+ Condition cond = cc_always,
+ Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ void DropAndRet(int drop = 0,
+ Condition cond = cc_always,
+ Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Call(Label* target);
+ // May do nothing if the registers are identical.
+ void Move(Register dst, Register src);
+
// Jump unconditionally to given label.
// We NEED a nop in the branch delay slot, as it used by v8, for example in
@@ -106,7 +208,7 @@
// Currently the branch delay slot is filled by the MacroAssembler.
// Use rather b(Label) for code generation.
void jmp(Label* L) {
- Branch(cc_always, L);
+ Branch(L);
}
// Load an object from the root table.
@@ -116,19 +218,164 @@
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
- // Load an external reference.
- void LoadExternalReference(Register reg, ExternalReference ext) {
- li(reg, Operand(ext));
+ // Store an object to the root table.
+ void StoreRoot(Register source,
+ Heap::RootListIndex index);
+ void StoreRoot(Register source,
+ Heap::RootListIndex index,
+ Condition cond, Register src1, const Operand& src2);
+
+
+ // Check if object is in new space.
+ // scratch can be object itself, but it will be clobbered.
+ void InNewSpace(Register object,
+ Register scratch,
+ Condition cc, // eq for new space, ne otherwise.
+ Label* branch);
+
+
+ // For the page containing |object| mark the region covering [address]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ void RecordWriteHelper(Register object,
+ Register address,
+ Register scratch);
+
+ // For the page containing |object| mark the region covering
+ // [object+offset] dirty. The object address must be in the first 8K
+ // of an allocated page. The 'scratch' registers are used in the
+ // implementation and all 3 registers are clobbered by the
+ // operation, as well as the 'at' register. RecordWrite updates the
+ // write barrier even when storing smis.
+ void RecordWrite(Register object,
+ Operand offset,
+ Register scratch0,
+ Register scratch1);
+
+ // For the page containing |object| mark the region covering
+ // [address] dirty. The object address must be in the first 8K of an
+ // allocated page. All 3 registers are clobbered by the operation,
+  // as well as the 'at' register. RecordWrite updates the write barrier
+ // even when storing smis.
+ void RecordWrite(Register object,
+ Register address,
+ Register scratch);
+
+
+ // ---------------------------------------------------------------------------
+ // Inline caching support
+
+ // Generate code for checking access rights - used for security checks
+ // on access to global objects across environments. The holder register
+ // is left untouched, whereas both scratch registers are clobbered.
+ void CheckAccessGlobalProxy(Register holder_reg,
+ Register scratch,
+ Label* miss);
+
+ inline void MarkCode(NopMarkerTypes type) {
+ nop(type);
}
- // Sets the remembered set bit for [address+offset].
- void RecordWrite(Register object, Register offset, Register scratch);
+ // Check if the given instruction is a 'type' marker.
+ // ie. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+ // nop(type)). These instructions are generated to mark special location in
+ // the code, like some special IC code.
+ static inline bool IsMarkedCode(Instr instr, int type) {
+ ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+ return IsNop(instr, type);
+ }
+ static inline int GetCodeMarker(Instr instr) {
+ uint32_t opcode = ((instr & kOpcodeMask));
+ uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+ uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+ uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+ // Return <n> if we have a sll zero_reg, zero_reg, n
+ // else return -1.
+ bool sllzz = (opcode == SLL &&
+ rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+ rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+ int type =
+ (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
+ ASSERT((type == -1) ||
+ ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+ return type;
+ }
+
+
+
+ // ---------------------------------------------------------------------------
+ // Allocation support
+
+ // Allocate an object in new space. The object_size is specified
+ // either in bytes or in words if the allocation flag SIZE_IN_WORDS
+ // is passed. If the new space is exhausted control continues at the
+ // gc_required label. The allocated object is returned in result. If
+  // the TAG_OBJECT flag is set the result is tagged as a heap object. All
+  // registers are clobbered also when control
+ // continues at the gc_required label.
+ void AllocateInNewSpace(int object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+ void AllocateInNewSpace(Register object_size,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required,
+ AllocationFlags flags);
+
+ // Undo allocation in new space. The object passed and objects allocated after
+ // it will no longer be allocated. The caller must make sure that no pointers
+ // are left to the object(s) no longer allocated as they would be invalid when
+ // allocation is undone.
+ void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+ void AllocateTwoByteString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateAsciiString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* gc_required);
+ void AllocateTwoByteConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void AllocateAsciiConsString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
+ // Allocates a heap number or jumps to the gc_required label if the young
+ // space is full and a scavenge is needed. All registers are clobbered also
+ // when control continues at the gc_required label.
+ void AllocateHeapNumber(Register result,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* gc_required);
+ void AllocateHeapNumberWithValue(Register result,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+
// ---------------------------------------------------------------------------
// Instruction macros
-#define DEFINE_INSTRUCTION(instr) \
+#define DEFINE_INSTRUCTION(instr) \
void instr(Register rd, Register rs, const Operand& rt); \
void instr(Register rd, Register rs, Register rt) { \
instr(rd, rs, Operand(rt)); \
@@ -137,7 +384,7 @@
instr(rs, rt, Operand(j)); \
}
-#define DEFINE_INSTRUCTION2(instr) \
+#define DEFINE_INSTRUCTION2(instr) \
void instr(Register rs, const Operand& rt); \
void instr(Register rs, Register rt) { \
instr(rs, Operand(rt)); \
@@ -146,8 +393,8 @@
instr(rs, Operand(j)); \
}
- DEFINE_INSTRUCTION(Add);
DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Subu);
DEFINE_INSTRUCTION(Mul);
DEFINE_INSTRUCTION2(Mult);
DEFINE_INSTRUCTION2(Multu);
@@ -162,6 +409,9 @@
DEFINE_INSTRUCTION(Slt);
DEFINE_INSTRUCTION(Sltu);
+ // MIPS32 R2 instruction macro.
+ DEFINE_INSTRUCTION(Ror);
+
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
@@ -169,8 +419,6 @@
//------------Pseudo-instructions-------------
void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
- // Move the logical ones complement of source to dest.
- void movn(Register rd, Register rt);
// load int32 in the rd register
@@ -178,6 +426,9 @@
inline void li(Register rd, int32_t j, bool gen2instr = false) {
li(rd, Operand(j), gen2instr);
}
+ inline void li(Register dst, Handle<Object> value, bool gen2instr = false) {
+ li(dst, Operand(value), gen2instr);
+ }
// Exception-generating instructions and debugging support
void stop(const char* msg);
@@ -188,19 +439,51 @@
// saved in higher memory addresses
void MultiPush(RegList regs);
void MultiPushReversed(RegList regs);
+
void Push(Register src) {
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Condition cond = al) {
+ ASSERT(cond == al); // Do not support conditional versions yet.
+ Subu(sp, sp, Operand(2 * kPointerSize));
+ sw(src1, MemOperand(sp, 1 * kPointerSize));
+ sw(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Condition cond = al) {
+ ASSERT(cond == al); // Do not support conditional versions yet.
+ Addu(sp, sp, Operand(3 * -kPointerSize));
+ sw(src1, MemOperand(sp, 2 * kPointerSize));
+ sw(src2, MemOperand(sp, 1 * kPointerSize));
+ sw(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2,
+ Register src3, Register src4, Condition cond = al) {
+ ASSERT(cond == al); // Do not support conditional versions yet.
+ Addu(sp, sp, Operand(4 * -kPointerSize));
+ sw(src1, MemOperand(sp, 3 * kPointerSize));
+ sw(src2, MemOperand(sp, 2 * kPointerSize));
+ sw(src3, MemOperand(sp, 1 * kPointerSize));
+ sw(src4, MemOperand(sp, 0 * kPointerSize));
+ }
+
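Illustration only (editor-added, not part of this patch): the multi-register overloads above push left to right, so the first argument ends up at the highest address, exactly as four consecutive single-register pushes would.

static void PushFourExample(MacroAssembler* masm) {
  masm->Push(a0, a1, a2, a3);
  // Resulting layout (sp grows down):
  //   [sp + 12] = a0   (pushed first, highest address)
  //   [sp +  8] = a1
  //   [sp +  4] = a2
  //   [sp +  0] = a3   (pushed last)
}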
inline void push(Register src) { Push(src); }
+ inline void pop(Register src) { Pop(src); }
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
- Branch(cond, 3, tst1, Operand(tst2));
+ Branch(3, cond, tst1, Operand(tst2));
Addu(sp, sp, Operand(-kPointerSize));
sw(src, MemOperand(sp, 0));
}
+
// Pops multiple values from the stack and loads them into the
// registers specified in regs. Pop order is the opposite of MultiPush.
void MultiPop(RegList regs);
@@ -209,44 +492,108 @@
lw(dst, MemOperand(sp, 0));
Addu(sp, sp, Operand(kPointerSize));
}
- void Pop() {
- Add(sp, sp, Operand(kPointerSize));
+ void Pop(uint32_t count = 1) {
+ Addu(sp, sp, Operand(count * kPointerSize));
}
+ // ---------------------------------------------------------------------------
+ // These functions are only used by crankshaft, so they are currently
+ // unimplemented.
+
+ // Push and pop the registers that can hold pointers, as defined by the
+ // RegList constant kSafepointSavedRegisters.
+ void PushSafepointRegisters() {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void PopSafepointRegisters() {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void PushSafepointRegistersAndDoubles() {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void PopSafepointRegistersAndDoubles() {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ static int SafepointRegisterStackIndex(int reg_code) {
+ UNIMPLEMENTED_MIPS();
+ return 0;
+ }
// ---------------------------------------------------------------------------
+
+ // MIPS32 R2 instruction macro.
+ void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+ void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+ // Convert unsigned word to double.
+ void Cvt_d_uw(FPURegister fd, FPURegister fs);
+ void Cvt_d_uw(FPURegister fd, Register rs);
+
+ // Convert double to unsigned word.
+ void Trunc_uw_d(FPURegister fd, FPURegister fs);
+ void Trunc_uw_d(FPURegister fd, Register rs);
+
+ // Convert the HeapNumber pointed to by source to a 32-bit signed integer
+ // dest. If the HeapNumber does not fit into a 32-bit signed integer, branch
+ // to the not_int32 label. If FPU is available, double_scratch is used but
+ // not scratch2.
+ void ConvertToInt32(Register source,
+ Register dest,
+ Register scratch,
+ Register scratch2,
+ FPURegister double_scratch,
+ Label* not_int32);
+
+ // -------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
- // Enter specific kind of exit frame; either EXIT or
- // EXIT_DEBUG. Expects the number of arguments in register a0 and
+ void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
+ void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
+
+ // Enter exit frame.
+ // Expects the number of arguments in register a0 and
// the builtin function to call in register a1.
// On output hold_argc, hold_function, and hold_argv are setup.
- void EnterExitFrame(ExitFrame::Mode mode,
- Register hold_argc,
+ void EnterExitFrame(Register hold_argc,
Register hold_argv,
- Register hold_function);
+ Register hold_function,
+ bool save_doubles);
// Leave the current exit frame. Expects the return value in v0.
- void LeaveExitFrame(ExitFrame::Mode mode);
+ void LeaveExitFrame(bool save_doubles);
// Align the stack by optionally pushing a Smi zero.
- void AlignStack(int offset);
+ void AlignStack(int offset); // TODO(mips) : remove this function.
- void SetupAlignedCall(Register scratch, int arg_count = 0);
- void ReturnFromAlignedCall();
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+ void LoadContext(Register dst, int context_chain_length);
- // ---------------------------------------------------------------------------
+ void LoadGlobalFunction(int index, Register function);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same; function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function,
+ Register map,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
@@ -258,84 +605,135 @@
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
+
+ void InvokeFunction(JSFunction* function,
+ const ParameterCount& actual,
InvokeFlag flag);
+ void IsObjectJSObjectType(Register heap_object,
+ Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsInstanceJSObjectType(Register map,
+ Register scratch,
+ Label* fail);
+
+ void IsObjectJSStringType(Register object,
+ Register scratch,
+ Label* fail);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
- // ---------------------------------------------------------------------------
+ // -------------------------------------------------------------------------
// Debugger Support
- void SaveRegistersToMemory(RegList regs);
- void RestoreRegistersFromMemory(RegList regs);
- void CopyRegistersFromMemoryToStack(Register base, RegList regs);
- void CopyRegistersFromStackToMemory(Register base,
- Register scratch,
- RegList regs);
void DebugBreak();
#endif
- // ---------------------------------------------------------------------------
+ // -------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
// The return address must be passed in register ra.
+ // Clobber t0, t1, t2.
void PushTryHandler(CodeLocation try_location, HandlerType type);
// Unlink the stack handler on top of the stack from the try handler chain.
// Must preserve the result register.
void PopTryHandler();
+ // Copies a fixed number of fields of heap objects from src to dst.
+ void CopyFields(Register dst, Register src, RegList temps, int field_count);
- // ---------------------------------------------------------------------------
+ // -------------------------------------------------------------------------
// Support functions.
+ // Tries to get the function prototype of a function and puts the value in
+ // the result register. Checks that the function really is a
+ // function and jumps to the miss label if the fast checks fail. The
+ // function register will be untouched; the other registers may be
+ // clobbered.
+ void TryGetFunctionPrototype(Register function,
+ Register result,
+ Register scratch,
+ Label* miss);
+
void GetObjectType(Register function,
Register map,
Register type_reg);
- inline void BranchOnSmi(Register value, Label* smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(eq, smi_label, scratch, Operand(zero_reg));
- }
+ // Check if the map of an object is equal to a specified map (either
+ // given directly or as an index into the root list) and branch to
+ // the label if not. Skip the smi check if not required (object is known
+ // to be a heap object).
+ void CheckMap(Register obj,
+ Register scratch,
+ Handle<Map> map,
+ Label* fail,
+ bool is_heap_object);
-
- inline void BranchOnNotSmi(Register value, Label* not_smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(ne, not_smi_label, scratch, Operand(zero_reg));
- }
-
- void CallBuiltin(ExternalReference builtin_entry);
- void CallBuiltin(Register target);
- void JumpToBuiltin(ExternalReference builtin_entry);
- void JumpToBuiltin(Register target);
+ void CheckMap(Register obj,
+ Register scratch,
+ Heap::RootListIndex index,
+ Label* fail,
+ bool is_heap_object);
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
+ // Picks out an array index from the hash field.
+ // Register use:
+ // hash - holds the index's hash. Clobbered.
+ // index - holds the overwritten index on exit.
+ void IndexFromHash(Register hash, Register index);
- // ---------------------------------------------------------------------------
+ // Load the value of a number object into an FPU double register. If the
+ // object is not a number, a jump to the label not_number is performed
+ // and the FPU double register is unchanged.
+ void ObjectToDoubleFPURegister(
+ Register object,
+ FPURegister value,
+ Register scratch1,
+ Register scratch2,
+ Register heap_number_map,
+ Label* not_number,
+ ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+
+ // Load the value of a smi object into an FPU double register. The register
+ // scratch1 can be the same register as smi in which case smi will hold the
+ // untagged value afterwards.
+ void SmiToDoubleFPURegister(Register smi,
+ FPURegister value,
+ Register scratch1);
+
+ // -------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = cc_always,
Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+
+ // Tail call a code stub (jump).
+ void TailCallStub(CodeStub* stub);
+
void CallJSExitStub(CodeStub* stub);
- // Return from a code stub after popping its arguments.
- void StubReturn(int argc);
-
// Call a runtime routine.
- void CallRuntime(Runtime::Function* f, int num_arguments);
+ void CallRuntime(const Runtime::Function* f, int num_arguments);
+ void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId fid, int num_arguments);
+ // Convenience function: call an external reference.
+ void CallExternalReference(const ExternalReference& ext,
+ int num_arguments);
+
// Tail call of a runtime routine (jump).
// Like JumpToExternalReference, but also takes care of passing the number
// of parameters.
@@ -348,34 +746,54 @@
int num_arguments,
int result_size);
+ // Before calling a C function from generated code, align arguments on the
+ // stack and add space for the four MIPS argument slots.
+ // After aligning the frame, non-register arguments must be stored on the
+ // stack, after the argument slots, using the helper CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_arguments, Register scratch);
+
+ // Arguments 1-4 are placed in registers a0 through a3, respectively.
+ // Arguments 5..n are stored on the stack as follows:
+ // sw(t0, CFunctionArgumentOperand(5));
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, Register scratch, int num_arguments);
+
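Illustration only (editor-added, not part of this patch): calling a two-argument C function through an ExternalReference; "ref", the argument values and the scratch register are placeholders.

static void CallCExample(MacroAssembler* masm, ExternalReference ref) {
  const int kNumArguments = 2;
  masm->PrepareCallCFunction(kNumArguments, t9);  // t9 used as scratch.
  masm->mov(a0, s0);                              // First argument in a0.
  masm->li(a1, Operand(42));                      // Second argument in a1.
  masm->CallCFunction(ref, kNumArguments);        // Also releases the slots.
}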
// Jump to the builtin routine.
void JumpToExternalReference(const ExternalReference& builtin);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
- void InvokeBuiltin(Builtins::JavaScript id, InvokeJSFlags flags);
+ void InvokeBuiltin(Builtins::JavaScript id,
+ InvokeJSFlags flags,
+ PostCallGenerator* post_call_generator = NULL);
// Store the code object for the given builtin in the target register and
- // setup the function in r1.
+ // setup the function in a1.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+ // Store the function for the given builtin in the target register.
+ void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
struct Unresolved {
int pc;
uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
- List<Unresolved>* unresolved() { return &unresolved_; }
Handle<Object> CodeObject() { return code_object_; }
-
- // ---------------------------------------------------------------------------
- // Stack limit support
-
- void StackLimitCheck(Label* on_stack_limit_hit);
-
-
- // ---------------------------------------------------------------------------
+ // -------------------------------------------------------------------------
// StatsCounter support
void SetCounter(StatsCounter* counter, int value,
@@ -386,12 +804,14 @@
Register scratch1, Register scratch2);
- // ---------------------------------------------------------------------------
+ // -------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg, Register rs, Operand rt);
+ void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
+ void AssertFastElements(Register elements);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg, Register rs, Operand rt);
@@ -405,17 +825,132 @@
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
- private:
- List<Unresolved> unresolved_;
- bool generating_stub_;
- bool allow_stub_calls_;
- // This handle will be patched with the code object on installation.
- Handle<Object> code_object_;
+ // ---------------------------------------------------------------------------
+ // Number utilities
+ // Check whether the value of reg is a power of two and not zero. If not,
+ // control continues at the label not_power_of_two_or_zero. If reg is a
+ // power of two, the register scratch contains the value of (reg - 1) when
+ // control falls through.
+ void JumpIfNotPowerOfTwoOrZero(Register reg,
+ Register scratch,
+ Label* not_power_of_two_or_zero);
+
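Illustration only (editor-added, not part of this patch): the helper above performs the standard bit trick shown in this standalone sketch; a non-zero x is a power of two exactly when (x & (x - 1)) == 0, and x - 1 is what the scratch register holds on fall-through.

#include <cstdint>
#include <cstdio>

static bool IsPowerOfTwoNotZero(uint32_t x, uint32_t* scratch) {
  *scratch = x - 1;  // Mirrors the value left in the scratch register.
  return x != 0 && (x & (x - 1)) == 0;
}

int main() {
  uint32_t scratch = 0;
  std::printf("%d\n", IsPowerOfTwoNotZero(64, &scratch));  // 1, scratch == 63
  std::printf("%d\n", IsPowerOfTwoNotZero(48, &scratch));  // 0
  return 0;
}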
+ // -------------------------------------------------------------------------
+ // Smi utilities
+
+ // Try to convert int32 to smi. If the value is too large, preserve
+ // the original value and jump to not_a_smi. Destroys scratch and
+ // sets flags.
+ // This is only used by crankshaft at the moment, so it is unimplemented
+ // on MIPS.
+ void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void SmiTag(Register reg) {
+ Addu(reg, reg, reg);
+ }
+
+ void SmiTag(Register dst, Register src) {
+ Addu(dst, src, src);
+ }
+
+ void SmiUntag(Register reg) {
+ sra(reg, reg, kSmiTagSize);
+ }
+
+ void SmiUntag(Register dst, Register src) {
+ sra(dst, src, kSmiTagSize);
+ }
+
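Illustration only (editor-added, not part of this patch): on 32-bit targets kSmiTag is 0 and kSmiTagSize is 1, so the Addu above doubles the value (a left shift by one) and SmiUntag shifts it back. A standalone sketch of the encoding:

#include <cassert>
#include <cstdint>

static int32_t TagSmi(int32_t value) { return value + value; }  // value << 1
static int32_t UntagSmi(int32_t smi) { return smi >> 1; }       // sra by 1

int main() {
  // Values must fit in 31 bits; TrySmiTag above is the checked variant.
  assert(TagSmi(21) == 42);
  assert(UntagSmi(TagSmi(-7)) == -7);
  assert((TagSmi(21) & 1) == 0);  // The tag bit (bit 0) is 0 for smis.
  return 0;
}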
+ // Jump if the register contains a smi.
+ inline void JumpIfSmi(Register value, Label* smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
+ }
+
+ // Jump if the register contains a non-smi.
+ inline void JumpIfNotSmi(Register value, Label* not_smi_label,
+ Register scratch = at) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+ }
+
+ // Jump if either of the registers contain a non-smi.
+ void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+ // Jump if either of the registers contain a smi.
+ void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+ // Abort execution if argument is a smi. Used in debug code.
+ void AbortIfSmi(Register object);
+ void AbortIfNotSmi(Register object);
+
+ // Abort execution if argument is not the root value with the given index.
+ void AbortIfNotRootValue(Register src,
+ Heap::RootListIndex root_value_index,
+ const char* message);
+
+ // ---------------------------------------------------------------------------
+ // HeapNumber utilities
+
+ void JumpIfNotHeapNumber(Register object,
+ Register heap_number_map,
+ Register scratch,
+ Label* on_not_heap_number);
+
+ // -------------------------------------------------------------------------
+ // String utilities
+
+ // Checks if both instance types are sequential ASCII strings and jumps to
+ // the label if either is not.
+ void JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Check if instance type is sequential ASCII string and jump to label if
+ // it is not.
+ void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
+ Register scratch,
+ Label* failure);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Assume that they are non-smis.
+ void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ // Test that both first and second are sequential ASCII strings.
+ // Check that they are non-smis.
+ void JumpIfNotBothSequentialAsciiStrings(Register first,
+ Register second,
+ Register scratch1,
+ Register scratch2,
+ Label* failure);
+
+ private:
+ void CallCFunctionHelper(Register function,
+ ExternalReference function_reference,
+ Register scratch,
+ int num_arguments);
+
+ void Jump(intptr_t target, RelocInfo::Mode rmode,
+ BranchDelaySlot bd = PROTECT);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
+ void Call(intptr_t target, RelocInfo::Mode rmode,
+ BranchDelaySlot bd = PROTECT);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = cc_always,
- Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg));
+ Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg),
+ BranchDelaySlot bd = PROTECT);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@@ -423,22 +958,84 @@
Handle<Code> code_constant,
Register code_reg,
Label* done,
- InvokeFlag flag);
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator = NULL);
// Get the code for the given builtin. Returns if able to resolve
// the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
- // EnterFrame clobbers t0 and t1.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
+
+ void InitializeNewString(Register string,
+ Register length,
+ Heap::RootListIndex map_index,
+ Register scratch1,
+ Register scratch2);
+
+
+ bool generating_stub_;
+ bool allow_stub_calls_;
+ // This handle will be patched with the code object on installation.
+ Handle<Object> code_object_;
+};
+
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+ CodePatcher(byte* address, int instructions);
+ virtual ~CodePatcher();
+
+ // Macro assembler to emit code.
+ MacroAssembler* masm() { return &masm_; }
+
+ // Emit an instruction directly.
+ void Emit(Instr x);
+
+ // Emit an address directly.
+ void Emit(Address addr);
+
+ private:
+ byte* address_; // The address of the code being patched.
+ int instructions_; // Number of instructions of the expected patch size.
+ int size_; // Number of bytes of the expected patch size.
+ MacroAssembler masm_; // Macro assembler used to generate the code.
+};
+#endif // ENABLE_DEBUGGER_SUPPORT
+
+
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+ PostCallGenerator() { }
+ virtual ~PostCallGenerator() { }
+ virtual void Generate() = 0;
};
// -----------------------------------------------------------------------------
// Static helper functions.
+static MemOperand ContextOperand(Register context, int index) {
+ return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+static inline MemOperand GlobalObjectOperand() {
+ return ContextOperand(cp, Context::GLOBAL_INDEX);
+}
+
+
// Generate a MemOperand for loading a field from an object.
static inline MemOperand FieldMemOperand(Register object, int offset) {
return MemOperand(object, offset - kHeapObjectTag);
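Illustration only (editor-added, not part of this patch): the static helpers above fold the context slot offset and the heap-object tag into a MemOperand, so a typical global-object load reads naturally. HeapObject::kMapOffset is the usual field offset from objects.h.

static void LoadGlobalExample(MacroAssembler* masm) {
  masm->lw(a0, GlobalObjectOperand());                        // cp-relative load.
  masm->lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));  // Untagged offset.
}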
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
new file mode 100644
index 0000000..d1dbc43
--- /dev/null
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -0,0 +1,478 @@
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_MIPS)
+
+#include "unicode.h"
+#include "log.h"
+#include "code-stubs.h"
+#include "regexp-stack.h"
+#include "macro-assembler.h"
+#include "regexp-macro-assembler.h"
+#include "mips/regexp-macro-assembler-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - t1 : Pointer to current code object (Code*) including heap object tag.
+ * - t2 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - t3 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t4 : points to tip of backtrack stack
+ * - t5 : Unused.
+ * - t6 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ *
+ * Each call to a public method should retain this convention.
+ * The stack will have the following structure:
+ * - direct_call (if 1, direct call from JavaScript code, if 0 call
+ * through the runtime system)
+ * - stack_area_base (High end of the memory area to use as
+ * backtracking stack)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - stack frame header (16 bytes in size)
+ * --- sp when called ---
+ * - link address
+ * - backup of registers s0..s7
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
+ * - start index (character index of start)
+ * --- frame pointer ----
+ * - void* input_string (location of a handle containing the string)
+ * - Offset of location before start of input (effectively character
+ * position -1). Used to initialize capture registers to a non-position.
+ * - At start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - register 0 (Only positions must be stored in the first
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code, by calling the code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * bool at_start,
+ * byte* stack_area_base,
+ * bool direct_call)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc).
+ */
+
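Illustration only (editor-added, not part of this patch): the entry point produced by GetCode() is invoked with exactly the signature listed above. A caller (normally NativeRegExpMacroAssembler::Execute) would do roughly the following; code_entry is a placeholder for the generated code's address, and direct_call is false when coming through the runtime system.

typedef int (*RegExpEntry)(String* input_string,
                           int start_index,
                           Address start,
                           Address end,
                           int* capture_output_array,
                           bool at_start,
                           byte* stack_area_base,
                           bool direct_call);

static int CallGeneratedRegExp(Address code_entry,
                               String* subject,
                               int start_index,
                               Address start,
                               Address end,
                               int* captures,
                               bool at_start,
                               byte* stack_base) {
  RegExpEntry entry = reinterpret_cast<RegExpEntry>(code_entry);
  return entry(subject, start_index, start, end, captures, at_start,
               stack_base, false);
}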
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
+ Mode mode,
+ int registers_to_save)
+ : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_() {
+ ASSERT_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ __ bind(&start_label_); // And then continue from here.
+}
+
+
+RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerMIPS::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Backtrack() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Bind(Label* label) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
+ int start_reg,
+ Label* on_no_match) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(
+ int start_reg,
+ Label* on_no_match) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotRegistersEqual(int reg1,
+ int reg2,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
+ uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match) {
+ UNIMPLEMENTED_MIPS();
+ return false;
+}
+
+
+void RegExpMacroAssemblerMIPS::Fail() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Handle<Object> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
+ UNIMPLEMENTED_MIPS();
+ return Handle<Object>::null();
+}
+
+
+void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
+ int comparand,
+ Label* if_ge) {
+ __ lw(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
+ int comparand,
+ Label* if_lt) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
+ Label* if_eq) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+ RegExpMacroAssemblerMIPS::Implementation() {
+ return kMIPSImplementation;
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds,
+ int characters) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+
+void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
+ StackCheckFlag check_stack_limit) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Succeed() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
+ int cp_offset) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
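Illustration only (editor-added, not part of this patch): frame_entry reinterprets a 32-bit frame slot as a reference of the requested type, so it can appear on either side of an assignment. The offset below is a placeholder; the real slots are the kXxx constants declared in the header.

static void ExampleFrameAccess(Address re_frame) {
  const int kExampleOffset = 4 * kPointerSize;               // Hypothetical slot.
  int saved = frame_entry<int>(re_frame, kExampleOffset);    // Read the slot.
  frame_entry<int>(re_frame, kExampleOffset) = saved + 1;    // Write it back.
}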
+
+int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame) {
+ UNIMPLEMENTED_MIPS();
+ return 0;
+}
+
+
+MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
+ UNIMPLEMENTED_MIPS();
+ return MemOperand(zero_reg, 0);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeReturn() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Push(Register source) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::Pop(Register target) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPreemption() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckStackLimit() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::CallCFunctionUsingStub(
+ ExternalReference function,
+ int num_arguments) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
+ int characters) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+#undef __
+
+#endif // V8_INTERPRETED_REGEXP
+
+}} // namespace v8::internal
+
+#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
new file mode 100644
index 0000000..2f4319f
--- /dev/null
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -0,0 +1,250 @@
+// Copyright 2006-2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public RegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerMIPS();
+ virtual ~RegExpMacroAssemblerMIPS();
+};
+#else // V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save);
+ virtual ~RegExpMacroAssemblerMIPS();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+ virtual void CheckCharacters(Vector<const uc16> str,
+ int cp_offset,
+ Label* on_failure,
+ bool check_end_of_string);
+ // A "greedy loop" is a loop that is both greedy and has a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ Label* on_no_match);
+ virtual void CheckNotRegistersEqual(int reg1, int reg2, Label* on_not_equal);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+ uc16 minus,
+ uc16 mask,
+ Label* on_not_equal);
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(uc16 type,
+ Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<Object> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacter(int cp_offset,
+ Label* on_end_of_input,
+ bool check_bounds = true,
+ int characters = 1);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual void Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ static int CheckStackGuardState(Address* return_address,
+ Code* re_code,
+ Address re_frame);
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s0 to s7, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress + kPointerSize;
+ // Stack parameters placed by caller.
+ static const int kRegisterOutput = kStackFrameHeader + 16;
+ static const int kStackHighEnd = kRegisterOutput + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
+ static const int kIsolate = kDirectCall + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kInputEnd = kFramePointer - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kInputStartMinusOne = kInputString - kPointerSize;
+ static const int kAtStart = kInputStartMinusOne - kPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kAtStart - kPointerSize;
+
+ // Initial size of code buffer.
+ static const size_t kRegExpCodeSize = 1024;
+
+ // Load a number of characters at the given offset from the
+ // current position, into the current-character register.
+ void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+ // The fp-relative location of a regexp register.
+ MemOperand register_location(int register_index);
+
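  // Illustration only (editor-added, not part of this patch):
  // register_location() is still UNIMPLEMENTED_MIPS() in the .cc file above.
  // The ARM port computes the slot from kRegisterZero, and a MIPS version
  // would plausibly look like this:
  //
  //   MemOperand RegExpMacroAssemblerMIPS::register_location(int index) {
  //     if (num_registers_ <= index) {
  //       num_registers_ = index + 1;  // Grow the register file on demand.
  //     }
  //     return MemOperand(frame_pointer(),
  //                       kRegisterZero - index * kPointerSize);
  //   }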
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return t2; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return t3; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return t6; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return t4; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return t1; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument)
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is NULL, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt);
+
+ // Call and return internally in the generated code in a way that
+ // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+ inline void SafeCall(Label* to,
+ Condition cond,
+ Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
+
+ // Calls a C function and cleans up the frame alignment done
+ // by FrameAlign. The called function *is* allowed to trigger a garbage
+ // collection, but may not take more than four arguments (no arguments
+ // passed on the stack), and the first argument will be a pointer to the
+ // return address.
+ inline void CallCFunctionUsingStub(ExternalReference function,
+ int num_arguments);
+
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (ASCII or UC16).
+ Mode mode_;
+
+ // One greater than maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1)
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+};
+
+#endif // V8_INTERPRETED_REGEXP
+
+
+}} // namespace v8::internal
+
+#endif // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
diff --git a/src/mips/register-allocator-mips-inl.h b/src/mips/register-allocator-mips-inl.h
index a876bee..bbfb31d 100644
--- a/src/mips/register-allocator-mips-inl.h
+++ b/src/mips/register-allocator-mips-inl.h
@@ -125,9 +125,6 @@
void RegisterAllocator::Initialize() {
Reset();
- // The non-reserved a1 and ra registers are live on JS function entry.
- Use(a1); // JS function.
- Use(ra); // Return address.
}
diff --git a/src/mips/register-allocator-mips.h b/src/mips/register-allocator-mips.h
index e056fb8..c448923 100644
--- a/src/mips/register-allocator-mips.h
+++ b/src/mips/register-allocator-mips.h
@@ -35,8 +35,9 @@
class RegisterAllocatorConstants : public AllStatic {
public:
- static const int kNumRegisters = assembler::mips::kNumRegisters;
- static const int kInvalidRegister = assembler::mips::kInvalidRegister;
+ // No registers are currently managed by the register allocator on MIPS.
+ static const int kNumRegisters = 0;
+ static const int kInvalidRegister = -1;
};
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index 59a5373..50ad7a1 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -26,6 +26,8 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
+#include <math.h>
+#include <limits.h>
#include <cstdarg>
#include "v8.h"
@@ -37,23 +39,25 @@
#include "mips/constants-mips.h"
#include "mips/simulator-mips.h"
-namespace v8i = v8::internal;
-
-#if !defined(__mips) || defined(USE_SIMULATOR)
// Only build the simulator if not compiling for real MIPS hardware.
-namespace assembler {
-namespace mips {
+#if defined(USE_SIMULATOR)
-using ::v8::internal::Object;
-using ::v8::internal::PrintF;
-using ::v8::internal::OS;
-using ::v8::internal::ReadLine;
-using ::v8::internal::DeleteArray;
+namespace v8 {
+namespace internal {
// Utility functions.
bool HaveSameSign(int32_t a, int32_t b) {
- return ((a ^ b) > 0);
+ return ((a ^ b) >= 0);
+}
+
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
}
@@ -63,15 +67,18 @@
// Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
-// The Debugger class is used by the simulator while debugging simulated MIPS
+// The MipsDebugger class is used by the simulator while debugging simulated
// code.
-class Debugger {
+class MipsDebugger {
public:
- explicit Debugger(Simulator* sim);
- ~Debugger();
+ explicit MipsDebugger(Simulator* sim);
+ ~MipsDebugger();
void Stop(Instruction* instr);
void Debug();
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
private:
// We set the breakpoint code to 0xfffff to easily recognize it.
@@ -81,6 +88,10 @@
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
+ int32_t GetFPURegisterValueInt(int regnum);
+ int64_t GetFPURegisterValueLong(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
bool GetValue(const char* desc, int32_t* value);
// Set or delete a breakpoint. Returns true if successful.
@@ -91,18 +102,17 @@
// execution to skip past breakpoints when run from the debugger.
void UndoBreakpoints();
void RedoBreakpoints();
-
- // Print all registers with a nice formatting.
- void PrintAllRegs();
};
-Debugger::Debugger(Simulator* sim) {
+MipsDebugger::MipsDebugger(Simulator* sim) {
sim_ = sim;
}
-Debugger::~Debugger() {
+
+MipsDebugger::~MipsDebugger() {
}
+
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
@@ -115,7 +125,7 @@
}
-void Debugger::Stop(Instruction* instr) {
+void MipsDebugger::Stop(Instruction* instr) {
UNIMPLEMENTED_MIPS();
char* str = reinterpret_cast<char*>(instr->InstructionBits());
if (strlen(str) > 0) {
@@ -125,9 +135,10 @@
}
instr->SetInstructionBits(0x0); // Overwrite with nop.
}
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
}
+
#else // ndef GENERATED_CODE_COVERAGE
#define UNSUPPORTED() printf("Unsupported instruction.\n");
@@ -135,16 +146,16 @@
static void InitializeCoverage() {}
-void Debugger::Stop(Instruction* instr) {
+void MipsDebugger::Stop(Instruction* instr) {
const char* str = reinterpret_cast<char*>(instr->InstructionBits());
PrintF("Simulator hit %s\n", str);
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
Debug();
}
#endif // GENERATED_CODE_COVERAGE
-int32_t Debugger::GetRegisterValue(int regnum) {
+int32_t MipsDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
return sim_->get_pc();
} else {
@@ -153,11 +164,54 @@
}
-bool Debugger::GetValue(const char* desc, int32_t* value) {
+int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+
+int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_long(regnum);
+ }
+}
+
+
+float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+
+double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+
+bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
if (regnum != kInvalidRegister) {
*value = GetRegisterValue(regnum);
return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValueInt(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
} else {
return SScanF(desc, "%i", value) == 1;
}
@@ -165,7 +219,7 @@
}
-bool Debugger::SetBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
return false;
@@ -180,7 +234,7 @@
}
-bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
+bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
@@ -191,20 +245,21 @@
}
-void Debugger::UndoBreakpoints() {
+void MipsDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
-void Debugger::RedoBreakpoints() {
+void MipsDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
-void Debugger::PrintAllRegs() {
+
+void MipsDebugger::PrintAllRegs() {
#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
PrintF("\n");
@@ -237,10 +292,45 @@
// pc
PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n",
REG_INFO(31), REG_INFO(34));
+
#undef REG_INFO
+#undef FPU_REG_INFO
}
-void Debugger::Debug() {
+
+void MipsDebugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
+ GetFPURegisterValueInt(n+1), \
+ GetFPURegisterValueInt(n), \
+ GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
+
+#undef REG_INFO
+#undef FPU_REG_INFO
+}
+
+
+void MipsDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@@ -253,6 +343,7 @@
char cmd[COMMAND_SIZE + 1];
char arg1[ARG_SIZE + 1];
char arg2[ARG_SIZE + 1];
+ char* argv[3] = { cmd, arg1, arg2 };
// make sure to have a proper terminating character if reaching the limit
cmd[COMMAND_SIZE] = 0;
@@ -280,19 +371,21 @@
} else {
// Use sscanf to parse the individual parts of the command line. At the
// moment no command expects more than two parameters.
- int args = SScanF(line,
+ int argc = SScanF(line,
"%" XSTR(COMMAND_SIZE) "s "
"%" XSTR(ARG_SIZE) "s "
"%" XSTR(ARG_SIZE) "s",
cmd, arg1, arg2);
if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
- if (!(reinterpret_cast<Instruction*>(sim_->get_pc())->IsTrap())) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
sim_->InstructionDecode(
- reinterpret_cast<Instruction*>(sim_->get_pc()));
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
} else {
// Allow si to jump over generated breakpoints.
PrintF("/!\\ Jumping over generated breakpoint.\n");
- sim_->set_pc(sim_->get_pc() + Instruction::kInstructionSize);
+ sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
}
} else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
// Execute the one instruction we broke at with breakpoints disabled.
@@ -300,23 +393,65 @@
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
+ float fvalue;
if (strcmp(arg1, "all") == 0) {
PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
} else {
- if (GetValue(arg1, &value)) {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
PrintF("%s: 0x%08x %d \n", arg1, value, value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ if (fpuregnum % 2 == 1) {
+ value = GetFPURegisterValueInt(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ double dfvalue;
+ int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
+ int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
+ dfvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
+ FPURegisters::Name(fpuregnum+1),
+ FPURegisters::Name(fpuregnum),
+ lvalue1,
+ lvalue2,
+ dfvalue);
+ }
} else {
PrintF("%s unrecognized\n", arg1);
}
}
} else {
- PrintF("print <register>\n");
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int32_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValueInt(fpuregnum);
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
}
} else if ((strcmp(cmd, "po") == 0)
|| (strcmp(cmd, "printobject") == 0)) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
@@ -333,6 +468,39 @@
} else {
PrintF("printobject <value>\n");
}
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+ int32_t* cur = NULL;
+ int32_t* end = NULL;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
+ } else { // "mem"
+ int32_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int32_t*>(value);
+ next_arg++;
+ }
+
+      int32_t words = 10;  // Default; also covers unexpected extra arguments.
+ if (argc == next_arg) {
+ words = 10;
+ } else if (argc == next_arg + 1) {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ while (cur < end) {
+ PrintF(" 0x%08x: 0x%08x %10d\n",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ cur++;
+ }
+
} else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0)) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
@@ -342,36 +510,37 @@
byte_* cur = NULL;
byte_* end = NULL;
- if (args == 1) {
+ if (argc == 1) {
cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstructionSize);
- } else if (args == 2) {
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte_*>(value);
// no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstructionSize);
+ end = cur + (10 * Instruction::kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstructionSize);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
- cur += Instruction::kInstructionSize;
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
}
} else if (strcmp(cmd, "gdb") == 0) {
PrintF("relinquishing control to gdb\n");
v8::internal::OS::DebugBreak();
PrintF("regaining control from gdb\n");
} else if (strcmp(cmd, "break") == 0) {
- if (args == 2) {
+ if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
@@ -404,29 +573,30 @@
byte_* cur = NULL;
byte_* end = NULL;
- if (args == 1) {
+ if (argc == 1) {
cur = reinterpret_cast<byte_*>(sim_->get_pc());
- end = cur + (10 * Instruction::kInstructionSize);
- } else if (args == 2) {
+ end = cur + (10 * Instruction::kInstrSize);
+ } else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte_*>(value);
// no length parameter passed, assume 10 instructions
- end = cur + (10 * Instruction::kInstructionSize);
+ end = cur + (10 * Instruction::kInstrSize);
}
} else {
int32_t value1;
int32_t value2;
if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
cur = reinterpret_cast<byte_*>(value1);
- end = cur + (value2 * Instruction::kInstructionSize);
+ end = cur + (value2 * Instruction::kInstrSize);
}
}
while (cur < end) {
dasm.InstructionDecode(buffer, cur);
- PrintF(" 0x%08x %s\n", cur, buffer.start());
- cur += Instruction::kInstructionSize;
+ PrintF(" 0x%08x %s\n",
+ reinterpret_cast<intptr_t>(cur), buffer.start());
+ cur += Instruction::kInstrSize;
}
} else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
PrintF("cont\n");
@@ -438,6 +608,10 @@
PrintF(" use register name 'all' to print all registers\n");
PrintF("printobject <register>\n");
PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("stack [<words>]\n");
+        PrintF("  dump stack content, default dump 10 words\n");
+ PrintF("mem <address> [<words>]\n");
+        PrintF("  dump memory content, default dump 10 words\n");
PrintF("flags\n");
PrintF(" print flags\n");
PrintF("disasm [<instructions>]\n");
@@ -471,29 +645,120 @@
}
-// Create one simulator per thread and keep it in thread local storage.
-static v8::internal::Thread::LocalStorageKey simulator_key;
+static bool ICacheMatch(void* one, void* two) {
+ ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+ ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+ return one == two;
+}
-bool Simulator::initialized_ = false;
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+ void* start_addr,
+ size_t size) {
+ intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+ int intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ ASSERT_EQ(0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
+
+
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+ v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+ ICacheHash(page),
+ true);
+ if (entry->value == NULL) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+ intptr_t start,
+ int size) {
+ ASSERT(size <= CachePage::kPageSize);
+ ASSERT(AllOnOnePage(start, size - 1));
+ ASSERT((start & CachePage::kLineMask) == 0);
+ ASSERT((size & CachePage::kLineMask) == 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+ Instruction* instr) {
+ intptr_t address = reinterpret_cast<intptr_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK(memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset),
+ Instruction::kInstrSize) == 0);
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
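
The flushing code above first rounds the requested range out to whole cache lines and then walks it page by page. The standalone sketch below reproduces that rounding with the same 4 KB page and 4-byte line geometry; it is purely illustrative and not part of the patch.

#include <cstdint>
#include <cstdio>

static const int kPageShift = 12;              // 4 KB pages, as above.
static const int kPageMask  = (1 << kPageShift) - 1;
static const int kLineShift = 2;               // 4-byte cache lines, as above.
static const int kLineMask  = (1 << kLineShift) - 1;

static bool AllOnOnePage(uintptr_t start, int size) {
  uintptr_t start_page = start & ~static_cast<uintptr_t>(kPageMask);
  uintptr_t end_page = (start + size) & ~static_cast<uintptr_t>(kPageMask);
  return start_page == end_page;
}

int main() {
  uintptr_t start = 0x12345ffa;  // Six bytes before the end of a page.
  size_t size = 16;
  // Round the range out to whole lines, exactly as FlushICache does.
  int intra_line = static_cast<int>(start & kLineMask);
  start -= intra_line;
  size += intra_line;
  size = ((size - 1) | kLineMask) + 1;
  std::printf("start=0x%08lx size=%lu one_page=%d\n",
              static_cast<unsigned long>(start),
              static_cast<unsigned long>(size),
              AllOnOnePage(start, static_cast<int>(size) - 1));
  // Prints start=0x12345ff8 size=20 one_page=0, so the flush spans two pages.
  return 0;
}
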
void Simulator::Initialize() {
- if (initialized_) return;
- simulator_key = v8::internal::Thread::CreateThreadLocalKey();
- initialized_ = true;
+ if (Isolate::Current()->simulator_initialized()) return;
+ Isolate::Current()->set_simulator_initialized(true);
::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
}
-Simulator::Simulator() {
+Simulator::Simulator() : isolate_(Isolate::Current()) {
+ i_cache_ = isolate_->simulator_i_cache();
+ if (i_cache_ == NULL) {
+ i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+ isolate_->set_simulator_i_cache(i_cache_);
+ }
Initialize();
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
- size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
- stack_ = reinterpret_cast<char*>(malloc(stack_size));
+ stack_size_ = 1 * 1024*1024; // allocate 1MB for stack
+ stack_ = reinterpret_cast<char*>(malloc(stack_size_));
pc_modified_ = false;
icount_ = 0;
+ break_count_ = 0;
break_pc_ = NULL;
break_instr_ = 0;
@@ -502,16 +767,23 @@
for (int i = 0; i < kNumSimuRegisters; i++) {
registers_[i] = 0;
}
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ FCSR_ = 0;
// The sp is initialized to point to the bottom (high address) of the
// allocated stack area. To be safe in potential stack underflows we leave
// some buffer below.
- registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size - 64;
+ registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size_ - 64;
// The ra and pc are initialized to a known bad value that will cause an
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_ra;
registers_[ra] = bad_ra;
InitializeCoverage();
+ for (int i = 0; i < kNumExceptions; i++) {
+ exceptions[i] = 0;
+ }
}
@@ -524,12 +796,18 @@
// offset from the swi instruction so the simulator knows what to call.
class Redirection {
public:
- Redirection(void* external_function, bool fp_return)
+ Redirection(void* external_function, ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(rtCallRedirInstr),
- fp_return_(fp_return),
- next_(list_) {
- list_ = this;
+ type_(type),
+ next_(NULL) {
+ Isolate* isolate = Isolate::Current();
+ next_ = isolate->simulator_redirection();
+ Simulator::current(isolate)->
+ FlushICache(isolate->simulator_i_cache(),
+ reinterpret_cast<void*>(&swi_instruction_),
+ Instruction::kInstrSize);
+ isolate->set_simulator_redirection(this);
}
void* address_of_swi_instruction() {
@@ -537,14 +815,16 @@
}
void* external_function() { return external_function_; }
- bool fp_return() { return fp_return_; }
+ ExternalReference::Type type() { return type_; }
- static Redirection* Get(void* external_function, bool fp_return) {
- Redirection* current;
- for (current = list_; current != NULL; current = current->next_) {
+ static Redirection* Get(void* external_function,
+ ExternalReference::Type type) {
+ Isolate* isolate = Isolate::Current();
+ Redirection* current = isolate->simulator_redirection();
+ for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
- return new Redirection(external_function, fp_return);
+ return new Redirection(external_function, type);
}
static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
@@ -557,31 +837,33 @@
private:
void* external_function_;
uint32_t swi_instruction_;
- bool fp_return_;
+ ExternalReference::Type type_;
Redirection* next_;
- static Redirection* list_;
};
-Redirection* Redirection::list_ = NULL;
-
-
void* Simulator::RedirectExternalReference(void* external_function,
- bool fp_return) {
- Redirection* redirection = Redirection::Get(external_function, fp_return);
+ ExternalReference::Type type) {
+ Redirection* redirection = Redirection::Get(external_function, type);
return redirection->address_of_swi_instruction();
}
// Get the active Simulator for the current thread.
-Simulator* Simulator::current() {
- Initialize();
- Simulator* sim = reinterpret_cast<Simulator*>(
- v8::internal::Thread::GetThreadLocal(simulator_key));
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ Isolate::CurrentPerIsolateThreadData();
+ if (isolate_data == NULL) {
+ Isolate::EnterDefaultIsolate();
+ isolate_data = Isolate::CurrentPerIsolateThreadData();
+ }
+ ASSERT(isolate_data != NULL);
+
+ Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
- // TODO(146): delete the simulator object when a thread goes away.
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator();
- v8::internal::Thread::SetThreadLocal(simulator_key, sim);
+ isolate_data->set_simulator(sim);
}
return sim;
}
@@ -599,14 +881,22 @@
registers_[reg] = (reg == 0) ? 0 : value;
}
+
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
}
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+
void Simulator::set_fpu_register_double(int fpureg, double value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- *v8i::BitCast<double*>(&FPUregisters_[fpureg]) = value;
+ *BitCast<double*>(&FPUregisters_[fpureg]) = value;
}
@@ -620,22 +910,75 @@
return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
}
+
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
}
+
+int64_t Simulator::get_fpu_register_long(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
+ return *BitCast<int64_t*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *BitCast<float*>(
+ const_cast<int32_t*>(&FPUregisters_[fpureg]));
+}
+
+
double Simulator::get_fpu_register_double(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
- return *v8i::BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+ return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+
+bool Simulator::test_fcsr_bit(uint32_t cc) {
+ return FCSR_ & (1 << cc);
+}
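
The FCSR helpers above treat the control word as a plain bit set. The following standalone sketch shows the same set/test pattern on an ordinary uint32_t; the bit position used for the first condition flag is an assumption made only for illustration.

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins for the simulator's FCSR helpers.
static uint32_t fcsr = 0;

static void SetFCSRBit(uint32_t bit, bool value) {
  if (value) {
    fcsr |= (1u << bit);
  } else {
    fcsr &= ~(1u << bit);
  }
}

static bool TestFCSRBit(uint32_t bit) {
  return (fcsr & (1u << bit)) != 0;
}

int main() {
  const uint32_t kConditionBit = 23;  // Assumed position of condition flag 0.
  SetFCSRBit(kConditionBit, true);    // E.g. the result of a C_OLT_D compare.
  std::printf("%d\n", TestFCSRBit(kConditionBit));  // 1
  SetFCSRBit(kConditionBit, false);
  std::printf("%d\n", TestFCSRBit(kConditionBit));  // 0
  return 0;
}
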
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+ if (!isfinite(original) ||
+ rounded > LONG_MAX ||
+ rounded < LONG_MIN) {
+ set_fcsr_bit(6, true); // Invalid operation.
+ return true;
+ } else if (original != static_cast<double>(rounded)) {
+ set_fcsr_bit(2, true); // Inexact.
+ }
+ return false;
+}
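
As a standalone illustration of the classification set_fcsr_round_error performs, the sketch below rounds a double the way ROUND_W_D does further down (half away from zero) and reports whether the conversion would be flagged invalid or merely inexact. The explicit 32-bit limits are an assumption for the sketch; the simulator itself compares against LONG_MAX/LONG_MIN.

#include <cmath>
#include <cstdint>
#include <cstdio>

// Returns true when the conversion would be invalid; sets *inexact when the
// rounded value differs from the original. Purely illustrative.
static bool RoundError(double original, double rounded, bool* inexact) {
  *inexact = false;
  if (!std::isfinite(original) ||
      rounded > static_cast<double>(INT32_MAX) ||
      rounded < static_cast<double>(INT32_MIN)) {
    return true;                             // Would set the invalid-operation bit.
  }
  if (original != rounded) *inexact = true;  // Would set the inexact bit.
  return false;
}

static double RoundHalfAwayFromZero(double fs) {
  return fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
}

int main() {
  bool inexact = false;
  double fs = 2.5;
  double rounded = RoundHalfAwayFromZero(fs);               // 3.0
  bool invalid = RoundError(fs, rounded, &inexact);
  std::printf("invalid=%d inexact=%d\n", invalid, inexact);  // invalid=0 inexact=1
  invalid = RoundError(1e12, 1e12, &inexact);                // Out of the 32-bit range.
  std::printf("invalid=%d\n", invalid);                      // invalid=1
  return 0;
}
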
+
+
// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;
registers_[pc] = value;
}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+
// Raw access to the PC register without the special adjustment when reading.
int32_t Simulator::get_pc() const {
return registers_[pc];
@@ -651,24 +994,38 @@
// get the correct MIPS-like behaviour on unaligned accesses.
int Simulator::ReadW(int32_t addr, Instruction* instr) {
- if ((addr & v8i::kPointerAlignmentMask) == 0) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference, so drop into the debugger.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
- OS::Abort();
+ PrintF("Unaligned read at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
return 0;
}
void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
- if ((addr & v8i::kPointerAlignmentMask) == 0) {
+ if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference, so drop into the debugger.
+ MipsDebugger dbg(this);
+ dbg.Debug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
- OS::Abort();
+ PrintF("Unaligned write at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
+ MipsDebugger dbg(this);
+ dbg.Debug();
}
@@ -677,7 +1034,8 @@
double* ptr = reinterpret_cast<double*>(addr);
return *ptr;
}
- PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned (double) read at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
OS::Abort();
return 0;
}
@@ -689,7 +1047,8 @@
*ptr = value;
return;
}
- PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned (double) write at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
OS::Abort();
}
@@ -699,7 +1058,8 @@
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
OS::Abort();
return 0;
}
@@ -710,7 +1070,8 @@
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
- PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned signed halfword read at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
OS::Abort();
return 0;
}
@@ -722,7 +1083,8 @@
*ptr = value;
return;
}
- PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
OS::Abort();
}
@@ -733,7 +1095,8 @@
*ptr = value;
return;
}
- PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+ PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr,
+ reinterpret_cast<void*>(instr));
OS::Abort();
}
@@ -746,7 +1109,7 @@
int32_t Simulator::ReadB(int32_t addr) {
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
- return ((*ptr << 24) >> 24) & 0xff;
+ return *ptr;
}
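
The simplified ReadB above changes behaviour as well as form: converting the int8_t through the int return type sign-extends the byte, while the removed shift-and-mask expression forced the result into 0..255. A standalone comparison, for illustration only:

#include <cstdint>
#include <cstdio>

int main() {
  int8_t byte = static_cast<int8_t>(0xf0);        // -16 as a signed byte.
  int32_t plain = byte;                           // New form: sign-extends to -16.
  int32_t masked = ((byte << 24) >> 24) & 0xff;   // Old form (as the removed line did): 240.
  std::printf("%d %d\n", plain, masked);          // -16 240
  return 0;
}
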
@@ -773,7 +1136,7 @@
// Unsupported instructions use Format to print an error and stop execution.
void Simulator::Format(Instruction* instr, const char* format) {
PrintF("Simulator found unsupported instruction:\n 0x%08x: %s\n",
- instr, format);
+ reinterpret_cast<intptr_t>(instr), format);
UNIMPLEMENTED_MIPS();
}
@@ -782,75 +1145,140 @@
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the r1 result register contains a bogus
+// 64 bits of result. If they don't, the v1 result register contains a bogus
// value, which is fine because it is caller-saved.
typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
- int32_t arg3);
-typedef double (*SimulatorRuntimeFPCall)(double fparg0,
- double fparg1);
-
+ int32_t arg3,
+ int32_t arg4,
+ int32_t arg5);
+typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
+ int32_t arg1,
+ int32_t arg2,
+ int32_t arg3);
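
A minimal standalone sketch of the two-values-in-one-int64 convention described in the ObjectPair comment above; the packing helper here is illustrative and not V8's actual ObjectPair type.

#include <cstdint>
#include <cstdio>

// Pack two 32-bit values into one 64-bit result, low word first.
static int64_t MakePair(int32_t first, int32_t second) {
  return (static_cast<int64_t>(second) << 32) |
         static_cast<uint32_t>(first);
}

int main() {
  int64_t result = MakePair(0x11111111, 0x22222222);
  int32_t v0 = static_cast<int32_t>(result);        // Low half -> v0.
  int32_t v1 = static_cast<int32_t>(result >> 32);  // High half -> v1.
  std::printf("%08x %08x\n", v0, v1);               // 11111111 22222222
  return 0;
}
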
// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime.
+// C-based V8 runtime. They are also used for debugging with simulator.
void Simulator::SoftwareInterrupt(Instruction* instr) {
+ // There are several instructions that could get us here,
+ // the break_ instruction, or several variants of traps. All
+  // are "SPECIAL" class opcodes, and are distinguished by function.
+ int32_t func = instr->FunctionFieldRaw();
+ int32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+
// We first check if we met a call_rt_redirected.
if (instr->InstructionBits() == rtCallRedirInstr) {
+    // Check if the stack is aligned. If it is not, the error is reported
+    // below so that it can include information on the function called.
+ bool stack_aligned =
+ (get_register(sp)
+ & (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
Redirection* redirection = Redirection::FromSwiInstruction(instr);
int32_t arg0 = get_register(a0);
int32_t arg1 = get_register(a1);
int32_t arg2 = get_register(a2);
int32_t arg3 = get_register(a3);
- // fp args are (not always) in f12 and f14.
- // See MIPS conventions for more details.
- double fparg0 = get_fpu_register_double(f12);
- double fparg1 = get_fpu_register_double(f14);
+ int32_t arg4 = 0;
+ int32_t arg5 = 0;
+
+ // Need to check if sp is valid before assigning arg4, arg5.
+ // This is a fix for cctest test-api/CatchStackOverflow which causes
+ // the stack to overflow. For some reason arm doesn't need this
+ // stack check here.
+ int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
+ int32_t* stack = reinterpret_cast<int32_t*>(stack_);
+ if (stack_pointer >= stack && stack_pointer < stack + stack_size_) {
+ arg4 = stack_pointer[0];
+ arg5 = stack_pointer[1];
+ }
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_ra = get_register(ra);
- if (redirection->fp_return()) {
- intptr_t external =
- reinterpret_cast<intptr_t>(redirection->external_function());
+
+ intptr_t external =
+ reinterpret_cast<int32_t>(redirection->external_function());
+
+ // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
+ // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
+ // simulator. Soft-float has additional abstraction of ExternalReference,
+ // to support serialization. Finally, when simulated on x86 host, the
+ // x86 softfloat routines are used, and this Redirection infrastructure
+ // lets simulated-mips make calls into x86 C code.
+ // When doing that, the 'double' return type must be handled differently
+ // than the usual int64_t return. The data is returned in different
+ // registers and cannot be cast from one type to the other. However, the
+ // calling arguments are passed the same way in both cases.
+ if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Call to host function at %p with args %f, %f\n",
- FUNCTION_ADDR(target), fparg0, fparg1);
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ PrintF("Call to host function at %p with args %08x:%08x %08x:%08x",
+ FUNCTION_ADDR(target), arg0, arg1, arg2, arg3);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
}
- double result = target(fparg0, fparg1);
- set_fpu_register_double(f0, result);
+ double result = target(arg0, arg1, arg2, arg3);
+ // fp result -> registers v0 and v1.
+ int32_t gpreg_pair[2];
+ memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
+ set_register(v0, gpreg_pair[0]);
+ set_register(v1, gpreg_pair[1]);
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ PrintF("Mips does not yet support ExternalReference::DIRECT_API_CALL\n");
+ ASSERT(redirection->type() != ExternalReference::DIRECT_API_CALL);
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ PrintF("Mips does not support ExternalReference::DIRECT_GETTER_CALL\n");
+ ASSERT(redirection->type() != ExternalReference::DIRECT_GETTER_CALL);
} else {
- intptr_t external =
- reinterpret_cast<int32_t>(redirection->external_function());
+ // Builtin call.
+ ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
- if (::v8::internal::FLAG_trace_sim) {
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
- "Call to host function at %p with args %08x, %08x, %08x, %08x\n",
+ "Call to host function at %p: %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
- arg3);
+ arg3,
+ arg4,
+ arg5);
+ if (!stack_aligned) {
+ PrintF(" with unaligned stack %08x\n", get_register(sp));
+ }
+ PrintF("\n");
}
- int64_t result = target(arg0, arg1, arg2, arg3);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
- }
- set_register(v0, lo_res);
- set_register(v1, hi_res);
+
+ int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+ set_register(v0, static_cast<int32_t>(result));
+ set_register(v1, static_cast<int32_t>(result >> 32));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08x : %08x\n", get_register(v1), get_register(v0));
}
set_register(ra, saved_ra);
set_pc(get_register(ra));
+
+ } else if (func == BREAK && code >= 0 && code < 16) {
+ // First 16 break_ codes interpreted as debug markers.
+ MipsDebugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %d marker: %3d (instr count: %8d) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
} else {
- Debugger dbg(this);
+ // All remaining break_ codes, and all traps are handled here.
+ MipsDebugger dbg(this);
dbg.Debug();
}
}
+
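
The FP_RETURN_CALL path above moves a C double return value into the v0/v1 register pair with memcpy. The following standalone sketch shows the same bit-level split with an arbitrary value, and that it round-trips; it is illustrative only.

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double result = 1.5;
  int32_t gpreg_pair[2];
  std::memcpy(gpreg_pair, &result, sizeof(result));  // Reinterpret the 64 bits.
  // On a little-endian host gpreg_pair[0] would go to v0, gpreg_pair[1] to v1.
  std::printf("v0=0x%08x v1=0x%08x\n", gpreg_pair[0], gpreg_pair[1]);
  double back;
  std::memcpy(&back, gpreg_pair, sizeof(back));      // Round-trips exactly.
  std::printf("%g\n", back);                         // 1.5
  return 0;
}
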
void Simulator::SignalExceptions() {
for (int i = 1; i < kNumExceptions; i++) {
if (exceptions[i] != 0) {
@@ -859,51 +1287,52 @@
}
}
+
// Handle execution based on instruction types.
-void Simulator::DecodeTypeRegister(Instruction* instr) {
- // Instruction fields
- Opcode op = instr->OpcodeFieldRaw();
- int32_t rs_reg = instr->RsField();
- int32_t rs = get_register(rs_reg);
- uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtField();
- int32_t rt = get_register(rt_reg);
- uint32_t rt_u = static_cast<uint32_t>(rt);
- int32_t rd_reg = instr->RdField();
- uint32_t sa = instr->SaField();
- int32_t fs_reg= instr->FsField();
+void Simulator::ConfigureTypeRegister(Instruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ bool& do_interrupt) {
+  // Every local variable declared here needs to be const, so that changed
+  // values are passed back to DecodeTypeRegister only through the reference
+  // arguments.
- // ALU output
- // It should not be used as is. Instructions using it should always initialize
- // it first.
- int32_t alu_out = 0x12345678;
- // Output or temporary for floating point.
- double fp_out = 0.0;
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int32_t rs_reg = instr->RsValue();
+ const int32_t rs = get_register(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->RtValue();
+ const int32_t rt = get_register(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->RdValue();
+ const uint32_t sa = instr->SaValue();
- // For break and trap instructions.
- bool do_interrupt = false;
+ const int32_t fs_reg = instr->FsValue();
- // For jr and jalr
- // Get current pc.
- int32_t current_pc = get_pc();
- // Next pc
- int32_t next_pc = 0;
// ---------- Configuration
switch (op) {
case COP1: // Coprocessor instructions
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
+ case BC1: // Handled in DecodeTypeImmed, should never come here.
UNREACHABLE();
break;
+ case CFC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ alu_out = FCSR_;
+ break;
case MFC1:
alu_out = get_fpu_register(fs_reg);
break;
case MFHC1:
- fp_out = get_fpu_register_double(fs_reg);
- alu_out = *v8i::BitCast<int32_t*>(&fp_out);
+ UNIMPLEMENTED_MIPS();
break;
+ case CTC1:
case MTC1:
case MTHC1:
// Do the store in the execution step.
@@ -923,13 +1352,22 @@
switch (instr->FunctionFieldRaw()) {
case JR:
case JALR:
- next_pc = get_register(instr->RsField());
+ next_pc = get_register(instr->RsValue());
break;
case SLL:
alu_out = rt << sa;
break;
case SRL:
- alu_out = rt_u >> sa;
+ if (rs_reg == 0) {
+ // Regular logical right shift of a word by a fixed number of
+ // bits instruction. RS field is always equal to 0.
+ alu_out = rt_u >> sa;
+ } else {
+            // Logical right-rotate of a word by a fixed number of bits. This
+            // is a special case of the SRL instruction, added in MIPS32
+            // Release 2. The RS field is equal to 00001.
+ alu_out = (rt_u >> sa) | (rt_u << (32 - sa));
+ }
break;
case SRA:
alu_out = rt >> sa;
@@ -938,7 +1376,16 @@
alu_out = rt << rs;
break;
case SRLV:
- alu_out = rt_u >> rs;
+ if (sa == 0) {
+ // Regular logical right-shift of a word by a variable number of
+ // bits instruction. SA field is always equal to 0.
+ alu_out = rt_u >> rs;
+ } else {
+ // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in
+            // MIPS32 Release 2. The SA field is equal to 00001.
+ alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+ }
break;
case SRAV:
alu_out = rt >> rs;
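
For the rotate variants of SRL and SRLV added above, the effect is an ordinary 32-bit rotate right. A standalone sketch, with an explicit guard for a rotate amount of zero (purely illustrative):

#include <cstdint>
#include <cstdio>

static uint32_t RotateRight(uint32_t value, uint32_t amount) {
  amount &= 31;                  // Guard: a shift by 32 would be undefined.
  if (amount == 0) return value;
  return (value >> amount) | (value << (32 - amount));
}

int main() {
  std::printf("0x%08x\n", RotateRight(0x80000001u, 1));   // 0xc0000000
  std::printf("0x%08x\n", RotateRight(0x12345678u, 8));   // 0x78123456
  return 0;
}
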
@@ -950,10 +1397,10 @@
alu_out = get_register(LO);
break;
case MULT:
- UNIMPLEMENTED_MIPS();
+ i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
break;
case MULTU:
- UNIMPLEMENTED_MIPS();
+ u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
break;
case DIV:
case DIVU:
@@ -1005,6 +1452,7 @@
break;
// Break and trap instructions
case BREAK:
+
do_interrupt = true;
break;
case TGE:
@@ -1025,6 +1473,11 @@
case TNE:
do_interrupt = rs != rt;
break;
+ case MOVN:
+ case MOVZ:
+ case MOVCI:
+ // No action taken on decode.
+ break;
default:
UNREACHABLE();
};
@@ -1034,13 +1487,83 @@
case MUL:
alu_out = rs_u * rt_u; // Only the lower 32 bits are kept.
break;
+ case CLZ:
+ alu_out = __builtin_clz(rs_u);
+ break;
default:
UNREACHABLE();
- }
+ };
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS: { // Mips32r2 instruction.
+ // Interpret Rd field as 5-bit msb of insert.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of insert.
+ uint16_t lsb = sa;
+ uint16_t size = msb - lsb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+ break;
+ }
+ case EXT: { // Mips32r2 instruction.
+ // Interpret Rd field as 5-bit msb of extract.
+ uint16_t msb = rd_reg;
+ // Interpret sa field as 5-bit lsb of extract.
+ uint16_t lsb = sa;
+ uint16_t size = msb + 1;
+ uint32_t mask = (1 << size) - 1;
+ alu_out = (rs_u & (mask << lsb)) >> lsb;
+ break;
+ }
+ default:
+ UNREACHABLE();
+ };
break;
default:
UNREACHABLE();
};
+}
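
The INS and EXT cases above are plain bit-field insert and extract. Here is a small standalone sketch of the same mask arithmetic with worked values; it is illustrative only and adds a guard for a 32-bit field width that the code above does not take.

#include <cstdint>
#include <cstdio>

// Insert the low `size` bits of src into dst at bit position lsb.
static uint32_t Ins(uint32_t dst, uint32_t src, int lsb, int size) {
  uint32_t mask = (size >= 32) ? 0xffffffffu : ((1u << size) - 1);
  return (dst & ~(mask << lsb)) | ((src & mask) << lsb);
}

// Extract `size` bits of src starting at bit position lsb.
static uint32_t Ext(uint32_t src, int lsb, int size) {
  uint32_t mask = (size >= 32) ? 0xffffffffu : ((1u << size) - 1);
  return (src >> lsb) & mask;
}

int main() {
  std::printf("0x%08x\n", Ins(0xffffffffu, 0x5u, 4, 3));  // 0xffffffdf
  std::printf("0x%08x\n", Ext(0x12345678u, 8, 8));        // 0x00000056
  return 0;
}
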
+
+
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+ // Instruction fields.
+ const Opcode op = instr->OpcodeFieldRaw();
+ const int32_t rs_reg = instr->RsValue();
+ const int32_t rs = get_register(rs_reg);
+ const uint32_t rs_u = static_cast<uint32_t>(rs);
+ const int32_t rt_reg = instr->RtValue();
+ const int32_t rt = get_register(rt_reg);
+ const uint32_t rt_u = static_cast<uint32_t>(rt);
+ const int32_t rd_reg = instr->RdValue();
+
+ const int32_t fs_reg = instr->FsValue();
+ const int32_t ft_reg = instr->FtValue();
+ const int32_t fd_reg = instr->FdValue();
+ int64_t i64hilo = 0;
+ uint64_t u64hilo = 0;
+
+ // ALU output
+ // It should not be used as is. Instructions using it should always
+ // initialize it first.
+ int32_t alu_out = 0x12345678;
+
+ // For break and trap instructions.
+ bool do_interrupt = false;
+
+ // For jr and jalr
+ // Get current pc.
+ int32_t current_pc = get_pc();
+ // Next pc
+ int32_t next_pc = 0;
+
+ // Setup the variables if needed before executing the instruction.
+ ConfigureTypeRegister(instr,
+ alu_out,
+ i64hilo,
+ u64hilo,
+ next_pc,
+ do_interrupt);
// ---------- Raise exceptions triggered.
SignalExceptions();
@@ -1052,25 +1575,42 @@
case BC1: // branch on coprocessor condition
UNREACHABLE();
break;
+      case CFC1:
+        set_register(rt_reg, alu_out);
+        break;
case MFC1:
- case MFHC1:
set_register(rt_reg, alu_out);
break;
+ case MFHC1:
+ UNIMPLEMENTED_MIPS();
+ break;
+ case CTC1:
+ // At the moment only FCSR is supported.
+ ASSERT(fs_reg == kFCSRRegister);
+ FCSR_ = registers_[rt_reg];
+ break;
case MTC1:
- // We don't need to set the higher bits to 0, because MIPS ISA says
- // they are in an unpredictable state after executing MTC1.
FPUregisters_[fs_reg] = registers_[rt_reg];
- FPUregisters_[fs_reg+1] = Unpredictable;
break;
case MTHC1:
- // Here we need to keep the lower bits unchanged.
- FPUregisters_[fs_reg+1] = registers_[rt_reg];
+ UNIMPLEMENTED_MIPS();
break;
case S:
+ float f;
switch (instr->FunctionFieldRaw()) {
case CVT_D_S:
+ f = get_fpu_register_float(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(f));
+ break;
case CVT_W_S:
case CVT_L_S:
+ case TRUNC_W_S:
+ case TRUNC_L_S:
+ case ROUND_W_S:
+ case ROUND_L_S:
+ case FLOOR_W_S:
+ case FLOOR_L_S:
+ case CEIL_W_S:
+ case CEIL_L_S:
case CVT_PS_S:
UNIMPLEMENTED_MIPS();
break;
@@ -1079,10 +1619,133 @@
}
break;
case D:
+ double ft, fs;
+ uint32_t cc, fcsr_cc;
+ int64_t i64;
+ fs = get_fpu_register_double(fs_reg);
+ ft = get_fpu_register_double(ft_reg);
+ cc = instr->FCccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
switch (instr->FunctionFieldRaw()) {
- case CVT_S_D:
- case CVT_W_D:
- case CVT_L_D:
+ case ADD_D:
+ set_fpu_register_double(fd_reg, fs + ft);
+ break;
+ case SUB_D:
+ set_fpu_register_double(fd_reg, fs - ft);
+ break;
+ case MUL_D:
+ set_fpu_register_double(fd_reg, fs * ft);
+ break;
+ case DIV_D:
+ set_fpu_register_double(fd_reg, fs / ft);
+ break;
+ case ABS_D:
+ set_fpu_register_double(fd_reg, fs < 0 ? -fs : fs);
+ break;
+ case MOV_D:
+ set_fpu_register_double(fd_reg, fs);
+ break;
+ case NEG_D:
+ set_fpu_register_double(fd_reg, -fs);
+ break;
+ case SQRT_D:
+ set_fpu_register_double(fd_reg, sqrt(fs));
+ break;
+ case C_UN_D:
+ set_fcsr_bit(fcsr_cc, isnan(fs) || isnan(ft));
+ break;
+ case C_EQ_D:
+ set_fcsr_bit(fcsr_cc, (fs == ft));
+ break;
+ case C_UEQ_D:
+ set_fcsr_bit(fcsr_cc, (fs == ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case C_OLT_D:
+ set_fcsr_bit(fcsr_cc, (fs < ft));
+ break;
+ case C_ULT_D:
+ set_fcsr_bit(fcsr_cc, (fs < ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case C_OLE_D:
+ set_fcsr_bit(fcsr_cc, (fs <= ft));
+ break;
+ case C_ULE_D:
+ set_fcsr_bit(fcsr_cc, (fs <= ft) || (isnan(fs) || isnan(ft)));
+ break;
+ case CVT_W_D: // Convert double to word.
+ // Rounding modes are not yet supported.
+ ASSERT((FCSR_ & 3) == 0);
+ // In rounding mode 0 it should behave like ROUND.
+ case ROUND_W_D: // Round double to word.
+ {
+ double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case TRUNC_W_D: // Truncate double to word (round towards 0).
+ {
+ int32_t result = static_cast<int32_t>(fs);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, static_cast<double>(result))) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case FLOOR_W_D: // Round double to word towards negative infinity.
+ {
+ double rounded = floor(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CEIL_W_D: // Round double to word towards positive infinity.
+ {
+ double rounded = ceil(fs);
+ int32_t result = static_cast<int32_t>(rounded);
+ set_fpu_register(fd_reg, result);
+ if (set_fcsr_round_error(fs, rounded)) {
+ set_fpu_register(fd_reg, kFPUInvalidResult);
+ }
+ }
+ break;
+ case CVT_S_D: // Convert double to float (single).
+ set_fpu_register_float(fd_reg, static_cast<float>(fs));
+ break;
+ case CVT_L_D: // Mips32r2: Truncate double to 64-bit long-word.
+ i64 = static_cast<int64_t>(fs);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case TRUNC_L_D: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(fs);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case ROUND_L_D: { // Mips32r2 instruction.
+ double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+ i64 = static_cast<int64_t>(rounded);
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ }
+ case FLOOR_L_D: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(floor(fs));
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case CEIL_L_D: // Mips32r2 instruction.
+ i64 = static_cast<int64_t>(ceil(fs));
+ set_fpu_register(fd_reg, i64 & 0xffffffff);
+ set_fpu_register(fd_reg + 1, i64 >> 32);
+ break;
+ case C_F_D:
UNIMPLEMENTED_MIPS();
break;
default:
@@ -1091,11 +1754,13 @@
break;
case W:
switch (instr->FunctionFieldRaw()) {
- case CVT_S_W:
- UNIMPLEMENTED_MIPS();
+ case CVT_S_W: // Convert word to float (single).
+ alu_out = get_fpu_register(fs_reg);
+ set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
break;
case CVT_D_W: // Convert word to double.
- set_fpu_register(rd_reg, static_cast<double>(rs));
+ alu_out = get_fpu_register(fs_reg);
+ set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
break;
default:
UNREACHABLE();
@@ -1103,8 +1768,14 @@
break;
case L:
switch (instr->FunctionFieldRaw()) {
+ case CVT_D_L: // Mips32r2 instruction.
+          // Watch the signs here: we are combining two 32-bit values into
+          // one signed 64-bit value.
+ i64 = (uint32_t) get_fpu_register(fs_reg);
+ i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32);
+ set_fpu_register_double(fd_reg, static_cast<double>(i64));
+ break;
case CVT_S_L:
- case CVT_D_L:
UNIMPLEMENTED_MIPS();
break;
default:
@@ -1121,7 +1792,7 @@
switch (instr->FunctionFieldRaw()) {
case JR: {
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstructionSize);
+ current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
set_pc(next_pc);
pc_modified_ = true;
@@ -1129,16 +1800,21 @@
}
case JALR: {
Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
- current_pc+Instruction::kInstructionSize);
+ current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2* Instruction::kInstrSize);
set_pc(next_pc);
pc_modified_ = true;
break;
}
// Instructions using HI and LO registers.
case MULT:
+ set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+ break;
case MULTU:
+ set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+ set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
// Divide by zero was checked in the configuration step.
@@ -1149,7 +1825,7 @@
set_register(LO, rs_u / rt_u);
set_register(HI, rs_u % rt_u);
break;
- // Break and trap instructions
+ // Break and trap instructions.
case BREAK:
case TGE:
case TGEU:
@@ -1161,6 +1837,23 @@
SoftwareInterrupt(instr);
}
break;
+ // Conditional moves.
+ case MOVN:
+ if (rt) set_register(rd_reg, rs);
+ break;
+ case MOVCI: {
+ uint32_t cc = instr->FCccValue();
+ uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+ if (instr->Bit(16)) { // Read Tf bit
+ if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ } else {
+ if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+ }
+ break;
+ }
+ case MOVZ:
+ if (!rt) set_register(rd_reg, rs);
+ break;
default: // For other special opcodes we do the default operation.
set_register(rd_reg, alu_out);
};
@@ -1173,9 +1866,23 @@
set_register(LO, Unpredictable);
set_register(HI, Unpredictable);
break;
+ default: // For other special2 opcodes we do the default operation.
+ set_register(rd_reg, alu_out);
+ }
+ break;
+ case SPECIAL3:
+ switch (instr->FunctionFieldRaw()) {
+ case INS:
+ // Ins instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ break;
+ case EXT:
+ // Ext instr leaves result in Rt, rather than Rd.
+ set_register(rt_reg, alu_out);
+ break;
default:
UNREACHABLE();
- }
+ };
break;
// Unimplemented opcodes raised an error in the configuration step before,
// so we can use the default here to set the destination register in common
@@ -1185,22 +1892,22 @@
};
}
+
 // Type 2: instructions using a 16-bit immediate. (e.g. addi, beq)
void Simulator::DecodeTypeImmediate(Instruction* instr) {
- // Instruction fields
+ // Instruction fields.
Opcode op = instr->OpcodeFieldRaw();
- int32_t rs = get_register(instr->RsField());
+ int32_t rs = get_register(instr->RsValue());
uint32_t rs_u = static_cast<uint32_t>(rs);
- int32_t rt_reg = instr->RtField(); // destination register
+ int32_t rt_reg = instr->RtValue(); // destination register
int32_t rt = get_register(rt_reg);
- int16_t imm16 = instr->Imm16Field();
+ int16_t imm16 = instr->Imm16Value();
- int32_t ft_reg = instr->FtField(); // destination register
- int32_t ft = get_register(ft_reg);
+ int32_t ft_reg = instr->FtValue(); // destination register
- // zero extended immediate
+ // Zero extended immediate.
uint32_t oe_imm16 = 0xffff & imm16;
- // sign extended immediate
+ // Sign extended immediate.
int32_t se_imm16 = imm16;
// Get current pc.
@@ -1208,25 +1915,38 @@
// Next pc.
int32_t next_pc = bad_ra;
- // Used for conditional branch instructions
+ // Used for conditional branch instructions.
bool do_branch = false;
bool execute_branch_delay_instruction = false;
- // Used for arithmetic instructions
+ // Used for arithmetic instructions.
int32_t alu_out = 0;
- // Floating point
+ // Floating point.
double fp_out = 0.0;
+ uint32_t cc, cc_value, fcsr_cc;
- // Used for memory instructions
+ // Used for memory instructions.
int32_t addr = 0x0;
+ // Value to be written in memory
+ uint32_t mem_value = 0x0;
// ---------- Configuration (and execution for REGIMM)
switch (op) {
- // ------------- COP1. Coprocessor instructions
+ // ------------- COP1. Coprocessor instructions.
case COP1:
switch (instr->RsFieldRaw()) {
- case BC1: // branch on coprocessor condition
- UNIMPLEMENTED_MIPS();
+ case BC1: // Branch on coprocessor condition.
+ cc = instr->FBccValue();
+ fcsr_cc = get_fcsr_condition_bit(cc);
+ cc_value = test_fcsr_bit(fcsr_cc);
+ do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+ execute_branch_delay_instruction = true;
+ // Set next_pc
+ if (do_branch) {
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+ } else {
+ next_pc = current_pc + kBranchReturnOffset;
+ }
break;
default:
UNREACHABLE();
@@ -1259,7 +1979,7 @@
execute_branch_delay_instruction = true;
// Set next_pc
if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
set_register(31, current_pc + kBranchReturnOffset);
}
@@ -1323,6 +2043,21 @@
addr = rs + se_imm16;
alu_out = ReadB(addr);
break;
+ case LH:
+ addr = rs + se_imm16;
+ alu_out = ReadH(addr, instr);
+ break;
+ case LWL: {
+ // al_offset is an offset of the effective address within an aligned word
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = (1 << byte_shift * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+ alu_out <<= byte_shift * 8;
+ alu_out |= rt & mask;
+ break;
+ }
case LW:
addr = rs + se_imm16;
alu_out = ReadW(addr, instr);
@@ -1331,12 +2066,47 @@
addr = rs + se_imm16;
alu_out = ReadBU(addr);
break;
+ case LHU:
+ addr = rs + se_imm16;
+ alu_out = ReadHU(addr, instr);
+ break;
+ case LWR: {
+ // al_offset is an offset of the effective address within an aligned word
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ alu_out = ReadW(addr, instr);
+ alu_out = static_cast<uint32_t> (alu_out) >> al_offset * 8;
+ alu_out |= rt & mask;
+ break;
+ }
case SB:
addr = rs + se_imm16;
break;
+ case SH:
+ addr = rs + se_imm16;
+ break;
+ case SWL: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint8_t byte_shift = kPointerAlignmentMask - al_offset;
+ uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr) & mask;
+ mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+ break;
+ }
case SW:
addr = rs + se_imm16;
break;
+ case SWR: {
+ uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
+ uint32_t mask = (1 << al_offset * 8) - 1;
+ addr = rs + se_imm16 - al_offset;
+ mem_value = ReadW(addr, instr);
+ mem_value = (rt << al_offset * 8) | (mem_value & mask);
+ break;
+ }
case LWC1:
addr = rs + se_imm16;
alu_out = ReadW(addr, instr);
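
To make the LWL masking above concrete, the standalone trace below evaluates the same expressions for an effective address whose low two bits are 1. The register and memory values are arbitrary, and the snippet only mirrors the arithmetic; it is not a full model of the instruction.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kPointerAlignmentMask = 3;
  uint32_t rt = 0xaabbccdd;            // Previous contents of rt.
  uint32_t aligned_word = 0x11223344;  // Word read back from the aligned address.
  uint32_t al_offset = 1 & kPointerAlignmentMask;           // Address low bits.
  uint32_t byte_shift = kPointerAlignmentMask - al_offset;  // 2 in this case.
  uint32_t mask = (1u << byte_shift * 8) - 1;               // 0x0000ffff.
  uint32_t alu_out = (aligned_word << byte_shift * 8) | (rt & mask);
  std::printf("0x%08x\n", alu_out);    // 0x3344ccdd
  return 0;
}
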
@@ -1367,12 +2137,12 @@
execute_branch_delay_instruction = true;
// Set next_pc
if (do_branch) {
- next_pc = current_pc + (imm16 << 2) + Instruction::kInstructionSize;
+ next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2* Instruction::kInstrSize);
}
} else {
- next_pc = current_pc + 2 * Instruction::kInstructionSize;
+ next_pc = current_pc + 2 * Instruction::kInstrSize;
}
break;
// ------------- Arithmetic instructions
@@ -1388,16 +2158,29 @@
break;
// ------------- Memory instructions
case LB:
+ case LH:
+ case LWL:
case LW:
case LBU:
+ case LHU:
+ case LWR:
set_register(rt_reg, alu_out);
break;
case SB:
WriteB(addr, static_cast<int8_t>(rt));
break;
+ case SH:
+ WriteH(addr, static_cast<uint16_t>(rt), instr);
+ break;
+ case SWL:
+ WriteW(addr, mem_value, instr);
+ break;
case SW:
WriteW(addr, rt, instr);
break;
+ case SWR:
+ WriteW(addr, mem_value, instr);
+ break;
case LWC1:
set_fpu_register(ft_reg, alu_out);
break;
@@ -1410,7 +2193,7 @@
break;
case SDC1:
addr = rs + se_imm16;
- WriteD(addr, ft, instr);
+ WriteD(addr, get_fpu_register_double(ft_reg), instr);
break;
default:
break;
@@ -1422,7 +2205,7 @@
// We don't check for end_sim_pc. First it should not be met as the current
// pc is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+ reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
}
@@ -1432,6 +2215,7 @@
}
}
+
 // Type 3: instructions using a 26-bit immediate. (e.g. j, jal)
void Simulator::DecodeTypeJump(Instruction* instr) {
// Get current pc.
@@ -1439,35 +2223,39 @@
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xf0000000;
// Next pc
- int32_t next_pc = pc_high_bits | (instr->Imm26Field() << 2);
+ int32_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
// Execute branch delay slot
// We don't check for end_sim_pc. First it should not be met as the current pc
// is valid. Secondly a jump should always execute its branch delay slot.
Instruction* branch_delay_instr =
- reinterpret_cast<Instruction*>(current_pc+Instruction::kInstructionSize);
+ reinterpret_cast<Instruction*>(current_pc+Instruction::kInstrSize);
BranchDelayInstructionDecode(branch_delay_instr);
// Update pc and ra if necessary.
// Do this after the branch delay execution.
if (instr->IsLinkingInstruction()) {
- set_register(31, current_pc + 2* Instruction::kInstructionSize);
+ set_register(31, current_pc + 2* Instruction::kInstrSize);
}
set_pc(next_pc);
pc_modified_ = true;
}
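
A worked standalone example of the jump-target computation above: the new pc keeps the top four bits of the current pc and takes the rest from the 26-bit immediate shifted left by two. Values here are arbitrary.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t current_pc = 0x40001234;
  uint32_t imm26 = 0x0123456;   // 26-bit field taken from the instruction word.
  uint32_t next_pc = (current_pc & 0xf0000000u) | (imm26 << 2);
  std::printf("0x%08x\n", next_pc);  // 0x4048d158
  return 0;
}
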
+
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(isolate_->simulator_i_cache(), instr);
+ }
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
- dasm.InstructionDecode(buffer,
- reinterpret_cast<byte_*>(instr));
- PrintF(" 0x%08x %s\n", instr, buffer.start());
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte_*>(instr));
+ PrintF(" 0x%08x %s\n", reinterpret_cast<intptr_t>(instr),
+ buffer.start());
}
switch (instr->InstructionType()) {
@@ -1485,7 +2273,7 @@
}
if (!pc_modified_) {
set_register(pc, reinterpret_cast<int32_t>(instr) +
- Instruction::kInstructionSize);
+ Instruction::kInstrSize);
}
}
@@ -1511,7 +2299,7 @@
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
- Debugger dbg(this);
+ MipsDebugger dbg(this);
dbg.Debug();
} else {
InstructionDecode(instr);
@@ -1538,7 +2326,7 @@
int original_stack = get_register(sp);
// Compute position of stack on entry to generated code.
int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kArgsSlotsSize);
+ - kCArgsSlotsSize);
if (OS::ActivationFrameAlignment() != 0) {
entry_stack &= -OS::ActivationFrameAlignment();
}
@@ -1643,8 +2431,8 @@
#undef UNSUPPORTED
-} } // namespace assembler::mips
+} } // namespace v8::internal
-#endif // !__mips || USE_SIMULATOR
+#endif // USE_SIMULATOR
#endif // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 6e42683..0cd9bbe 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -37,12 +37,31 @@
#define V8_MIPS_SIMULATOR_MIPS_H_
#include "allocation.h"
+#include "constants-mips.h"
-#if defined(__mips) && !defined(USE_SIMULATOR)
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native mips platform.
+
+namespace v8 {
+namespace internal {
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- entry(p0, p1, p2, p3, p4);
+ entry(p0, p1, p2, p3, p4)
+
+typedef int (*mips_regexp_matcher)(String*, int, const byte*, const byte*,
+ void*, int*, Address, int, Isolate*);
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type mips_regexp_matcher.
+// The fifth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+ p0, p1, p2, p3, NULL, p4, p5, p6, p7))
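
The macro above casts a raw code-entry address to a typed function pointer and calls through it. The standalone sketch below shows the same pattern with an ordinary C++ function standing in for the generated code; the signature and cast helper here are illustrative, not V8's FUNCTION_CAST.

#include <cstdint>
#include <cstdio>

typedef int (*entry_fn)(int, int);

static int StandInEntry(int a, int b) { return a + b; }  // Stand-in for generated code.

int main() {
  // In V8 the address points at generated code; here an ordinary function
  // stands in, and its address is round-tripped through an integer to show
  // the reinterpretation.
  uintptr_t address = reinterpret_cast<uintptr_t>(&StandInEntry);
  entry_fn entry = reinterpret_cast<entry_fn>(address);
  std::printf("%d\n", entry(2, 3));  // 5
  return 0;
}
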
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on mips uses the C stack, we
@@ -60,6 +79,8 @@
static inline void UnregisterCTryCatch() { }
};
+} } // namespace v8::internal
+
 // Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
// the address of "this" to get a value on the current execution stack and then
@@ -70,39 +91,50 @@
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
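
The GET_STACK_LIMIT trick described above derives a stack limit from the address of the current object, which sits on the C stack when the macro is used as intended. A standalone sketch of the same idea using a local variable instead of `this`; the headroom value is an arbitrary assumption.

#include <cstdint>
#include <cstdio>

// Approximate the current C stack position from the address of a local.
static uintptr_t CurrentStackPosition() {
  int marker = 0;
  return reinterpret_cast<uintptr_t>(&marker);
}

int main() {
  const uintptr_t kHeadroom = 512 * 1024;  // Assumed slack below the current frame.
  uintptr_t here = CurrentStackPosition();
  uintptr_t limit = (here >= kHeadroom) ? here - kHeadroom : 0;
  std::printf("position=0x%lx limit=0x%lx\n",
              static_cast<unsigned long>(here),
              static_cast<unsigned long>(limit));
  return 0;
}
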
-// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+#else // !defined(USE_SIMULATOR)
+// Running with a simulator.
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- reinterpret_cast<TryCatch*>(try_catch_address)
+#include "hashmap.h"
+namespace v8 {
+namespace internal {
-#else // #if !defined(__mips) || defined(USE_SIMULATOR)
+// -----------------------------------------------------------------------------
+// Utility functions
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
- reinterpret_cast<Object*>(\
- assembler::mips::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
- p0, p1, p2, p3, p4))
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- assembler::mips::Simulator::current()->Call(\
- FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
-#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
- try_catch_address == NULL ? \
- NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+ CachePage() {
+ memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+ }
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
-namespace assembler {
-namespace mips {
+ char* CachedData(int offset) {
+ return &data_[offset];
+ }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
class Simulator {
public:
- friend class Debugger;
+ friend class MipsDebugger;
// Registers are declared in order. See SMRL chapter 2.
enum Register {
@@ -143,7 +175,7 @@
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
- static Simulator* current();
+ static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the MIPS
 // architecture specification and is off by 8 from the currently executing
@@ -152,9 +184,15 @@
int32_t get_register(int reg) const;
// Same for FPURegisters
void set_fpu_register(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
void set_fpu_register_double(int fpureg, double value);
int32_t get_fpu_register(int fpureg) const;
+ int64_t get_fpu_register_long(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
double get_fpu_register_double(int fpureg) const;
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int32_t value);
@@ -172,7 +210,7 @@
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
- int32_t Call(byte_* entry, int argument_count, ...);
+ int32_t Call(byte* entry, int argument_count, ...);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -180,6 +218,14 @@
// Pop an address from the JS stack.
uintptr_t PopAddress();
+ // ICache checking.
+ static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@@ -223,9 +269,17 @@
inline int32_t SetDoubleHIW(double* addr);
inline int32_t SetDoubleLOW(double* addr);
-
// Executing is handled based on the instruction type.
void DecodeTypeRegister(Instruction* instr);
+
+ // Helper function for DecodeTypeRegister.
+ void ConfigureTypeRegister(Instruction* instr,
+ int32_t& alu_out,
+ int64_t& i64hilo,
+ uint64_t& u64hilo,
+ int32_t& next_pc,
+ bool& do_interrupt);
+
void DecodeTypeImmediate(Instruction* instr);
void DecodeTypeJump(Instruction* instr);
@@ -239,11 +293,18 @@
if (instr->IsForbiddenInBranchDelay()) {
V8_Fatal(__FILE__, __LINE__,
"Eror:Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeField());
+ instr->OpcodeValue());
}
InstructionDecode(instr);
}
+ // ICache.
+ static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+ static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+ int size);
+ static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+
enum Exception {
none,
kIntegerOverflow,
@@ -258,7 +319,7 @@
// Runtime call support.
static void* RedirectExternalReference(void* external_function,
- bool fp_return);
+ ExternalReference::Type type);
// Used for real time calls that takes two double values as arguments and
// returns a double.
@@ -269,19 +330,40 @@
int32_t registers_[kNumSimuRegisters];
// Coprocessor Registers.
int32_t FPUregisters_[kNumFPURegisters];
+ // FPU control register.
+ uint32_t FCSR_;
// Simulator support.
char* stack_;
+ size_t stack_size_;
bool pc_modified_;
int icount_;
- static bool initialized_;
+ int break_count_;
+
+ // Icache simulation
+ v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
+
+ v8::internal::Isolate* isolate_;
};
-} } // namespace assembler::mips
+
+// When running with the simulator, transition into simulated execution at
+// this point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+ FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ Simulator::current(Isolate::Current())->Call( \
+ entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+
+#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
+ try_catch_address == NULL ? \
+ NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
@@ -292,20 +374,21 @@
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
- return assembler::mips::Simulator::current()->StackLimit();
+ return Simulator::current(Isolate::Current())->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
- assembler::mips::Simulator* sim = assembler::mips::Simulator::current();
+ Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
- assembler::mips::Simulator::current()->PopAddress();
+ Simulator::current(Isolate::Current())->PopAddress();
}
};
-#endif // !defined(__mips) || defined(USE_SIMULATOR)
+} } // namespace v8::internal
+#endif // !defined(USE_SIMULATOR)
#endif // V8_MIPS_SIMULATOR_MIPS_H_
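
The CachePage class added above models the simulator's instruction cache as 4 KB pages split into 4-byte lines, with one validity byte per line; FlushICache invalidates the lines covering a flushed range and CheckICache revalidates them before an instruction executes. Below is a minimal standalone sketch of the address-to-line mapping implied by those constants; CachePageSketch and main are illustrative names, not part of the patch.

#include <cstdio>
#include <cstring>

// Constants mirrored from the CachePage declaration in the patch.
static const int kPageShift = 12;             // 4 KB simulated cache pages.
static const int kPageSize = 1 << kPageShift;
static const int kPageMask = kPageSize - 1;
static const int kLineShift = 2;              // 4-byte cache lines.

struct CachePageSketch {
  enum { LINE_VALID = 0, LINE_INVALID = 1 };
  char data[kPageSize];                       // Cached instruction bytes.
  char validity[kPageSize >> kLineShift];     // One validity byte per line.
  CachePageSketch() { memset(validity, LINE_INVALID, sizeof(validity)); }
  char* ValidityByte(int offset) { return &validity[offset >> kLineShift]; }
};

int main() {
  unsigned address = 0x00402af8;              // Some simulated code address.
  unsigned page_base = address & ~kPageMask;  // Key used to look up the page.
  int offset = address & kPageMask;           // Byte offset inside the page.
  printf("page 0x%x, offset %d, line %d\n",
         page_base, offset, offset >> kLineShift);

  // FlushICache would mark the lines covering a flushed range invalid;
  // CheckICache would later recopy the instruction bytes and revalidate them.
  CachePageSketch page;
  *page.ValidityByte(offset) = CachePageSketch::LINE_VALID;
  return 0;
}
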
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 683b862..1a49558 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -57,6 +57,12 @@
}
+void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+ MacroAssembler* masm, int index, Register prototype, Label* miss) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
// Load a fast property out of a holder object (src). In-object properties
// are loaded directly otherwise the property is loaded from the properties
// fixed array.
@@ -75,6 +81,20 @@
}
+// Generate code to load the length from a string object and return the length.
+// If the receiver object is not a string or a wrapped string object the
+// execution continues at the miss label. The register containing the
+// receiver is potentially clobbered.
+void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Label* miss,
+ bool support_wrappers) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
Register receiver,
Register scratch1,
@@ -84,7 +104,7 @@
}
-// Generate StoreField code, value is passed in r0 register.
+// Generate StoreField code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
@@ -104,15 +124,94 @@
}
+class CallInterceptorCompiler BASE_EMBEDDED {
+ public:
+ CallInterceptorCompiler(StubCompiler* stub_compiler,
+ const ParameterCount& arguments,
+ Register name)
+ : stub_compiler_(stub_compiler),
+ arguments_(arguments),
+ name_(name) {}
+
+ void Compile(MacroAssembler* masm,
+ JSObject* object,
+ JSObject* holder,
+ String* name,
+ LookupResult* lookup,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Label* miss) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ private:
+ void CompileCacheable(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ JSObject* interceptor_holder,
+ LookupResult* lookup,
+ String* name,
+ const CallOptimization& optimization,
+ Label* miss_label) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void CompileRegular(MacroAssembler* masm,
+ JSObject* object,
+ Register receiver,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ String* name,
+ JSObject* interceptor_holder,
+ Label* miss_label) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ void LoadWithInterceptor(MacroAssembler* masm,
+ Register receiver,
+ Register holder,
+ JSObject* holder_obj,
+ Register scratch,
+ Label* interceptor_succeeded) {
+ UNIMPLEMENTED_MIPS();
+ }
+
+ StubCompiler* stub_compiler_;
+ const ParameterCount& arguments_;
+ Register name_;
+};
+
+
#undef __
#define __ ACCESS_MASM(masm())
+Register StubCompiler::CheckPrototypes(JSObject* object,
+ Register object_reg,
+ JSObject* holder,
+ Register holder_reg,
+ Register scratch1,
+ Register scratch2,
+ String* name,
+ int save_at_depth,
+ Label* miss) {
+ UNIMPLEMENTED_MIPS();
+ return no_reg;
+}
+
+
void StubCompiler::GenerateLoadField(JSObject* object,
JSObject* holder,
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
int index,
String* name,
Label* miss) {
@@ -125,6 +224,7 @@
Register receiver,
Register scratch1,
Register scratch2,
+ Register scratch3,
Object* value,
String* name,
Label* miss) {
@@ -132,282 +232,365 @@
}
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
- JSObject* holder,
- Register receiver,
- Register name_reg,
- Register scratch1,
- Register scratch2,
- AccessorInfo* callback,
- String* name,
- Label* miss,
- Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+ JSObject* holder,
+ Register receiver,
+ Register name_reg,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ AccessorInfo* callback,
+ String* name,
+ Label* miss) {
UNIMPLEMENTED_MIPS();
- __ break_(0x470);
- return false; // UNIMPLEMENTED RETURN
+ return NULL;
}
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
Register scratch1,
Register scratch2,
+ Register scratch3,
String* name,
Label* miss) {
UNIMPLEMENTED_MIPS();
- __ break_(0x505);
}
-Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
- // Registers:
- // a1: function
- // ra: return address
-
- // Enter an internal frame.
- __ EnterInternalFrame();
- // Preserve the function.
- __ Push(a1);
- // Setup aligned call.
- __ SetupAlignedCall(t0, 1);
- // Push the function on the stack as the argument to the runtime function.
- __ Push(a1);
- // Call the runtime function
- __ CallRuntime(Runtime::kLazyCompile, 1);
- __ ReturnFromAlignedCall();
- // Calculate the entry point.
- __ addiu(t9, v0, Code::kHeaderSize - kHeapObjectTag);
- // Restore saved function.
- __ Pop(a1);
- // Tear down temporary frame.
- __ LeaveInternalFrame();
- // Do a tail-call of the compiled function.
- __ Jump(t9);
-
- return GetCodeWithFlags(flags, "LazyCompileStub");
-}
-
-
-Object* CallStubCompiler::CompileCallField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
+void CallStubCompiler::GenerateNameCheck(String* name, Label* miss) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
-Object* CallStubCompiler::CompileArrayPushCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
+void CallStubCompiler::GenerateGlobalReceiverCheck(JSObject* object,
+ JSObject* holder,
+ String* name,
+ Label* miss) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
-Object* CallStubCompiler::CompileArrayPopCall(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
+void CallStubCompiler::GenerateLoadFunctionFromCell(JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ Label* miss) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
}
-Object* CallStubCompiler::CompileCallConstant(Object* object,
- JSObject* holder,
- JSFunction* function,
- String* name,
- CheckType check) {
+MaybeObject* CallStubCompiler::GenerateMissBranch() {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
- UNIMPLEMENTED_MIPS();
- __ break_(0x782);
- return GetCode(INTERCEPTOR, name);
-}
-
-
-Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- JSFunction* function,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Object* StoreStubCompiler::CompileStoreField(JSObject* object,
- int index,
- Map* transition,
- String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,
- AccessorInfo* callback,
+MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
+ JSObject* holder,
+ int index,
String* name) {
UNIMPLEMENTED_MIPS();
- __ break_(0x906);
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+MaybeObject* CallStubCompiler::CompileArrayPushCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* CallStubCompiler::CompileArrayPopCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
- JSGlobalPropertyCell* cell,
- String* name) {
+MaybeObject* CallStubCompiler::CompileStringCharCodeAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* LoadStubCompiler::CompileLoadField(JSObject* object,
- JSObject* holder,
- int index,
- String* name) {
+MaybeObject* CallStubCompiler::CompileStringCharAtCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* LoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* object,
- JSObject* holder,
- AccessorInfo* callback) {
+MaybeObject* CallStubCompiler::CompileStringFromCharCodeCall(
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
- JSObject* holder,
- Object* value,
- String* name) {
+MaybeObject* CallStubCompiler::CompileMathFloorCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
- JSObject* holder,
- String* name) {
+MaybeObject* CallStubCompiler::CompileMathAbsCall(Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
- GlobalObject* holder,
- JSGlobalPropertyCell* cell,
- String* name,
- bool is_dont_delete) {
+MaybeObject* CallStubCompiler::CompileFastApiCall(
+ const CallOptimization& optimization,
+ Object* object,
+ JSObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
- JSObject* receiver,
- JSObject* holder,
- int index) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
- JSObject* receiver,
+MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
- AccessorInfo* callback) {
+ JSFunction* function,
+ String* name,
+ CheckType check) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
- JSObject* receiver,
- JSObject* holder,
- Object* value) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+MaybeObject* CallStubCompiler::CompileCallInterceptor(JSObject* object,
JSObject* holder,
String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+MaybeObject* CallStubCompiler::CompileCallGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ JSFunction* function,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-// TODO(1224671): implement the fast case.
-Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
- UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
-}
-
-
-Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+MaybeObject* StoreStubCompiler::CompileStoreField(JSObject* object,
int index,
Map* transition,
String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* ConstructStubCompiler::CompileConstructStub(
- SharedFunctionInfo* shared) {
+MaybeObject* StoreStubCompiler::CompileStoreCallback(JSObject* object,
+ AccessorInfo* callback,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* ExternalArrayStubCompiler::CompileKeyedLoadStub(
- ExternalArrayType array_type, Code::Flags flags) {
+MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
}
-Object* ExternalArrayStubCompiler::CompileKeyedStoreStub(
- ExternalArrayType array_type, Code::Flags flags) {
+MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
+ JSGlobalPropertyCell* cell,
+ String* name) {
UNIMPLEMENTED_MIPS();
- return reinterpret_cast<Object*>(NULL); // UNIMPLEMENTED RETURN
+ return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadNonexistent(String* name,
+ JSObject* object,
+ JSObject* last) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadField(JSObject* object,
+ JSObject* holder,
+ int index,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadCallback(String* name,
+ JSObject* object,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadConstant(JSObject* object,
+ JSObject* holder,
+ Object* value,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
+ GlobalObject* holder,
+ JSGlobalPropertyCell* cell,
+ String* name,
+ bool is_dont_delete) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadField(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ int index) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadCallback(
+ String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ AccessorInfo* callback) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
+ JSObject* receiver,
+ JSObject* holder,
+ Object* value) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
+ JSObject* holder,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedLoadStubCompiler::CompileLoadSpecialized(JSObject* receiver) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
+ int index,
+ Map* transition,
+ String* name) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* KeyedStoreStubCompiler::CompileStoreSpecialized(
+ JSObject* receiver) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
+ JSObject* receiver_object,
+ ExternalArrayType array_type,
+ Code::Flags flags) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
+}
+
+
+MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
+ JSObject* receiver_object,
+ ExternalArrayType array_type,
+ Code::Flags flags) {
+ UNIMPLEMENTED_MIPS();
+ return NULL;
}
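
All of the stub compiler entry points above now return MaybeObject* rather than Object*, so an allocation failure can be propagated to the caller instead of being encoded as a NULL Object*. Below is a self-contained sketch of the failure-propagation idiom that signature implies; Object, MaybeObject, CompileSomething and Caller are simplified stand-ins, not V8's real classes.

#include <cstdio>

// Simplified stand-ins for the real V8 types; only the checking idiom matters.
struct Object {};
struct MaybeObject {
  Object* value;                          // NULL stands in for a Failure.
  bool ToObject(Object** out) { *out = value; return value != NULL; }
};

static Object heap_object;

// Imitates one of the compiler entry points above: it may fail, and a failure
// must be propagated to the caller rather than used as a code object.
MaybeObject* CompileSomething(bool fail) {
  static MaybeObject ok = { &heap_object };
  static MaybeObject failure = { NULL };
  return fail ? &failure : &ok;
}

MaybeObject* Caller(bool fail) {
  Object* code;
  MaybeObject* maybe_code = CompileSomething(fail);
  if (!maybe_code->ToObject(&code)) return maybe_code;  // Propagate failure.
  printf("compiled code at %p\n", static_cast<void*>(code));
  return maybe_code;
}

int main() {
  Caller(false);  // Prints the address of the fake code object.
  Caller(true);   // Silently propagates the failure.
  return 0;
}
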
diff --git a/src/mips/fast-codegen-mips.cc b/src/mips/virtual-frame-mips-inl.h
similarity index 65%
rename from src/mips/fast-codegen-mips.cc
rename to src/mips/virtual-frame-mips-inl.h
index 186f9fa..f0d2fab 100644
--- a/src/mips/fast-codegen-mips.cc
+++ b/src/mips/virtual-frame-mips-inl.h
@@ -25,53 +25,34 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
+#ifndef V8_VIRTUAL_FRAME_MIPS_INL_H_
+#define V8_VIRTUAL_FRAME_MIPS_INL_H_
-#if defined(V8_TARGET_ARCH_MIPS)
-
-#include "codegen-inl.h"
-#include "fast-codegen.h"
+#include "assembler-mips.h"
+#include "virtual-frame-mips.h"
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-Register FastCodeGenerator::accumulator0() { return no_reg; }
-Register FastCodeGenerator::accumulator1() { return no_reg; }
-Register FastCodeGenerator::scratch0() { return no_reg; }
-Register FastCodeGenerator::scratch1() { return no_reg; }
-Register FastCodeGenerator::receiver_reg() { return no_reg; }
-Register FastCodeGenerator::context_reg() { return no_reg; }
-
-
-void FastCodeGenerator::Generate(CompilationInfo* info) {
+MemOperand VirtualFrame::ParameterAt(int index) {
UNIMPLEMENTED_MIPS();
+ return MemOperand(zero_reg, 0);
}
-void FastCodeGenerator::EmitThisPropertyStore(Handle<String> name) {
+// The receiver frame slot.
+MemOperand VirtualFrame::Receiver() {
UNIMPLEMENTED_MIPS();
+ return MemOperand(zero_reg, 0);
}
-void FastCodeGenerator::EmitGlobalVariableLoad(Handle<Object> name) {
+void VirtualFrame::Forget(int count) {
UNIMPLEMENTED_MIPS();
}
-void FastCodeGenerator::EmitThisPropertyLoad(Handle<String> name) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void FastCodeGenerator::EmitBitOr() {
- UNIMPLEMENTED_MIPS();
-}
-
-#undef __
-
-
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_MIPS
+#endif // V8_VIRTUAL_FRAME_MIPS_INL_H_
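
The new inline header leaves ParameterAt() and Receiver() as UNIMPLEMENTED_MIPS stubs. The frame-pointer-relative arithmetic they will eventually need is the same as in the code removed further down in this patch, where index -1 denotes the receiver. Below is a standalone sketch of that offset calculation; ParameterOffset is an illustrative helper, and kPointerSize is assumed to be 4 for 32-bit MIPS.

#include <cassert>
#include <cstdio>

static const int kPointerSize = 4;  // 32-bit MIPS.

// Byte offset of a parameter slot relative to the frame pointer, as in the
// removed ParameterAt(): parameters sit above fp and index -1 is the receiver.
int ParameterOffset(int index, int parameter_count) {
  assert(index >= -1 && index <= parameter_count);
  return (1 + parameter_count - index) * kPointerSize;
}

int main() {
  int parameter_count = 2;
  printf("receiver    at fp+%d\n", ParameterOffset(-1, parameter_count));  // 16
  printf("parameter 0 at fp+%d\n", ParameterOffset(0, parameter_count));   // 12
  printf("parameter 1 at fp+%d\n", ParameterOffset(1, parameter_count));   // 8
  return 0;
}
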
diff --git a/src/mips/virtual-frame-mips.cc b/src/mips/virtual-frame-mips.cc
index b61ce75..22fe9f0 100644
--- a/src/mips/virtual-frame-mips.cc
+++ b/src/mips/virtual-frame-mips.cc
@@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
#include "v8.h"
#if defined(V8_TARGET_ARCH_MIPS)
@@ -39,44 +37,50 @@
namespace v8 {
namespace internal {
-// -------------------------------------------------------------------------
-// VirtualFrame implementation.
-
#define __ ACCESS_MASM(masm())
-void VirtualFrame::SyncElementBelowStackPointer(int index) {
- UNREACHABLE();
+void VirtualFrame::PopToA1A0() {
+ UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::SyncElementByPushing(int index) {
- UNREACHABLE();
+void VirtualFrame::PopToA1() {
+ UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::SyncRange(int begin, int end) {
- // All elements are in memory on MIPS (ie, synced).
-#ifdef DEBUG
- for (int i = begin; i <= end; i++) {
- ASSERT(elements_[i].is_synced());
- }
-#endif
+void VirtualFrame::PopToA0() {
+ UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
+void VirtualFrame::MergeTo(const VirtualFrame* expected,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::MergeTo(VirtualFrame* expected,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::MergeTOSTo(
+ VirtualFrame::TopOfStack expected_top_of_stack_state,
+ Condition cond,
+ Register r1,
+ const Operand& r2) {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::Enter() {
- // TODO(MIPS): Implement DEBUG
-
- // We are about to push four values to the frame.
- Adjust(4);
- __ MultiPush(ra.bit() | fp.bit() | cp.bit() | a1.bit());
- // Adjust FP to point to saved FP.
- __ addiu(fp, sp, 2 * kPointerSize);
+ UNIMPLEMENTED_MIPS();
}
@@ -86,232 +90,216 @@
void VirtualFrame::AllocateStackSlots() {
- int count = local_count();
- if (count > 0) {
- Comment cmnt(masm(), "[ Allocate space for locals");
- Adjust(count);
- // Initialize stack slots with 'undefined' value.
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ addiu(sp, sp, -count * kPointerSize);
- for (int i = 0; i < count; i++) {
- __ sw(t0, MemOperand(sp, (count-i-1)*kPointerSize));
- }
- }
-}
-
-
-void VirtualFrame::SaveContextRegister() {
UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::RestoreContextRegister() {
- UNIMPLEMENTED_MIPS();
-}
-
void VirtualFrame::PushReceiverSlotAddress() {
UNIMPLEMENTED_MIPS();
}
-int VirtualFrame::InvalidateFrameSlotAt(int index) {
- return kIllegalIndex;
-}
-
-
-void VirtualFrame::TakeFrameSlotAt(int index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::StoreToFrameSlotAt(int index) {
- UNIMPLEMENTED_MIPS();
-}
-
-
void VirtualFrame::PushTryHandler(HandlerType type) {
UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::RawCallStub(CodeStub* stub) {
+void VirtualFrame::CallJSFunction(int arg_count) {
UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg) {
+void VirtualFrame::CallRuntime(const Runtime::Function* f, int arg_count) {
UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::CallStub(CodeStub* stub, Result* arg0, Result* arg1) {
- UNIMPLEMENTED_MIPS();
-}
-
-
-void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(f, arg_count);
-}
-
-
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ CallRuntime(id, arg_count);
-}
-
-
-void VirtualFrame::CallAlignedRuntime(Runtime::Function* f, int arg_count) {
UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::CallAlignedRuntime(Runtime::FunctionId id, int arg_count) {
+#ifdef ENABLE_DEBUGGER_SUPPORT
+void VirtualFrame::DebugBreak() {
UNIMPLEMENTED_MIPS();
}
+#endif
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- Result* arg_count_register,
int arg_count) {
UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- int dropped_args) {
- switch (code->kind()) {
- case Code::CALL_IC:
- break;
- case Code::FUNCTION:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::KEYED_LOAD_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::LOAD_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::KEYED_STORE_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::STORE_IC:
- UNIMPLEMENTED_MIPS();
- break;
- case Code::BUILTIN:
- UNIMPLEMENTED_MIPS();
- break;
- default:
- UNREACHABLE();
- break;
- }
- Forget(dropped_args);
- ASSERT(cgen()->HasValidEntryRegisters());
- __ Call(code, rmode);
+void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
+ UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::CallCodeObject(Handle<Code> code,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args) {
+void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallKeyedLoadIC() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::CallKeyedStoreIC() {
UNIMPLEMENTED_MIPS();
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args,
- bool set_auto_args_slots) {
+ int dropped_args) {
UNIMPLEMENTED_MIPS();
}
+// NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS.
+const bool VirtualFrame::kA0InUse[TOS_STATES] =
+ { false, true, false, true, true };
+const bool VirtualFrame::kA1InUse[TOS_STATES] =
+ { false, false, true, true, true };
+const int VirtualFrame::kVirtualElements[TOS_STATES] =
+ { 0, 1, 1, 2, 2 };
+const Register VirtualFrame::kTopRegister[TOS_STATES] =
+ { a0, a0, a1, a1, a0 };
+const Register VirtualFrame::kBottomRegister[TOS_STATES] =
+ { a0, a0, a1, a0, a1 };
+const Register VirtualFrame::kAllocatedRegisters[
+ VirtualFrame::kNumberOfAllocatedRegisters] = { a2, a3, t0, t1, t2 };
+// Popping is done by the transition implied by kStateAfterPop. Of course if
+// there were no stack slots allocated to registers then the physical SP must
+// be adjusted.
+const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
+ { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
+// Pushing is done by the transition implied by kStateAfterPush. Of course if
+// the maximum number of registers was already allocated to the top of stack
+// slots then one register must be physically pushed onto the stack.
+const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
+ { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
+
+
void VirtualFrame::Drop(int count) {
- ASSERT(count >= 0);
- ASSERT(height() >= count);
- int num_virtual_elements = (element_count() - 1) - stack_pointer_;
-
- // Emit code to lower the stack pointer if necessary.
- if (num_virtual_elements < count) {
- int num_dropped = count - num_virtual_elements;
- stack_pointer_ -= num_dropped;
- __ addiu(sp, sp, num_dropped * kPointerSize);
- }
-
- // Discard elements from the virtual frame and free any registers.
- for (int i = 0; i < count; i++) {
- FrameElement dropped = elements_.RemoveLast();
- if (dropped.is_register()) {
- Unuse(dropped.reg());
- }
- }
-}
-
-
-void VirtualFrame::DropFromVFrameOnly(int count) {
UNIMPLEMENTED_MIPS();
}
-Result VirtualFrame::Pop() {
+void VirtualFrame::Pop() {
UNIMPLEMENTED_MIPS();
- Result res = Result();
- return res; // UNIMPLEMENTED RETURN
}
void VirtualFrame::EmitPop(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_--;
- elements_.RemoveLast();
- __ Pop(reg);
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToA0() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToA1() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToA1A0() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Register VirtualFrame::Peek() {
+ UNIMPLEMENTED_MIPS();
+ return no_reg;
+}
+
+
+Register VirtualFrame::Peek2() {
+ UNIMPLEMENTED_MIPS();
+ return no_reg;
+}
+
+
+void VirtualFrame::Dup() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::Dup2() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
+ UNIMPLEMENTED_MIPS();
+ return no_reg;
+}
+
+
+void VirtualFrame::EnsureOneFreeTOSRegister() {
+ UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitMultiPop(RegList regs) {
- ASSERT(stack_pointer_ == element_count() - 1);
- for (int16_t i = 0; i < kNumRegisters; i++) {
- if ((regs & (1 << i)) != 0) {
- stack_pointer_--;
- elements_.RemoveLast();
- }
- }
- __ MultiPop(regs);
+ UNIMPLEMENTED_MIPS();
}
-void VirtualFrame::EmitPush(Register reg) {
- ASSERT(stack_pointer_ == element_count() - 1);
- elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
- stack_pointer_++;
- __ Push(reg);
+void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+Register VirtualFrame::GetTOSRegister() {
+ UNIMPLEMENTED_MIPS();
+ return no_reg;
+}
+
+
+void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
+ UNIMPLEMENTED_MIPS();
}
void VirtualFrame::EmitMultiPush(RegList regs) {
- ASSERT(stack_pointer_ == element_count() - 1);
- for (int16_t i = kNumRegisters; i > 0; i--) {
- if ((regs & (1 << i)) != 0) {
- elements_.Add(FrameElement::MemoryElement(NumberInfo::Unknown()));
- stack_pointer_++;
- }
- }
- __ MultiPush(regs);
-}
-
-
-void VirtualFrame::EmitArgumentSlots(RegList reglist) {
UNIMPLEMENTED_MIPS();
}
+
+void VirtualFrame::EmitMultiPushReversed(RegList regs) {
+ UNIMPLEMENTED_MIPS();
+}
+
+
+void VirtualFrame::SpillAll() {
+ UNIMPLEMENTED_MIPS();
+}
+
+
#undef __
} } // namespace v8::internal
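
The tables added above (kStateAfterPush, kStateAfterPop, kVirtualElements) drive a small state machine recording which of a0/a1 currently hold the top of the expression stack. Below is a standalone sketch of how pushes and pops walk those states; the tables are copied from the hunk above, while Name and main are illustrative only.

#include <cstdio>

// The five top-of-stack states from the patch: which of a0/a1 hold the top
// elements of the expression stack, and in which order.
enum TopOfStack {
  NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS, TOS_STATES
};

// Transition tables copied from the hunk above.
const TopOfStack kStateAfterPush[TOS_STATES] =
    { A0_TOS, A1_A0_TOS, A0_A1_TOS, A0_A1_TOS, A1_A0_TOS };
const TopOfStack kStateAfterPop[TOS_STATES] =
    { NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, A0_TOS, A1_TOS };
const int kVirtualElements[TOS_STATES] = { 0, 1, 1, 2, 2 };

const char* Name(TopOfStack s) {
  static const char* names[TOS_STATES] = {
    "NO_TOS_REGISTERS", "A0_TOS", "A1_TOS", "A1_A0_TOS", "A0_A1_TOS"
  };
  return names[s];
}

int main() {
  // Two pushes and one pop, starting from a fully spilled frame.
  TopOfStack state = NO_TOS_REGISTERS;
  state = kStateAfterPush[state];  // A0_TOS: the top now lives in a0.
  state = kStateAfterPush[state];  // A1_A0_TOS: a0 and a1 both hold elements.
  // A third push would find both TOS registers busy, so EmitPush would first
  // have to spill one of them to the physical stack (EnsureOneFreeTOSRegister).
  state = kStateAfterPop[state];   // Back to A0_TOS.
  printf("final state %s with %d element(s) in registers\n",
         Name(state), kVirtualElements[state]);
  return 0;
}
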
diff --git a/src/mips/virtual-frame-mips.h b/src/mips/virtual-frame-mips.h
index b32e2ae..be8b74e 100644
--- a/src/mips/virtual-frame-mips.h
+++ b/src/mips/virtual-frame-mips.h
@@ -30,11 +30,13 @@
#define V8_MIPS_VIRTUAL_FRAME_MIPS_H_
#include "register-allocator.h"
-#include "scopes.h"
namespace v8 {
namespace internal {
+// This dummy class is only used to create invalid virtual frames.
+extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
+
// -------------------------------------------------------------------------
// Virtual frames
@@ -47,14 +49,54 @@
class VirtualFrame : public ZoneObject {
public:
+ class RegisterAllocationScope;
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
- // generator's current frame, but no attempt is made to require it
- // to stay spilled. It is intended as documentation while the code
- // generator is being transformed.
+ // generator's current frame, and keeps it spilled.
class SpilledScope BASE_EMBEDDED {
public:
+ explicit SpilledScope(VirtualFrame* frame)
+ : old_is_spilled_(
+ Isolate::Current()->is_virtual_frame_in_spilled_scope()) {
+ if (frame != NULL) {
+ if (!old_is_spilled_) {
+ frame->SpillAll();
+ } else {
+ frame->AssertIsSpilled();
+ }
+ }
+ Isolate::Current()->set_is_virtual_frame_in_spilled_scope(true);
+ }
+ ~SpilledScope() {
+ Isolate::Current()->set_is_virtual_frame_in_spilled_scope(
+ old_is_spilled_);
+ }
+ static bool is_spilled() {
+ return Isolate::Current()->is_virtual_frame_in_spilled_scope();
+ }
+
+ private:
+ int old_is_spilled_;
+
SpilledScope() {}
+
+ friend class RegisterAllocationScope;
+ };
+
+ class RegisterAllocationScope BASE_EMBEDDED {
+ public:
+ // A utility class to introduce a scope where the virtual frame
+    // is not spilled, i.e. where register allocation occurs. Eventually
+ // when RegisterAllocationScope is ubiquitous it can be removed
+ // along with the (by then unused) SpilledScope class.
+ inline explicit RegisterAllocationScope(CodeGenerator* cgen);
+ inline ~RegisterAllocationScope();
+
+ private:
+ CodeGenerator* cgen_;
+ bool old_is_spilled_;
+
+ RegisterAllocationScope() {}
};
// An illegal index into the virtual frame.
@@ -63,45 +105,49 @@
// Construct an initial virtual frame on entry to a JS function.
inline VirtualFrame();
+ // Construct an invalid virtual frame, used by JumpTargets.
+ inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
+
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
- CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
- MacroAssembler* masm() { return cgen()->masm(); }
-
- // Create a duplicate of an existing valid frame element.
- FrameElement CopyElementAt(int index,
- NumberInfo info = NumberInfo::Unknown());
+ inline CodeGenerator* cgen() const;
+ inline MacroAssembler* masm();
// The number of elements on the virtual frame.
- int element_count() { return elements_.length(); }
+ int element_count() const { return element_count_; }
// The height of the virtual expression stack.
- int height() {
- return element_count() - expression_base_index();
- }
-
- int register_location(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num];
- }
-
- int register_location(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)];
- }
-
- void set_register_location(Register reg, int index) {
- register_locations_[RegisterAllocator::ToNumber(reg)] = index;
- }
+ inline int height() const;
bool is_used(int num) {
- ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
- return register_locations_[num] != kIllegalIndex;
- }
-
- bool is_used(Register reg) {
- return register_locations_[RegisterAllocator::ToNumber(reg)]
- != kIllegalIndex;
+ switch (num) {
+ case 0: { // a0.
+ return kA0InUse[top_of_stack_state_];
+ }
+ case 1: { // a1.
+ return kA1InUse[top_of_stack_state_];
+ }
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6: { // a2 to a3, t0 to t2.
+ ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
+ ASSERT(num >= kFirstAllocatedRegister);
+ if ((register_allocation_map_ &
+ (1 << (num - kFirstAllocatedRegister))) == 0) {
+ return false;
+ } else {
+ return true;
+ }
+ }
+ default: {
+ ASSERT(num < kFirstAllocatedRegister ||
+ num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
+ return false;
+ }
+ }
}
// Add extra in-memory elements to the top of the frame to match an actual
@@ -110,53 +156,60 @@
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (eg,
- // the frame after a runtime call). No code is emitted.
- void Forget(int count) {
- ASSERT(count >= 0);
- ASSERT(stack_pointer_ == element_count() - 1);
- stack_pointer_ -= count;
- // On mips, all elements are in memory, so there is no extra bookkeeping
- // (registers, copies, etc.) beyond dropping the elements.
- elements_.Rewind(stack_pointer_ + 1);
- }
+ // the frame after a runtime call). No code is emitted except to bring the
+ // frame to a spilled state.
+ void Forget(int count);
- // Forget count elements from the top of the frame and adjust the stack
- // pointer downward. This is used, for example, before merging frames at
- // break, continue, and return targets.
- void ForgetElements(int count);
// Spill all values from the frame to memory.
void SpillAll();
+ void AssertIsSpilled() const {
+ ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
+ ASSERT(register_allocation_map_ == 0);
+ }
+
+ void AssertIsNotSpilled() {
+ ASSERT(!SpilledScope::is_spilled());
+ }
+
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
- if (is_used(reg)) SpillElementAt(register_location(reg));
+ UNIMPLEMENTED();
}
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
- // (ie, they all have frame-external references).
+ // (ie, they all have frame-external references). Unimplemented.
Register SpillAnyRegister();
- // Prepare this virtual frame for merging to an expected frame by
- // performing some state changes that do not require generating
- // code. It is guaranteed that no code will be generated.
- void PrepareMergeTo(VirtualFrame* expected);
-
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
- void MergeTo(VirtualFrame* expected);
+ void MergeTo(const VirtualFrame* expected,
+ Condition cond = al,
+ Register r1 = no_reg,
+ const Operand& r2 = Operand(no_reg));
+
+ void MergeTo(VirtualFrame* expected,
+ Condition cond = al,
+ Register r1 = no_reg,
+ const Operand& r2 = Operand(no_reg));
+
+ // Checks whether this frame can be branched to by the other frame.
+ bool IsCompatibleWith(const VirtualFrame* other) const {
+ return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
+ }
+
+ inline void ForgetTypeInfo() {
+ tos_known_smi_map_ = 0;
+ }
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
}
// (Re)attach a frame to its code generator. This informs the register
@@ -164,10 +217,6 @@
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
- RegisterAllocator* cgen_allocator = cgen()->allocator();
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- if (is_used(i)) cgen_allocator->Unuse(i);
- }
}
// Emit code for the physical JS entry and exit frame sequences. After
@@ -177,176 +226,142 @@
void Enter();
void Exit();
- // Prepare for returning from the frame by spilling locals and
- // dropping all non-locals elements in the virtual frame. This
- // avoids generating unnecessary merge code when jumping to the
- // shared return site. Emits code for spills.
- void PrepareForReturn();
+  // Prepare for returning from the frame by dropping all elements in the
+  // virtual frame. This avoids generating unnecessary merge code when
+  // jumping to the shared return site. No spill code emitted. Value to
+  // return should be in v0.
+ inline void PrepareForReturn();
+
+  // Number of local variables after which we use a loop for allocating.
+ static const int kLocalVarBound = 5;
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
- MemOperand Top() { return MemOperand(sp, 0); }
+ MemOperand Top() {
+ AssertIsSpilled();
+ return MemOperand(sp, 0);
+ }
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
- return MemOperand(sp, index * kPointerSize);
+ int adjusted_index = index - kVirtualElements[top_of_stack_state_];
+ ASSERT(adjusted_index >= 0);
+ return MemOperand(sp, adjusted_index * kPointerSize);
}
- // Random-access store to a frame-top relative frame element. The result
- // becomes owned by the frame and is invalidated.
- void SetElementAt(int index, Result* value);
-
- // Set a frame element to a constant. The index is frame-top relative.
- void SetElementAt(int index, Handle<Object> value) {
- Result temp(value);
- SetElementAt(index, &temp);
+ bool KnownSmiAt(int index) {
+ if (index >= kTOSKnownSmiMapSize) return false;
+ return (tos_known_smi_map_ & (1 << index)) != 0;
}
-
- void PushElementAt(int index) {
- PushFrameSlotAt(element_count() - index - 1);
- }
-
// A frame-allocated local as an assembly operand.
- MemOperand LocalAt(int index) {
- ASSERT(0 <= index);
- ASSERT(index < local_count());
- return MemOperand(s8_fp, kLocal0Offset - index * kPointerSize);
- }
-
- // Push a copy of the value of a local frame slot on top of the frame.
- void PushLocalAt(int index) {
- PushFrameSlotAt(local0_index() + index);
- }
-
- // Push the value of a local frame slot on top of the frame and invalidate
- // the local slot. The slot should be written to before trying to read
- // from it again.
- void TakeLocalAt(int index) {
- TakeFrameSlotAt(local0_index() + index);
- }
-
- // Store the top value on the virtual frame into a local frame slot. The
- // value is left in place on top of the frame.
- void StoreToLocalAt(int index) {
- StoreToFrameSlotAt(local0_index() + index);
- }
+ inline MemOperand LocalAt(int index);
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// The function frame slot.
- MemOperand Function() { return MemOperand(s8_fp, kFunctionOffset); }
-
- // Push the function on top of the frame.
- void PushFunction() { PushFrameSlotAt(function_index()); }
+ MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
// The context frame slot.
- MemOperand Context() { return MemOperand(s8_fp, kContextOffset); }
-
- // Save the value of the cp register to the context frame slot.
- void SaveContextRegister();
-
- // Restore the cp register from the value of the context frame
- // slot.
- void RestoreContextRegister();
+ MemOperand Context() { return MemOperand(fp, kContextOffset); }
// A parameter as an assembly operand.
- MemOperand ParameterAt(int index) {
- // Index -1 corresponds to the receiver.
- ASSERT(-1 <= index); // -1 is the receiver.
- ASSERT(index <= parameter_count());
- uint16_t a = 0; // Number of argument slots.
- return MemOperand(s8_fp, (1 + parameter_count() + a - index) *kPointerSize);
- }
-
- // Push a copy of the value of a parameter frame slot on top of the frame.
- void PushParameterAt(int index) {
- PushFrameSlotAt(param0_index() + index);
- }
-
- // Push the value of a paramter frame slot on top of the frame and
- // invalidate the parameter slot. The slot should be written to before
- // trying to read from it again.
- void TakeParameterAt(int index) {
- TakeFrameSlotAt(param0_index() + index);
- }
-
- // Store the top value on the virtual frame into a parameter frame slot.
- // The value is left in place on top of the frame.
- void StoreToParameterAt(int index) {
- StoreToFrameSlotAt(param0_index() + index);
- }
+ inline MemOperand ParameterAt(int index);
// The receiver frame slot.
- MemOperand Receiver() { return ParameterAt(-1); }
+ inline MemOperand Receiver();
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
- void CallStub(CodeStub* stub, int arg_count) {
- PrepareForCall(arg_count, arg_count);
- RawCallStub(stub);
- }
+ inline void CallStub(CodeStub* stub, int arg_count);
- void CallStub(CodeStub* stub, Result* arg);
-
- void CallStub(CodeStub* stub, Result* arg0, Result* arg1);
+ // Call JS function from top of the stack with arguments
+ // taken from the stack.
+ void CallJSFunction(int arg_count);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
- void CallRuntime(Runtime::Function* f, int arg_count);
+ void CallRuntime(const Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
- // Call runtime with sp aligned to 8 bytes.
- void CallAlignedRuntime(Runtime::Function* f, int arg_count);
- void CallAlignedRuntime(Runtime::FunctionId id, int arg_count);
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ void DebugBreak();
+#endif
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
- Result* arg_count_register,
int arg_count);
+ // Call load IC. Receiver is on the stack and is consumed. Result is returned
+ // in v0.
+ void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
+
+  // Call store IC. If the store is contextual, value is found on top of the
+ // frame. If not, value and receiver are on the frame. Both are consumed.
+ // Result is returned in v0.
+ void CallStoreIC(Handle<String> name, bool is_contextual);
+
+ // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
+ // Result is returned in v0.
+ void CallKeyedLoadIC();
+
+ // Call keyed store IC. Value, key and receiver are on the stack. All three
+ // are consumed. Result is returned in v0 (and a0).
+ void CallKeyedStoreIC();
+
// Call into an IC stub given the number of arguments it removes
- // from the stack. Register arguments are passed as results and
- // consumed by the call.
+ // from the stack. Register arguments to the IC stub are implicit,
+ // and depend on the type of IC stub.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg,
- int dropped_args);
- void CallCodeObject(Handle<Code> ic,
- RelocInfo::Mode rmode,
- Result* arg0,
- Result* arg1,
- int dropped_args,
- bool set_auto_args_slots = false);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
- // Similar to VirtualFrame::Drop but we don't modify the actual stack.
- // This is because we need to manually restore sp to the correct position.
- void DropFromVFrameOnly(int count);
// Drop one element.
void Drop() { Drop(1); }
- void DropFromVFrameOnly() { DropFromVFrameOnly(1); }
- // Duplicate the top element of the frame.
- void Dup() { PushFrameSlotAt(element_count() - 1); }
+ // Pop an element from the top of the expression stack. Discards
+ // the result.
+ void Pop();
- // Pop an element from the top of the expression stack. Returns a
- // Result, which may be a constant or a register.
- Result Pop();
+ // Pop an element from the top of the expression stack. The register
+ // will be one normally used for the top of stack register allocation
+ // so you can't hold on to it if you push on the stack.
+ Register PopToRegister(Register but_not_to_this_one = no_reg);
+
+ // Look at the top of the stack. The register returned is aliased and
+ // must be copied to a scratch register before modification.
+ Register Peek();
+
+ // Look at the value beneath the top of the stack. The register returned is
+ // aliased and must be copied to a scratch register before modification.
+ Register Peek2();
+
+ // Duplicate the top of stack.
+ void Dup();
+
+ // Duplicate the two elements on top of stack.
+ void Dup2();
+
+ // Flushes all registers, but it puts a copy of the top-of-stack in a0.
+ void SpillAllButCopyTOSToA0();
+
+ // Flushes all registers, but it puts a copy of the top-of-stack in a1.
+ void SpillAllButCopyTOSToA1();
+
+ // Flushes all registers, but it puts a copy of the top-of-stack in a1
+ // and the next value on the stack in a0.
+ void SpillAllButCopyTOSToA1A0();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
@@ -355,40 +370,41 @@
void EmitMultiPop(RegList regs);
void EmitMultiPopReversed(RegList regs);
+
+ // Takes the top two elements and puts them in a0 (top element) and a1
+ // (second element).
+ void PopToA1A0();
+
+ // Takes the top element and puts it in a1.
+ void PopToA1();
+
+ // Takes the top element and puts it in a0.
+ void PopToA0();
+
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
- void EmitPush(Register reg);
+ void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
+ void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
+ void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
+ void EmitPushRoot(Heap::RootListIndex index);
+
+ // Overwrite the nth thing on the stack. If the nth position is in a
+ // register then this turns into a Move, otherwise an sw. Afterwards
+ // you can still use the register even if it is a register that can be
+ // used for TOS (a0 or a1).
+ void SetElementAt(Register reg, int this_far_down);
+
+ // Get a register which is free and which must be immediately used to
+ // push on the top of the stack.
+ Register GetTOSRegister();
+
// Same but for multiple registers.
void EmitMultiPush(RegList regs);
void EmitMultiPushReversed(RegList regs);
- // Push an element on the virtual frame.
- inline void Push(Register reg, NumberInfo info = NumberInfo::Unknown());
- inline void Push(Handle<Object> value);
- inline void Push(Smi* value);
-
- // Pushing a result invalidates it (its contents become owned by the frame).
- void Push(Result* result) {
- if (result->is_register()) {
- Push(result->reg());
- } else {
- ASSERT(result->is_constant());
- Push(result->handle());
- }
- result->Unuse();
- }
-
- // Nip removes zero or more elements from immediately below the top
- // of the frame, leaving the previous top-of-frame value on top of
- // the frame. Nip(k) is equivalent to x = Pop(), Drop(k), Push(x).
- inline void Nip(int num_dropped);
-
- // This pushes 4 arguments slots on the stack and saves asked 'a' registers
- // 'a' registers are arguments register a0 to a3.
- void EmitArgumentSlots(RegList reglist);
-
- inline void SetTypeForLocalAt(int index, NumberInfo info);
- inline void SetTypeForParamAt(int index, NumberInfo info);
+ static Register scratch0() { return t4; }
+ static Register scratch1() { return t5; }
+ static Register scratch2() { return t6; }
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
@@ -398,24 +414,51 @@
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
- ZoneList<FrameElement> elements_;
+ // 5 states for the top of stack, which can be in memory or in a0 and a1.
+ enum TopOfStack { NO_TOS_REGISTERS, A0_TOS, A1_TOS, A1_A0_TOS, A0_A1_TOS,
+ TOS_STATES};
+ static const int kMaxTOSRegisters = 2;
+
+ static const bool kA0InUse[TOS_STATES];
+ static const bool kA1InUse[TOS_STATES];
+ static const int kVirtualElements[TOS_STATES];
+ static const TopOfStack kStateAfterPop[TOS_STATES];
+ static const TopOfStack kStateAfterPush[TOS_STATES];
+ static const Register kTopRegister[TOS_STATES];
+ static const Register kBottomRegister[TOS_STATES];
+
+ // We allocate up to 5 locals in registers.
+ static const int kNumberOfAllocatedRegisters = 5;
+  // a2, a3 and t0 to t2 are allocated to locals.
+ static const int kFirstAllocatedRegister = 2;
+
+ static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
+
+ static Register AllocatedRegister(int r) {
+ ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
+ return kAllocatedRegisters[r];
+ }
+
+ // The number of elements on the stack frame.
+ int element_count_;
+ TopOfStack top_of_stack_state_:3;
+ int register_allocation_map_:kNumberOfAllocatedRegisters;
+ static const int kTOSKnownSmiMapSize = 4;
+ unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
// The index of the element that is at the processor's stack pointer
- // (the sp register).
- int stack_pointer_;
-
- // The index of the register frame element using each register, or
- // kIllegalIndex if a register is not on the frame.
- int register_locations_[RegisterAllocator::kNumRegisters];
+ // (the sp register). For now since everything is in memory it is given
+ // by the number of elements on the not-very-virtual stack frame.
+ int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
- int parameter_count() { return cgen()->scope()->num_parameters(); }
- int local_count() { return cgen()->scope()->num_stack_slots(); }
+ inline int parameter_count() const;
+ inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
- int frame_pointer() { return parameter_count() + 3; }
+ inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
@@ -423,75 +466,22 @@
// The index of the context slot in the frame. It is immediately
// below the frame pointer.
- int context_index() { return frame_pointer() - 1; }
+ inline int context_index();
// The index of the function slot in the frame. It is below the frame
// pointer and context slot.
- int function_index() { return frame_pointer() - 2; }
+ inline int function_index();
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
- int local0_index() { return frame_pointer() + 2; }
+ inline int local0_index() const;
// The index of the base of the expression stack.
- int expression_base_index() { return local0_index() + local_count(); }
+ inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
- int fp_relative(int index) {
- ASSERT(index < element_count());
- ASSERT(frame_pointer() < element_count()); // FP is on the frame.
- return (frame_pointer() - index) * kPointerSize;
- }
-
- // Record an occurrence of a register in the virtual frame. This has the
- // effect of incrementing the register's external reference count and
- // of updating the index of the register's location in the frame.
- void Use(Register reg, int index) {
- ASSERT(!is_used(reg));
- set_register_location(reg, index);
- cgen()->allocator()->Use(reg);
- }
-
- // Record that a register reference has been dropped from the frame. This
- // decrements the register's external reference count and invalidates the
- // index of the register's location in the frame.
- void Unuse(Register reg) {
- ASSERT(is_used(reg));
- set_register_location(reg, kIllegalIndex);
- cgen()->allocator()->Unuse(reg);
- }
-
- // Spill the element at a particular index---write it to memory if
- // necessary, free any associated register, and forget its value if
- // constant.
- void SpillElementAt(int index);
-
- // Sync the element at a particular index. If it is a register or
- // constant that disagrees with the value on the stack, write it to memory.
- // Keep the element type as register or constant, and clear the dirty bit.
- void SyncElementAt(int index);
-
- // Sync the range of elements in [begin, end] with memory.
- void SyncRange(int begin, int end);
-
- // Sync a single unsynced element that lies beneath or at the stack pointer.
- void SyncElementBelowStackPointer(int index);
-
- // Sync a single unsynced element that lies just above the stack pointer.
- void SyncElementByPushing(int index);
-
- // Push a copy of a frame slot (typically a local or parameter) on top of
- // the frame.
- inline void PushFrameSlotAt(int index);
-
- // Push a the value of a frame slot (typically a local or parameter) on
- // top of the frame and invalidate the slot.
- void TakeFrameSlotAt(int index);
-
- // Store the value on top of the frame to a frame slot (typically a local
- // or parameter).
- void StoreToFrameSlotAt(int index);
+ inline int fp_relative(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
@@ -499,45 +489,37 @@
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
- // Move frame elements currently in registers or constants, that
- // should be in memory in the expected frame, to memory.
- void MergeMoveRegistersToMemory(VirtualFrame* expected);
+ // If all top-of-stack registers are in use then the lowest one is pushed
+ // onto the physical stack and made free.
+ void EnsureOneFreeTOSRegister();
- // Make the register-to-register moves necessary to
- // merge this frame with the expected frame.
- // Register to memory moves must already have been made,
- // and memory to register moves must follow this call.
- // This is because some new memory-to-register moves are
- // created in order to break cycles of register moves.
- // Used in the implementation of MergeTo().
- void MergeMoveRegistersToRegisters(VirtualFrame* expected);
+ // Emit instructions to get the top of stack state from where we are to where
+ // we want to be.
+ void MergeTOSTo(TopOfStack expected_state,
+ Condition cond = al,
+ Register r1 = no_reg,
+ const Operand& r2 = Operand(no_reg));
- // Make the memory-to-register and constant-to-register moves
- // needed to make this frame equal the expected frame.
- // Called after all register-to-memory and register-to-register
- // moves have been made. After this function returns, the frames
- // should be equal.
- void MergeMoveMemoryToRegisters(VirtualFrame* expected);
+ inline bool Equals(const VirtualFrame* other);
- // Invalidates a frame slot (puts an invalid frame element in it).
- // Copies on the frame are correctly handled, and if this slot was
- // the backing store of copies, the index of the new backing store
- // is returned. Otherwise, returns kIllegalIndex.
- // Register counts are correctly updated.
- int InvalidateFrameSlotAt(int index);
+ inline void LowerHeight(int count) {
+ element_count_ -= count;
+ if (count >= kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ = 0;
+ } else {
+ tos_known_smi_map_ >>= count;
+ }
+ }
- // Call a code stub that has already been prepared for calling (via
- // PrepareForCall).
- void RawCallStub(CodeStub* stub);
-
- // Calls a code object which has already been prepared for calling
- // (via PrepareForCall).
- void RawCallCodeObject(Handle<Code> code, RelocInfo::Mode rmode);
-
- inline bool Equals(VirtualFrame* other);
-
- // Classes that need raw access to the elements_ array.
- friend class DeferredCode;
+ inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
+ ASSERT(known_smi_map < (1u << count));
+ element_count_ += count;
+ if (count >= kTOSKnownSmiMapSize) {
+ tos_known_smi_map_ = known_smi_map;
+ } else {
+ tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
+ }
+ }
friend class JumpTarget;
};
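
The LowerHeight/RaiseHeight pair above maintains tos_known_smi_map_, a small bitmap in which bit i records whether the element i slots below the top of the virtual frame is known to be a smi. A stand-alone sketch of the bookkeeping (plain C++; kTOSKnownSmiMapSize's value and the KnownSmiAt-style query are illustrative, not code from this patch):

  #include <assert.h>
  #include <stdint.h>

  static const int kTOSKnownSmiMapSize = 4;  // assumed width of the map

  struct TOSSmiTracker {
    int element_count_;
    uint32_t tos_known_smi_map_;

    // Bit index is the element's distance from the top of the frame.
    bool KnownSmiAt(int index) const {
      if (index >= kTOSKnownSmiMapSize) return false;
      return (tos_known_smi_map_ & (1u << index)) != 0;
    }

    // Dropping elements shifts their bits out of the map.
    void LowerHeight(int count) {
      element_count_ -= count;
      if (count >= kTOSKnownSmiMapSize) {
        tos_known_smi_map_ = 0;
      } else {
        tos_known_smi_map_ >>= count;
      }
    }

    // Pushing elements shifts the map up and ors in one smi bit per new element.
    void RaiseHeight(int count, uint32_t known_smi_map) {
      assert(known_smi_map < (1u << count));
      element_count_ += count;
      if (count >= kTOSKnownSmiMapSize) {
        tos_known_smi_map_ = known_smi_map;
      } else {
        tos_known_smi_map_ = (tos_known_smi_map_ << count) | known_smi_map;
      }
    }
  };
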
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 36e5c6f..5395bbb 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -848,11 +848,45 @@
IsRegionDirty(object->address() + offset)); \
}
-#define READ_DOUBLE_FIELD(p, offset) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+#ifndef V8_TARGET_ARCH_MIPS
+ #define READ_DOUBLE_FIELD(p, offset) \
+ (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+#else // V8_TARGET_ARCH_MIPS
+ // Prevent gcc from using load-double (mips ldc1) on (possibly)
+ // non-64-bit aligned HeapNumber::value.
+ static inline double read_double_field(HeapNumber* p, int offset) {
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
+ c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
+ return c.d;
+ }
+ #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
+#endif // V8_TARGET_ARCH_MIPS
-#define WRITE_DOUBLE_FIELD(p, offset, value) \
- (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
+
+#ifndef V8_TARGET_ARCH_MIPS
+ #define WRITE_DOUBLE_FIELD(p, offset, value) \
+ (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)) = value)
+#else // V8_TARGET_ARCH_MIPS
+ // Prevent gcc from using store-double (mips sdc1) on (possibly)
+ // non-64-bit aligned HeapNumber::value.
+ static inline void write_double_field(HeapNumber* p, int offset,
+ double value) {
+ union conversion {
+ double d;
+ uint32_t u[2];
+ } c;
+ c.d = value;
+ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) = c.u[0];
+ (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4))) = c.u[1];
+ }
+ #define WRITE_DOUBLE_FIELD(p, offset, value) \
+ write_double_field(p, offset, value)
+#endif // V8_TARGET_ARCH_MIPS
+
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
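
The MIPS-only READ_DOUBLE_FIELD/WRITE_DOUBLE_FIELD variants above exist because a HeapNumber's value field is only guaranteed 4-byte alignment, and a direct double access lets gcc emit ldc1/sdc1, which fault on addresses that are not 8-byte aligned. A stand-alone illustration of the safe access pattern (plain C++, independent of FIELD_ADDR and HeapNumber):

  #include <stdint.h>
  #include <string.h>

  // Move the two 32-bit halves separately so the compiler cannot fuse the
  // access into a single 64-bit load or store.
  static double ReadPossiblyUnalignedDouble(const void* p) {
    union { double d; uint32_t u[2]; } c;
    memcpy(&c.u[0], p, sizeof(uint32_t));
    memcpy(&c.u[1], static_cast<const uint8_t*>(p) + 4, sizeof(uint32_t));
    return c.d;
  }

  static void WritePossiblyUnalignedDouble(void* p, double value) {
    union { double d; uint32_t u[2]; } c;
    c.d = value;
    memcpy(p, &c.u[0], sizeof(uint32_t));
    memcpy(static_cast<uint8_t*>(p) + 4, &c.u[1], sizeof(uint32_t));
  }
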
diff --git a/src/objects.cc b/src/objects.cc
index a0d1536..8cb36e9 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -4514,7 +4514,6 @@
int offset,
int length,
int* length_return) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
return SmartPointer<char>(NULL);
}
@@ -4594,7 +4593,6 @@
SmartPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) {
- ASSERT(NativeAllocationChecker::allocation_allowed());
if (robust_flag == ROBUST_STRING_TRAVERSAL && !LooksValid()) {
return SmartPointer<uc16>();
}
diff --git a/src/parser.cc b/src/parser.cc
index eefe7cf..13e0c33 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -579,7 +579,7 @@
: isolate_(script->GetIsolate()),
symbol_cache_(pre_data ? pre_data->symbol_count() : 0),
script_(script),
- scanner_(isolate_),
+ scanner_(isolate_->scanner_constants()),
top_scope_(NULL),
with_nesting_level_(0),
lexical_scope_(NULL),
@@ -5053,7 +5053,7 @@
bool allow_lazy,
ParserRecorder* recorder) {
Isolate* isolate = Isolate::Current();
- V8JavaScriptScanner scanner(isolate);
+ V8JavaScriptScanner scanner(isolate->scanner_constants());
scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
if (!preparser::PreParser::PreParseProgram(&scanner,
diff --git a/src/parser.h b/src/parser.h
index 765bf5a..74cb049 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -776,7 +776,9 @@
}
private:
- JsonParser() : isolate_(Isolate::Current()), scanner_(isolate_) { }
+ JsonParser()
+ : isolate_(Isolate::Current()),
+ scanner_(isolate_->scanner_constants()) { }
~JsonParser() { }
Isolate* isolate() { return isolate_; }
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index 06f832b..2a73b6e 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -75,6 +75,9 @@
}
+static Mutex* limit_mutex = NULL;
+
+
void OS::Setup() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -83,6 +86,7 @@
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
+ limit_mutex = CreateMutex();
}
@@ -131,6 +135,9 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
+ ASSERT(limit_mutex != NULL);
+ ScopedLock lock(limit_mutex);
+
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
Max(highest_ever_allocated,
@@ -300,7 +307,7 @@
// There may be no filename in this line. Skip to next.
if (start_of_path == NULL) continue;
buffer[bytes_read] = 0;
- LOG(SharedLibraryEvent(start_of_path, start, end));
+ LOG(i::Isolate::Current(), SharedLibraryEvent(start_of_path, start, end));
}
close(fd);
#endif
@@ -619,10 +626,6 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
-static Sampler* active_sampler_ = NULL;
-static pthread_t vm_tid_ = NULL;
-
-
static pthread_t GetThreadID() {
pthread_t thread_id = pthread_self();
return thread_id;
@@ -631,34 +634,160 @@
class Sampler::PlatformData : public Malloced {
public:
+ PlatformData() : vm_tid_(GetThreadID()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+
+ private:
+ pthread_t vm_tid_;
+};
+
+
+static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
+ USE(info);
+ if (signal != SIGPROF) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
+
+ TickSample sample_obj;
+ TickSample* sample = CpuProfiler::TickSampleEvent(isolate);
+ if (sample == NULL) sample = &sample_obj;
+
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+ sample->state = isolate->current_vm_state();
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+#elif V8_HOST_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
+#endif
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
+}
+
+
+class SignalSender : public Thread {
+ public:
enum SleepInterval {
- FULL_INTERVAL,
- HALF_INTERVAL
+ HALF_INTERVAL,
+ FULL_INTERVAL
};
- explicit PlatformData(Sampler* sampler)
- : sampler_(sampler),
- signal_handler_installed_(false),
- signal_sender_launched_(false) {
+ explicit SignalSender(int interval)
+ : Thread(NULL, "SignalSender"),
+ interval_(interval) {}
+
+ static void AddActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::AddActiveSampler(sampler);
+ if (instance_ == NULL) {
+ // Install a signal handler.
+ struct sigaction sa;
+ sa.sa_sigaction = ProfilerSignalHandler;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+
+ // Start a thread that sends SIGPROF signal to VM threads.
+ instance_ = new SignalSender(sampler->interval());
+ instance_->Start();
+ } else {
+ ASSERT(instance_->interval_ == sampler->interval());
+ }
}
- void SignalSender() {
- while (sampler_->IsActive()) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
- Sleep(FULL_INTERVAL);
- RuntimeProfiler::NotifyTick();
+ static void RemoveActiveSampler(Sampler* sampler) {
+ ScopedLock lock(mutex_);
+ SamplerRegistry::RemoveActiveSampler(sampler);
+ if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
+ RuntimeProfiler::WakeUpRuntimeProfilerThreadBeforeShutdown();
+ instance_->Join();
+ delete instance_;
+ instance_ = NULL;
+
+ // Restore the old signal handler.
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ SamplerRegistry::State state;
+ while ((state = SamplerRegistry::GetState()) !=
+ SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ // When CPU profiling is enabled both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ if (!cpu_profiling_enabled) {
+ if (rate_limiter_.SuspendIfNecessary()) continue;
+ }
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
} else {
- if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
Sleep(FULL_INTERVAL);
}
}
}
+ static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
+ if (!sampler->IsProfiling()) return;
+ SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
+ sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
+ }
+
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
+ void SendProfilingSignal(pthread_t tid) {
+ if (!signal_handler_installed_) return;
+ pthread_kill(tid, SIGPROF);
+ }
+
void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
- useconds_t interval = sampler_->interval_ * 1000 - 100;
+ useconds_t interval = interval_ * 1000 - 100;
if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
@@ -673,63 +802,22 @@
USE(result);
}
- Sampler* sampler_;
- bool signal_handler_installed_;
- struct sigaction old_signal_handler_;
- struct itimerval old_timer_value_;
- bool signal_sender_launched_;
- pthread_t signal_sender_thread_;
+ const int interval_;
RuntimeProfilerRateLimiter rate_limiter_;
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SignalSender* instance_;
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+
+ DISALLOW_COPY_AND_ASSIGN(SignalSender);
};
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- if (active_sampler_ == NULL) return;
- if (!active_sampler_->IsActive()) {
- // Restore old signal handler
- Sampler::PlatformData* data = active_sampler_->data();
- if (data->signal_handler_installed_) {
- sigaction(SIGPROF, &data->old_signal_handler_, 0);
- data->signal_handler_installed_ = false;
- }
- return;
- }
-
- if (vm_tid_ != GetThreadID()) return;
-
- TickSample sample_obj;
- TickSample* sample = CpuProfiler::TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif
- active_sampler_->SampleStack(sample);
- active_sampler_->Tick(sample);
-}
-
-
-static void* SenderEntry(void* arg) {
- Sampler::PlatformData* data =
- reinterpret_cast<Sampler::PlatformData*>(arg);
- data->SignalSender();
- return 0;
-}
+Mutex* SignalSender::mutex_ = OS::CreateMutex();
+SignalSender* SignalSender::instance_ = NULL;
+struct sigaction SignalSender::old_signal_handler_;
+bool SignalSender::signal_handler_installed_ = false;
Sampler::Sampler(Isolate* isolate, int interval)
@@ -738,62 +826,27 @@
profiling_(false),
active_(false),
samples_taken_(0) {
- data_ = new PlatformData(this);
+ data_ = new PlatformData;
}
Sampler::~Sampler() {
+ ASSERT(!IsActive());
delete data_;
}
void Sampler::Start() {
- // There can only be one active sampler at the time on POSIX
- // platforms.
ASSERT(!IsActive());
- vm_tid_ = GetThreadID();
-
- // Request profiling signals.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_SIGINFO;
- if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
- data_->signal_handler_installed_ = true;
-
- // Set the itimer to generate a tick for each interval.
- itimerval itimer;
- itimer.it_interval.tv_sec = interval_ / 1000;
- itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
- itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
- itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
- setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
-
- // Set this sampler as the active sampler.
- active_sampler_ = this;
SetActive(true);
-
- // There's no way to send a signal to a thread on FreeBSD, but we can
- // start a thread that uses the stack guard to interrupt the JS thread.
- if (pthread_create(
- &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
- data_->signal_sender_launched_ = true;
- }
+ SignalSender::AddActiveSampler(this);
}
void Sampler::Stop() {
- // This sampler is no longer the active sampler.
- active_sampler_ = NULL;
+ ASSERT(IsActive());
+ SignalSender::RemoveActiveSampler(this);
SetActive(false);
-
- // Wait for signal sender termination (it will exit after setting
- // active_ to false).
- if (data_->signal_sender_launched_) {
- Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
- pthread_join(data_->signal_sender_thread_, NULL);
- data_->signal_sender_launched_ = false;
- }
}
#endif // ENABLE_LOGGING_AND_PROFILING
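
The FreeBSD port now follows the shared POSIX sampling scheme: one process-wide SignalSender thread periodically delivers SIGPROF to the sampled VM thread with pthread_kill, and the signal handler pulls pc/sp/fp out of the interrupted thread's ucontext. A heavily simplified sketch of that shape (POSIX only; V8's sampler registry, isolate lookup and TickSample plumbing are omitted):

  #include <pthread.h>
  #include <signal.h>
  #include <unistd.h>

  static pthread_t sampled_thread;
  static volatile sig_atomic_t keep_sampling = 1;

  static void ProfSignalHandler(int sig, siginfo_t*, void*) {
    if (sig != SIGPROF) return;
    // In the real code this is where pc/sp/fp are read from the ucontext and
    // a tick is recorded for the current isolate's sampler.
  }

  static void InstallProfHandler() {
    struct sigaction sa;
    sa.sa_sigaction = ProfSignalHandler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = SA_RESTART | SA_SIGINFO;
    sigaction(SIGPROF, &sa, NULL);
  }

  static void* SenderThread(void*) {
    while (keep_sampling) {
      pthread_kill(sampled_thread, SIGPROF);  // signal one thread, not the process
      usleep(1000);                           // sampling interval, 1ms here
    }
    return NULL;
  }
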
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index 14f0db5..73a6ccb 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -97,6 +97,10 @@
return 1u << VFP3;
#elif CAN_USE_ARMV7_INSTRUCTIONS
return 1u << ARMv7;
+#elif (defined(__mips_hard_float) && __mips_hard_float != 0)
+ // Here gcc is telling us that we are on a MIPS and that gcc assumes we
+ // have FPU instructions. If gcc can assume it then so can we.
+ return 1u << FPU;
#else
return 0; // Linux runs on anything.
#endif
@@ -175,6 +179,58 @@
#endif // def __arm__
+#ifdef __mips__
+bool OS::MipsCpuHasFeature(CpuFeature feature) {
+ const char* search_string = NULL;
+ const char* file_name = "/proc/cpuinfo";
+ // Simple detection of FPU at runtime for Linux.
+ // It is based on /proc/cpuinfo, which reveals hardware configuration
+ // to user-space applications. According to MIPS (early 2010), no similar
+ // facility is universally available across the MIPS architectures,
+ // so it is up to individual OSes to provide one.
+ //
+ // This is written as a straightforward one-pass parser, without STL
+ // strings or ifstream, because on Linux it reads from a (non-mmap-able)
+ // character special device.
+
+ switch (feature) {
+ case FPU:
+ search_string = "FPU";
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ FILE* f = NULL;
+ const char* what = search_string;
+
+ if (NULL == (f = fopen(file_name, "r")))
+ return false;
+
+ int k;
+ while (EOF != (k = fgetc(f))) {
+ if (k == *what) {
+ ++what;
+ while ((*what != '\0') && (*what == fgetc(f))) {
+ ++what;
+ }
+ if (*what == '\0') {
+ fclose(f);
+ return true;
+ } else {
+ what = search_string;
+ }
+ }
+ }
+ fclose(f);
+
+ // Did not find string in the proc file.
+ return false;
+}
+#endif // def __mips__
+
+
int OS::ActivationFrameAlignment() {
#ifdef V8_TARGET_ARCH_ARM
// On EABI ARM targets this is required for fp correctness in the
@@ -190,8 +246,9 @@
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
- // Only use on ARM hardware.
+#if (defined(V8_TARGET_ARCH_ARM) && defined(__arm__)) || \
+ (defined(V8_TARGET_ARCH_MIPS) && defined(__mips__))
+ // Only use on ARM or MIPS hardware.
MemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
@@ -860,8 +917,9 @@
sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
#endif
#elif V8_HOST_ARCH_MIPS
- // Implement this on MIPS.
- UNIMPLEMENTED();
+ sample->pc = reinterpret_cast<Address>(mcontext.pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif
sampler->SampleStack(sample);
sampler->Tick(sample);
diff --git a/src/platform.h b/src/platform.h
index 4f34abe..b2e0c48 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -294,6 +294,9 @@
// Support runtime detection of VFP3 on ARM CPUs.
static bool ArmCpuHasFeature(CpuFeature feature);
+ // Support runtime detection of FPU on MIPS CPUs.
+ static bool MipsCpuHasFeature(CpuFeature feature);
+
// Returns the activation frame alignment constraint or zero if
// the platform doesn't care. Guaranteed to be a power of two.
static int ActivationFrameAlignment();
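
OS::MipsCpuHasFeature parallels the existing ArmCpuHasFeature probe. A hedged sketch of how a caller might gate FPU-dependent code paths on it (illustrative call site, not code from this patch):

  if (OS::MipsCpuHasFeature(FPU)) {
    // Hardware floating point is available; emit lwc1/ldc1/mul.d etc.
  } else {
    // Fall back to soft-float, routing double arithmetic through runtime calls.
  }
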
diff --git a/src/preparse-data.cc b/src/preparse-data.cc
index cea54ef..92a0338 100644
--- a/src/preparse-data.cc
+++ b/src/preparse-data.cc
@@ -25,20 +25,13 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-
-/*
-TODO(isolates): I incldue v8.h instead of these because we need Isolate and
-some classes (NativeAllocationChecker) are moved into isolate.h
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
#include "allocation.h"
-#include "allocation-inl.h"
#include "utils.h"
#include "list-inl.h"
-#include "hashmap.h"
-*/
+#include "hashmap.h"
#include "preparse-data.h"
diff --git a/src/preparser-api.cc b/src/preparser-api.cc
index 6ed8e36..61e9e7e 100644
--- a/src/preparser-api.cc
+++ b/src/preparser-api.cc
@@ -27,8 +27,6 @@
#include "../include/v8-preparser.h"
-#include "v8.h"
-
#include "globals.h"
#include "checks.h"
#include "allocation.h"
@@ -161,8 +159,8 @@
class StandAloneJavaScriptScanner : public JavaScriptScanner {
public:
- StandAloneJavaScriptScanner()
- : JavaScriptScanner(Isolate::Current()) { }
+ explicit StandAloneJavaScriptScanner(ScannerConstants* scanner_constants)
+ : JavaScriptScanner(scanner_constants) { }
void Initialize(UC16CharacterStream* source) {
source_ = source;
@@ -176,7 +174,8 @@
};
-// Functions declared by allocation.h
+// Functions declared by allocation.h and implemented either in api.cc (for v8)
+// or here (for the stand-alone preparser).
void FatalProcessOutOfMemory(const char* reason) {
V8_Fatal(__FILE__, __LINE__, reason);
@@ -193,7 +192,8 @@
PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
internal::InputStreamUTF16Buffer buffer(input);
uintptr_t stack_limit = reinterpret_cast<uintptr_t>(&buffer) - max_stack;
- internal::StandAloneJavaScriptScanner scanner;
+ internal::ScannerConstants scanner_constants;
+ internal::StandAloneJavaScriptScanner scanner(&scanner_constants);
scanner.Initialize(&buffer);
internal::CompleteParserRecorder recorder;
preparser::PreParser::PreParseResult result =
diff --git a/src/preparser.cc b/src/preparser.cc
index a0a34a7..fec1567 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -25,11 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "v8.h"
-
-/*
-TODO(isolates): I incldue v8.h instead of these because we need Isolate and
-some classes (NativeAllocationChecker) are moved into isolate.h
#include "../include/v8stdint.h"
#include "unicode.h"
#include "globals.h"
@@ -37,7 +32,6 @@
#include "allocation.h"
#include "utils.h"
#include "list.h"
-*/
#include "scanner-base.h"
#include "preparse-data.h"
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 39b1a8d..c9db94f 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -984,11 +984,6 @@
}
-List<HeapGraphPath*>* HeapEntry::GetRetainingPaths() {
- return snapshot_->GetRetainingPaths(this);
-}
-
-
template<class Visitor>
void HeapEntry::ApplyAndPaintAllReachable(Visitor* visitor) {
List<HeapEntry*> list(10);
@@ -1147,107 +1142,6 @@
}
-class CachedHeapGraphPath {
- public:
- CachedHeapGraphPath()
- : nodes_(NodesMatch) { }
- CachedHeapGraphPath(const CachedHeapGraphPath& src)
- : nodes_(NodesMatch, &HashMap::DefaultAllocator, src.nodes_.capacity()),
- path_(src.path_.length() + 1) {
- for (HashMap::Entry* p = src.nodes_.Start();
- p != NULL;
- p = src.nodes_.Next(p)) {
- nodes_.Lookup(p->key, p->hash, true);
- }
- path_.AddAll(src.path_);
- }
- void Add(HeapGraphEdge* edge) {
- nodes_.Lookup(edge->to(), Hash(edge->to()), true);
- path_.Add(edge);
- }
- bool ContainsNode(HeapEntry* node) {
- return nodes_.Lookup(node, Hash(node), false) != NULL;
- }
- const List<HeapGraphEdge*>* path() const { return &path_; }
-
- private:
- static uint32_t Hash(HeapEntry* entry) {
- return static_cast<uint32_t>(reinterpret_cast<intptr_t>(entry));
- }
- static bool NodesMatch(void* key1, void* key2) { return key1 == key2; }
-
- HashMap nodes_;
- List<HeapGraphEdge*> path_;
-};
-
-
-List<HeapGraphPath*>* HeapEntry::CalculateRetainingPaths() {
- List<HeapGraphPath*>* retaining_paths = new List<HeapGraphPath*>(4);
- CachedHeapGraphPath path;
- FindRetainingPaths(&path, retaining_paths);
- return retaining_paths;
-}
-
-
-void HeapEntry::FindRetainingPaths(CachedHeapGraphPath* prev_path,
- List<HeapGraphPath*>* retaining_paths) {
- Vector<HeapGraphEdge*> rets = retainers();
- for (int i = 0; i < rets.length(); ++i) {
- HeapGraphEdge* ret_edge = rets[i];
- if (prev_path->ContainsNode(ret_edge->From())) continue;
- if (ret_edge->From() != snapshot()->root()) {
- CachedHeapGraphPath path(*prev_path);
- path.Add(ret_edge);
- ret_edge->From()->FindRetainingPaths(&path, retaining_paths);
- } else {
- HeapGraphPath* ret_path = new HeapGraphPath(*prev_path->path());
- ret_path->Set(0, ret_edge);
- retaining_paths->Add(ret_path);
- }
- }
-}
-
-
-HeapGraphPath::HeapGraphPath(const List<HeapGraphEdge*>& path)
- : path_(path.length() + 1) {
- Add(NULL);
- for (int i = path.length() - 1; i >= 0; --i) {
- Add(path[i]);
- }
-}
-
-
-void HeapGraphPath::Print() {
- path_[0]->From()->Print(1, 0);
- for (int i = 0; i < path_.length(); ++i) {
- OS::Print(" -> ");
- HeapGraphEdge* edge = path_[i];
- switch (edge->type()) {
- case HeapGraphEdge::kContextVariable:
- OS::Print("[#%s] ", edge->name());
- break;
- case HeapGraphEdge::kElement:
- case HeapGraphEdge::kHidden:
- OS::Print("[%d] ", edge->index());
- break;
- case HeapGraphEdge::kInternal:
- OS::Print("[$%s] ", edge->name());
- break;
- case HeapGraphEdge::kProperty:
- OS::Print("[%s] ", edge->name());
- break;
- case HeapGraphEdge::kShortcut:
- OS::Print("[^%s] ", edge->name());
- break;
- default:
- OS::Print("!!! unknown edge type: %d ", edge->type());
- }
- edge->to()->Print(1, 0);
- }
- OS::Print("\n");
-}
-
-
// It is very important to keep objects that form a heap snapshot
// as small as possible.
namespace { // Avoid littering the global namespace.
@@ -1278,8 +1172,7 @@
gc_roots_entry_(NULL),
natives_root_entry_(NULL),
raw_entries_(NULL),
- entries_sorted_(false),
- retaining_paths_(HeapEntry::Match) {
+ entries_sorted_(false) {
STATIC_ASSERT(
sizeof(HeapGraphEdge) ==
SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapGraphEdgeSize); // NOLINT
@@ -1288,21 +1181,8 @@
SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize); // NOLINT
}
-
-static void DeleteHeapGraphPath(HeapGraphPath** path_ptr) {
- delete *path_ptr;
-}
-
HeapSnapshot::~HeapSnapshot() {
DeleteArray(raw_entries_);
- for (HashMap::Entry* p = retaining_paths_.Start();
- p != NULL;
- p = retaining_paths_.Next(p)) {
- List<HeapGraphPath*>* list =
- reinterpret_cast<List<HeapGraphPath*>*>(p->value);
- list->Iterate(DeleteHeapGraphPath);
- delete list;
- }
}
@@ -1404,14 +1284,7 @@
}
-HeapSnapshotsDiff* HeapSnapshot::CompareWith(HeapSnapshot* snapshot) {
- return collection_->CompareSnapshots(this, snapshot);
-}
-
-
HeapEntry* HeapSnapshot::GetEntryById(uint64_t id) {
- // GetSortedEntriesList is used in diff algorithm and sorts
- // entries by their id.
List<HeapEntry*>* entries_by_id = GetSortedEntriesList();
// Perform a binary search by id.
@@ -1432,16 +1305,6 @@
}
-List<HeapGraphPath*>* HeapSnapshot::GetRetainingPaths(HeapEntry* entry) {
- HashMap::Entry* p =
- retaining_paths_.Lookup(entry, HeapEntry::Hash(entry), true);
- if (p->value == NULL) {
- p->value = entry->CalculateRetainingPaths();
- }
- return reinterpret_cast<List<HeapGraphPath*>*>(p->value);
-}
-
-
template<class T>
static int SortByIds(const T* entry1_ptr,
const T* entry2_ptr) {
@@ -1631,13 +1494,6 @@
}
-HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
- HeapSnapshot* snapshot1,
- HeapSnapshot* snapshot2) {
- return comparator_.Compare(snapshot1, snapshot2);
-}
-
-
HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
reinterpret_cast<HeapEntry*>(1);
@@ -2804,83 +2660,6 @@
}
-void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
- raw_additions_root_ =
- NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));
- additions_root()->Init(
- snapshot2_, HeapEntry::kHidden, "", 0, 0, additions_count, 0);
- raw_deletions_root_ =
- NewArray<char>(HeapEntry::EntriesSize(1, deletions_count, 0));
- deletions_root()->Init(
- snapshot1_, HeapEntry::kHidden, "", 0, 0, deletions_count, 0);
-}
-
-
-static void DeleteHeapSnapshotsDiff(HeapSnapshotsDiff** diff_ptr) {
- delete *diff_ptr;
-}
-
-HeapSnapshotsComparator::~HeapSnapshotsComparator() {
- diffs_.Iterate(DeleteHeapSnapshotsDiff);
-}
-
-
-HeapSnapshotsDiff* HeapSnapshotsComparator::Compare(HeapSnapshot* snapshot1,
- HeapSnapshot* snapshot2) {
- snapshot1->ClearPaint();
- snapshot1->root()->PaintAllReachable();
- snapshot2->ClearPaint();
- snapshot2->root()->PaintAllReachable();
-
- List<HeapEntry*>* entries1 = snapshot1->GetSortedEntriesList();
- List<HeapEntry*>* entries2 = snapshot2->GetSortedEntriesList();
- int i = 0, j = 0;
- List<HeapEntry*> added_entries, deleted_entries;
- while (i < entries1->length() && j < entries2->length()) {
- uint64_t id1 = entries1->at(i)->id();
- uint64_t id2 = entries2->at(j)->id();
- if (id1 == id2) {
- HeapEntry* entry1 = entries1->at(i++);
- HeapEntry* entry2 = entries2->at(j++);
- if (entry1->painted_reachable() != entry2->painted_reachable()) {
- if (entry1->painted_reachable())
- deleted_entries.Add(entry1);
- else
- added_entries.Add(entry2);
- }
- } else if (id1 < id2) {
- HeapEntry* entry = entries1->at(i++);
- deleted_entries.Add(entry);
- } else {
- HeapEntry* entry = entries2->at(j++);
- added_entries.Add(entry);
- }
- }
- while (i < entries1->length()) {
- HeapEntry* entry = entries1->at(i++);
- deleted_entries.Add(entry);
- }
- while (j < entries2->length()) {
- HeapEntry* entry = entries2->at(j++);
- added_entries.Add(entry);
- }
-
- HeapSnapshotsDiff* diff = new HeapSnapshotsDiff(snapshot1, snapshot2);
- diffs_.Add(diff);
- diff->CreateRoots(added_entries.length(), deleted_entries.length());
-
- for (int i = 0; i < deleted_entries.length(); ++i) {
- HeapEntry* entry = deleted_entries[i];
- diff->AddDeletedEntry(i, i + 1, entry);
- }
- for (int i = 0; i < added_entries.length(); ++i) {
- HeapEntry* entry = added_entries[i];
- diff->AddAddedEntry(i, i + 1, entry);
- }
- return diff;
-}
-
-
class OutputStreamWriter {
public:
explicit OutputStreamWriter(v8::OutputStream* stream)
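
With the snapshot-diffing machinery removed, GetSortedEntriesList is kept only for GetEntryById, which binary-searches the entries sorted by object id. A stand-alone sketch of that lookup (illustrative types; the real code uses V8's List<HeapEntry*>):

  #include <stddef.h>
  #include <stdint.h>
  #include <vector>

  struct Entry { uint64_t id; };

  // 'entries' must be sorted by ascending id.
  static Entry* FindById(const std::vector<Entry*>& entries, uint64_t id) {
    size_t low = 0;
    size_t high = entries.size();
    while (low < high) {
      size_t mid = low + (high - low) / 2;
      if (entries[mid]->id == id) return entries[mid];
      if (entries[mid]->id < id) low = mid + 1; else high = mid;
    }
    return NULL;
  }
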
diff --git a/src/profile-generator.h b/src/profile-generator.h
index c817913..377c083 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -490,8 +490,6 @@
};
-class CachedHeapGraphPath;
-class HeapGraphPath;
class HeapSnapshot;
// HeapEntry instances represent an entity from the heap (or a special
@@ -551,7 +549,6 @@
return Vector<HeapGraphEdge>(children_arr(), children_count_); }
Vector<HeapGraphEdge*> retainers() {
return Vector<HeapGraphEdge*>(retainers_arr(), retainers_count_); }
- List<HeapGraphPath*>* GetRetainingPaths();
HeapEntry* dominator() { return dominator_; }
void set_dominator(HeapEntry* entry) { dominator_ = entry; }
@@ -585,18 +582,12 @@
int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
int RetainedSize(bool exact);
- List<HeapGraphPath*>* CalculateRetainingPaths();
void Print(int max_depth, int indent);
static int EntriesSize(int entries_count,
int children_count,
int retainers_count);
- static uint32_t Hash(HeapEntry* entry) {
- return ComputeIntegerHash(
- static_cast<uint32_t>(reinterpret_cast<uintptr_t>(entry)));
- }
- static bool Match(void* entry1, void* entry2) { return entry1 == entry2; }
private:
HeapGraphEdge* children_arr() {
@@ -606,8 +597,6 @@
return reinterpret_cast<HeapGraphEdge**>(children_arr() + children_count_);
}
void CalculateExactRetainedSize();
- void FindRetainingPaths(CachedHeapGraphPath* prev_path,
- List<HeapGraphPath*>* retaining_paths);
const char* TypeAsString();
unsigned painted_: 2;
@@ -638,27 +627,7 @@
};
-class HeapGraphPath {
- public:
- HeapGraphPath()
- : path_(8) { }
- explicit HeapGraphPath(const List<HeapGraphEdge*>& path);
-
- void Add(HeapGraphEdge* edge) { path_.Add(edge); }
- void Set(int index, HeapGraphEdge* edge) { path_[index] = edge; }
- const List<HeapGraphEdge*>* path() { return &path_; }
-
- void Print();
-
- private:
- List<HeapGraphEdge*> path_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapGraphPath);
-};
-
-
class HeapSnapshotsCollection;
-class HeapSnapshotsDiff;
// HeapSnapshot represents a single heap snapshot. It is stored in
// HeapSnapshotsCollection, which is also a factory for
@@ -700,9 +669,7 @@
HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
void ClearPaint();
- HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
HeapEntry* GetEntryById(uint64_t id);
- List<HeapGraphPath*>* GetRetainingPaths(HeapEntry* entry);
List<HeapEntry*>* GetSortedEntriesList();
template<class Visitor>
void IterateEntries(Visitor* visitor) { entries_.Iterate(visitor); }
@@ -724,7 +691,6 @@
char* raw_entries_;
List<HeapEntry*> entries_;
bool entries_sorted_;
- HashMap retaining_paths_;
#ifdef DEBUG
int raw_entries_size_;
#endif
@@ -781,58 +747,6 @@
};
-class HeapSnapshotsDiff {
- public:
- HeapSnapshotsDiff(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2)
- : snapshot1_(snapshot1),
- snapshot2_(snapshot2),
- raw_additions_root_(NULL),
- raw_deletions_root_(NULL) { }
-
- ~HeapSnapshotsDiff() {
- DeleteArray(raw_deletions_root_);
- DeleteArray(raw_additions_root_);
- }
-
- void AddAddedEntry(int child_index, int index, HeapEntry* entry) {
- additions_root()->SetUnidirElementReference(child_index, index, entry);
- }
-
- void AddDeletedEntry(int child_index, int index, HeapEntry* entry) {
- deletions_root()->SetUnidirElementReference(child_index, index, entry);
- }
-
- void CreateRoots(int additions_count, int deletions_count);
-
- HeapEntry* additions_root() {
- return reinterpret_cast<HeapEntry*>(raw_additions_root_);
- }
- HeapEntry* deletions_root() {
- return reinterpret_cast<HeapEntry*>(raw_deletions_root_);
- }
-
- private:
- HeapSnapshot* snapshot1_;
- HeapSnapshot* snapshot2_;
- char* raw_additions_root_;
- char* raw_deletions_root_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsDiff);
-};
-
-
-class HeapSnapshotsComparator {
- public:
- HeapSnapshotsComparator() { }
- ~HeapSnapshotsComparator();
- HeapSnapshotsDiff* Compare(HeapSnapshot* snapshot1, HeapSnapshot* snapshot2);
- private:
- List<HeapSnapshotsDiff*> diffs_;
-
- DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsComparator);
-};
-
-
class HeapSnapshotsCollection {
public:
HeapSnapshotsCollection();
@@ -853,9 +767,6 @@
uint64_t GetObjectId(Address addr) { return ids_.FindObject(addr); }
void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
- HeapSnapshotsDiff* CompareSnapshots(HeapSnapshot* snapshot1,
- HeapSnapshot* snapshot2);
-
private:
INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) {
return key1 == key2;
@@ -869,7 +780,6 @@
TokenEnumerator* token_enumerator_;
// Mapping from HeapObject addresses to objects' uids.
HeapObjectsMap ids_;
- HeapSnapshotsComparator comparator_;
DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection);
};
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index 0d60e79..1268e78 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -48,6 +48,7 @@
enum IrregexpImplementation {
kIA32Implementation,
kARMImplementation,
+ kMIPSImplementation,
kX64Implementation,
kBytecodeImplementation
};
diff --git a/src/scanner-base.cc b/src/scanner-base.cc
index 324a0e0..2066b5a 100644
--- a/src/scanner-base.cc
+++ b/src/scanner-base.cc
@@ -27,13 +27,7 @@
// Features shared by parsing and pre-parsing scanners.
-#include "v8.h"
-
-/*
-TODO(isolates): I incldue v8.h instead of these because we need Isolate and
-some classes (NativeAllocationChecker) are moved into isolate.h
#include "../include/v8stdint.h"
-*/
#include "scanner-base.h"
#include "char-predicates-inl.h"
@@ -60,8 +54,8 @@
// ----------------------------------------------------------------------------
// Scanner
-Scanner::Scanner(Isolate* isolate)
- : scanner_constants_(isolate->scanner_constants()),
+Scanner::Scanner(ScannerConstants* scanner_constants)
+ : scanner_constants_(scanner_constants),
octal_pos_(kNoOctalLocation) {
}
@@ -120,7 +114,8 @@
// ----------------------------------------------------------------------------
// JavaScriptScanner
-JavaScriptScanner::JavaScriptScanner(Isolate* isolate) : Scanner(isolate) {}
+JavaScriptScanner::JavaScriptScanner(ScannerConstants* scanner_constants)
+ : Scanner(scanner_constants) { }
Token::Value JavaScriptScanner::Next() {
diff --git a/src/scanner-base.h b/src/scanner-base.h
index 7203569..552f387 100644
--- a/src/scanner-base.h
+++ b/src/scanner-base.h
@@ -123,6 +123,7 @@
// ---------------------------------------------------------------------
// Constants used by scanners.
public:
+ ScannerConstants() {}
typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder;
StaticResource<Utf8Decoder>* utf8_decoder() {
@@ -137,7 +138,6 @@
bool IsIdentifier(unibrow::CharacterStream* buffer);
private:
- ScannerConstants() {}
unibrow::Predicate<IdentifierStart, 128> kIsIdentifierStart;
unibrow::Predicate<IdentifierPart, 128> kIsIdentifierPart;
@@ -145,7 +145,6 @@
unibrow::Predicate<unibrow::WhiteSpace, 128> kIsWhiteSpace;
StaticResource<Utf8Decoder> utf8_decoder_;
- friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(ScannerConstants);
};
@@ -273,7 +272,7 @@
bool complete_;
};
- explicit Scanner(Isolate* isolate);
+ explicit Scanner(ScannerConstants* scanner_constants);
// Returns the current token again.
Token::Value current_token() { return current_.token; }
@@ -474,7 +473,7 @@
bool complete_;
};
- explicit JavaScriptScanner(Isolate* isolate);
+ explicit JavaScriptScanner(ScannerConstants* scanner_constants);
// Returns the next token.
Token::Value Next();
diff --git a/src/scanner.cc b/src/scanner.cc
index d1520b5..d9c2188 100755
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -345,7 +345,8 @@
// ----------------------------------------------------------------------------
// JsonScanner
-JsonScanner::JsonScanner(Isolate* isolate) : Scanner(isolate) { }
+JsonScanner::JsonScanner(ScannerConstants* scanner_constants)
+ : Scanner(scanner_constants) { }
void JsonScanner::Initialize(UC16CharacterStream* source) {
diff --git a/src/scanner.h b/src/scanner.h
index b61a068..776ba53 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -134,8 +134,8 @@
class V8JavaScriptScanner : public JavaScriptScanner {
public:
- explicit V8JavaScriptScanner(Isolate* isolate)
- : JavaScriptScanner(isolate) {}
+ explicit V8JavaScriptScanner(ScannerConstants* scanner_constants)
+ : JavaScriptScanner(scanner_constants) {}
void Initialize(UC16CharacterStream* source);
};
@@ -143,7 +143,7 @@
class JsonScanner : public Scanner {
public:
- explicit JsonScanner(Isolate* isolate);
+ explicit JsonScanner(ScannerConstants* scanner_constants);
void Initialize(UC16CharacterStream* source);
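
All scanners are now constructed from a ScannerConstants* instead of an Isolate*, which is what lets the stand-alone preparser above run without a VM while in-VM callers keep using the isolate's shared instance. A fragment showing the two construction paths (assumes V8's internal headers; StandAloneJavaScriptScanner is local to preparser-api.cc):

  // Inside the VM: the constants still come from the isolate.
  Isolate* isolate = Isolate::Current();
  V8JavaScriptScanner vm_scanner(isolate->scanner_constants());

  // Stand-alone preparser: ScannerConstants can now be stack-constructed,
  // since its constructor was made public in scanner-base.h above.
  ScannerConstants constants;
  StandAloneJavaScriptScanner standalone_scanner(&constants);
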
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
index cd198e2..435e71d 100644
--- a/src/stub-cache.cc
+++ b/src/stub-cache.cc
@@ -64,7 +64,7 @@
// Validate that the name does not move on scavenge, and that we
// can use identity checks instead of string equality checks.
- ASSERT(!isolate_->heap()->InNewSpace(name));
+ ASSERT(!heap()->InNewSpace(name));
ASSERT(name->IsSymbol());
// The state bits are not important to the hash function because
@@ -108,10 +108,10 @@
// there are global objects involved, we need to check global
// property cells in the stub and therefore the stub will be
// specific to the name.
- String* cache_name = isolate_->heap()->empty_string();
+ String* cache_name = heap()->empty_string();
if (receiver->IsGlobalObject()) cache_name = name;
JSObject* last = receiver;
- while (last->GetPrototype() != isolate_->heap()->null_value()) {
+ while (last->GetPrototype() != heap()->null_value()) {
last = JSObject::cast(last->GetPrototype());
if (last->IsGlobalObject()) cache_name = name;
}
@@ -466,7 +466,7 @@
// keyed loads that are not array elements go through a generic builtin stub.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, NORMAL);
- String* name = isolate_->heap()->KeyedLoadSpecialized_symbol();
+ String* name = heap()->KeyedLoadSpecialized_symbol();
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedLoadStubCompiler compiler;
@@ -518,7 +518,7 @@
StrictModeFlag strict_mode) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
- String* name = isolate_->heap()->KeyedStoreSpecialized_symbol();
+ String* name = heap()->KeyedStoreSpecialized_symbol();
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
KeyedStoreStubCompiler compiler(strict_mode);
@@ -563,26 +563,27 @@
}
}
-String* ExternalArrayTypeToStubName(ExternalArrayType array_type,
+String* ExternalArrayTypeToStubName(Heap* heap,
+ ExternalArrayType array_type,
bool is_store) {
if (is_store) {
switch (array_type) {
case kExternalByteArray:
- return HEAP->KeyedStoreExternalByteArray_symbol();
+ return heap->KeyedStoreExternalByteArray_symbol();
case kExternalUnsignedByteArray:
- return HEAP->KeyedStoreExternalUnsignedByteArray_symbol();
+ return heap->KeyedStoreExternalUnsignedByteArray_symbol();
case kExternalShortArray:
- return HEAP->KeyedStoreExternalShortArray_symbol();
+ return heap->KeyedStoreExternalShortArray_symbol();
case kExternalUnsignedShortArray:
- return HEAP->KeyedStoreExternalUnsignedShortArray_symbol();
+ return heap->KeyedStoreExternalUnsignedShortArray_symbol();
case kExternalIntArray:
- return HEAP->KeyedStoreExternalIntArray_symbol();
+ return heap->KeyedStoreExternalIntArray_symbol();
case kExternalUnsignedIntArray:
- return HEAP->KeyedStoreExternalUnsignedIntArray_symbol();
+ return heap->KeyedStoreExternalUnsignedIntArray_symbol();
case kExternalFloatArray:
- return HEAP->KeyedStoreExternalFloatArray_symbol();
+ return heap->KeyedStoreExternalFloatArray_symbol();
case kExternalPixelArray:
- return HEAP->KeyedStoreExternalPixelArray_symbol();
+ return heap->KeyedStoreExternalPixelArray_symbol();
default:
UNREACHABLE();
return NULL;
@@ -590,21 +591,21 @@
} else {
switch (array_type) {
case kExternalByteArray:
- return HEAP->KeyedLoadExternalByteArray_symbol();
+ return heap->KeyedLoadExternalByteArray_symbol();
case kExternalUnsignedByteArray:
- return HEAP->KeyedLoadExternalUnsignedByteArray_symbol();
+ return heap->KeyedLoadExternalUnsignedByteArray_symbol();
case kExternalShortArray:
- return HEAP->KeyedLoadExternalShortArray_symbol();
+ return heap->KeyedLoadExternalShortArray_symbol();
case kExternalUnsignedShortArray:
- return HEAP->KeyedLoadExternalUnsignedShortArray_symbol();
+ return heap->KeyedLoadExternalUnsignedShortArray_symbol();
case kExternalIntArray:
- return HEAP->KeyedLoadExternalIntArray_symbol();
+ return heap->KeyedLoadExternalIntArray_symbol();
case kExternalUnsignedIntArray:
- return HEAP->KeyedLoadExternalUnsignedIntArray_symbol();
+ return heap->KeyedLoadExternalUnsignedIntArray_symbol();
case kExternalFloatArray:
- return HEAP->KeyedLoadExternalFloatArray_symbol();
+ return heap->KeyedLoadExternalFloatArray_symbol();
case kExternalPixelArray:
- return HEAP->KeyedLoadExternalPixelArray_symbol();
+ return heap->KeyedLoadExternalPixelArray_symbol();
default:
UNREACHABLE();
return NULL;
@@ -627,7 +628,7 @@
strict_mode);
ExternalArrayType array_type =
ElementsKindToExternalArrayType(receiver->GetElementsKind());
- String* name = ExternalArrayTypeToStubName(array_type, is_store);
+ String* name = ExternalArrayTypeToStubName(heap(), array_type, is_store);
Object* code = receiver->map()->FindInCodeCache(name, flags);
if (code->IsUndefined()) {
ExternalArrayStubCompiler compiler;
@@ -759,7 +760,7 @@
compiler.CompileStoreField(receiver, field_index, transition, name);
if (!maybe_code->ToObject(&code)) return maybe_code;
}
- PROFILE(isolate_,
+ PROFILE(isolate(),
CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, name, Code::cast(code)));
@@ -917,7 +918,7 @@
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate_,
+ PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
@@ -975,7 +976,7 @@
if (!maybe_code->ToObject(&code)) return maybe_code;
}
ASSERT_EQ(flags, Code::cast(code)->flags());
- PROFILE(isolate_,
+ PROFILE(isolate(),
CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG),
Code::cast(code), name));
GDBJIT(AddCode(GDBJITInterface::CALL_IC, name, Code::cast(code)));
@@ -1046,8 +1047,8 @@
Code::kNoExtraICState,
NORMAL,
argc);
- Object* result = ProbeCache(isolate_, flags)->ToObjectUnchecked();
- ASSERT(result != isolate_->heap()->undefined_value());
+ Object* result = ProbeCache(isolate(), flags)->ToObjectUnchecked();
+ ASSERT(result != heap()->undefined_value());
// This might be called during the marking phase of the collector
// hence the unchecked cast.
return reinterpret_cast<Code*>(result);
@@ -1219,12 +1220,12 @@
void StubCache::Clear() {
for (int i = 0; i < kPrimaryTableSize; i++) {
- primary_[i].key = isolate_->heap()->empty_string();
+ primary_[i].key = heap()->empty_string();
primary_[i].value = isolate_->builtins()->builtin(
Builtins::kIllegal);
}
for (int j = 0; j < kSecondaryTableSize; j++) {
- secondary_[j].key = isolate_->heap()->empty_string();
+ secondary_[j].key = heap()->empty_string();
secondary_[j].value = isolate_->builtins()->builtin(
Builtins::kIllegal);
}
@@ -1668,7 +1669,7 @@
// Create code object in the heap.
CodeDesc desc;
masm_.GetCode(&desc);
- MaybeObject* result = HEAP->CreateCode(desc, flags, masm_.CodeObject());
+ MaybeObject* result = heap()->CreateCode(desc, flags, masm_.CodeObject());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs && !result->IsFailure()) {
Code::cast(result->ToObjectUnchecked())->Disassemble(name);
diff --git a/src/stub-cache.h b/src/stub-cache.h
index 19c4bac..793f581 100644
--- a/src/stub-cache.h
+++ b/src/stub-cache.h
@@ -322,6 +322,8 @@
return NULL;
}
+ Isolate* isolate() { return isolate_; }
+ Heap* heap() { return isolate()->heap(); }
private:
explicit StubCache(Isolate* isolate);
@@ -559,6 +561,8 @@
LookupResult* lookup);
Isolate* isolate() { return scope_.isolate(); }
+ Heap* heap() { return isolate()->heap(); }
+ Factory* factory() { return isolate()->factory(); }
private:
HandleScope scope_;
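
The stub-cache changes replace the process-global HEAP and FACTORY macros with per-object accessors, so each StubCache and stub compiler reaches its own isolate's heap. A minimal sketch of the pattern (illustrative class; the real accessors are the ones added to stub-cache.h above):

  class PerIsolateComponent {
   public:
    explicit PerIsolateComponent(Isolate* isolate) : isolate_(isolate) {}
    Isolate* isolate() { return isolate_; }
    Heap* heap() { return isolate()->heap(); }
    Factory* factory() { return isolate()->factory(); }
    // Call sites write heap()->empty_string() instead of HEAP->empty_string(),
    // so multiple isolates in one process stop sharing global state.
   private:
    Isolate* isolate_;
  };
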
diff --git a/src/top.cc b/src/top.cc
index 62850b1..ff29cad 100644
--- a/src/top.cc
+++ b/src/top.cc
@@ -306,11 +306,6 @@
allocator = preallocated_message_space_;
}
- NativeAllocationChecker allocation_checker(
- !FLAG_preallocate_message_memory ?
- NativeAllocationChecker::ALLOW :
- NativeAllocationChecker::DISALLOW);
-
StringStream::ClearMentionedObjectCache();
StringStream accumulator(allocator);
incomplete_message_ = &accumulator;
diff --git a/src/v8globals.h b/src/v8globals.h
index 7ff464a..2a01dfd 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -468,7 +468,8 @@
CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
- SAHF = 0}; // x86
+ SAHF = 0, // x86
+ FPU = 1}; // MIPS
// The Strict Mode (ECMA-262 5th edition, 4.2.2).
enum StrictModeFlag {
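
CpuFeature values double as bit positions in a feature mask, so FPU on MIPS can reuse bit 1 (VFP3's bit on ARM) because only one architecture's probes ever run in a given build. A hedged sketch of building and querying such a mask (illustrative; mirrors the 1u << FPU returned by CpuFeaturesImpliedByCompiler above):

  uint64_t supported = 0;
  if (OS::MipsCpuHasFeature(FPU)) supported |= (1u << FPU);

  const bool has_fpu = (supported & (1u << FPU)) != 0;
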
diff --git a/src/version.cc b/src/version.cc
index 7a45cfb..6104dac 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,9 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 2
-#define BUILD_NUMBER 5
-#define PATCH_LEVEL 1
-
+#define BUILD_NUMBER 6
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index e6f4696..0fb827b 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -1062,6 +1062,9 @@
case TRBinaryOpIC::HEAP_NUMBER:
GenerateHeapNumberStub(masm);
break;
+ case TRBinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
case TRBinaryOpIC::STRING:
GenerateStringStub(masm);
break;
@@ -1438,6 +1441,39 @@
}
+void TypeRecordingBinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+ Label call_runtime;
+
+ if (op_ == Token::ADD) {
+ // Handle string addition here, because it is the only operation
+ // that does not do a ToNumber conversion on the operands.
+ GenerateStringAddCode(masm);
+ }
+
+ // Convert oddball arguments to numbers.
+ NearLabel check, done;
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &check);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(rdx, rdx);
+ } else {
+ __ LoadRoot(rdx, Heap::kNanValueRootIndex);
+ }
+ __ jmp(&done);
+ __ bind(&check);
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
+ __ j(not_equal, &done);
+ if (Token::IsBitOp(op_)) {
+ __ xor_(rax, rax);
+ } else {
+ __ LoadRoot(rax, Heap::kNanValueRootIndex);
+ }
+ __ bind(&done);
+
+ GenerateHeapNumberStub(masm);
+}
+
+
void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
GenerateFloatingPointCode(masm, &gc_required, ¬_number);
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 0d24af5..246650a 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -289,6 +289,7 @@
void GenerateSmiStub(MacroAssembler* masm);
void GenerateInt32Stub(MacroAssembler* masm);
void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
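
The new ODDBALL state covers binary operations where an operand is undefined: apart from the string path tried for ADD, undefined is replaced by NaN, or by 0 for the bitwise operators, and execution then falls through to the heap-number code. A hedged sketch of that conversion in plain C++ (not the generated assembly):

  #include <limits>

  // What GenerateOddballStub arranges for an undefined operand before the
  // heap-number path runs: 0 for bitwise operators, NaN for everything else,
  // so e.g. undefined + 1 is NaN while undefined | 1 is 1.
  static double UndefinedOperandValue(bool is_bitwise_op) {
    return is_bitwise_op ? 0.0 : std::numeric_limits<double>::quiet_NaN();
  }
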
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 6f34cf7..8c338fe 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -8207,17 +8207,9 @@
result = allocator()->Allocate();
ASSERT(result.is_valid());
- // Cannot use r12 for receiver, because that changes
- // the distance between a call and a fixup location,
- // due to a special encoding of r12 as r/m in a ModR/M byte.
- if (receiver.reg().is(r12)) {
- frame()->Spill(receiver.reg()); // It will be overwritten with result.
- // Swap receiver and value.
- __ movq(result.reg(), receiver.reg());
- Result temp = receiver;
- receiver = result;
- result = temp;
- }
+ // r12 is now a reserved register, so it cannot be the receiver.
+ // If it was, the distance to the fixup location would not be constant.
+ ASSERT(!receiver.reg().is(r12));
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name);
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index c58d5ea..2080c61 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -361,13 +361,32 @@
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
- for (int i = 0; ok && i < 4; i++) {
+ for (int i = StandardFrameConstants::kCallerPCOffset;
+ ok && i >= StandardFrameConstants::kMarkerOffset;
+ i -= kPointerSize) {
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
- PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
+ const char* name = "UNKNOWN";
+ switch (i) {
+ case StandardFrameConstants::kCallerPCOffset:
+ name = "caller's pc";
+ break;
+ case StandardFrameConstants::kCallerFPOffset:
+ name = "fp";
+ break;
+ case StandardFrameConstants::kContextOffset:
+ name = "context";
+ break;
+ case StandardFrameConstants::kMarkerOffset:
+ name = "function";
+ break;
+ }
+ PrintF(" [rsp + %d] <- 0x%08" V8PRIxPTR " ; [rsp + %d] "
+ "(fixed part - %s)\n",
output_offset,
input_value,
- input_offset);
+ input_offset,
+ name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index a3609fd..86a7e83 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -63,9 +63,6 @@
}
virtual void AfterCall() {
- // Ensure that we have enough space in the reloc info to patch
- // this with calls when doing deoptimization.
- codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index 99e1627..269e7af 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -109,13 +109,13 @@
* bool direct_call)
*/
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM((&masm_))
RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(
Mode mode,
int registers_to_save)
- : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
- no_root_array_scope_(masm_),
+ : masm_(NULL, kRegExpCodeSize),
+ no_root_array_scope_(&masm_),
code_relative_fixup_positions_(4),
mode_(mode),
num_registers_(registers_to_save),
@@ -132,7 +132,6 @@
RegExpMacroAssemblerX64::~RegExpMacroAssemblerX64() {
- delete masm_;
// Unuse labels in case we throw away the assembler without calling GetCode.
entry_label_.Unuse();
start_label_.Unuse();
@@ -428,11 +427,11 @@
__ movq(rdx, rbx);
#endif
ExternalReference compare =
- ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+ ExternalReference::re_case_insensitive_compare_uc16(masm_.isolate());
__ CallCFunction(compare, num_arguments);
// Restore original values before reacting on result value.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
__ pop(backtrack_stackpointer());
#ifndef _WIN64
__ pop(rdi);
@@ -746,7 +745,7 @@
Label stack_ok;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(masm_.isolate());
__ movq(rcx, rsp);
__ movq(kScratchRegister, stack_limit);
__ subq(rcx, Operand(kScratchRegister, 0));
@@ -762,7 +761,7 @@
__ jmp(&exit_label_);
__ bind(&stack_limit_hit);
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
CallCheckStackGuardState(); // Preserves no registers beside rbp and rsp.
__ testq(rax, rax);
// If returned value is non-zero, we exit with the returned value as result.
@@ -817,7 +816,7 @@
// Initialize backtrack stack pointer.
__ movq(backtrack_stackpointer(), Operand(rbp, kStackHighEnd));
// Initialize code object pointer.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
// Load previous char as initial value of current-character.
Label at_start;
__ cmpb(Operand(rbp, kStartIndex), Immediate(0));
@@ -898,7 +897,7 @@
__ j(not_zero, &exit_label_);
// Restore registers.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
__ pop(rdi);
__ pop(backtrack_stackpointer());
// String might have moved: Reload esi from frame.
@@ -932,7 +931,7 @@
__ lea(rsi, Operand(rbp, kStackHighEnd)); // Second argument.
#endif
ExternalReference grow_stack =
- ExternalReference::re_grow_stack(masm_->isolate());
+ ExternalReference::re_grow_stack(masm_.isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@@ -941,7 +940,7 @@
// Otherwise use return value as new stack pointer.
__ movq(backtrack_stackpointer(), rax);
// Restore saved registers and continue.
- __ Move(code_object_pointer(), masm_->CodeObject());
+ __ Move(code_object_pointer(), masm_.CodeObject());
#ifndef _WIN64
__ pop(rdi);
__ pop(rsi);
@@ -960,11 +959,11 @@
FixupCodeRelativePositions();
CodeDesc code_desc;
- masm_->GetCode(&code_desc);
+ masm_.GetCode(&code_desc);
Isolate* isolate = ISOLATE;
Handle<Code> code = isolate->factory()->NewCode(
code_desc, Code::ComputeFlags(Code::REGEXP),
- masm_->CodeObject());
+ masm_.CodeObject());
PROFILE(isolate, RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
}
@@ -1134,7 +1133,7 @@
__ lea(rdi, Operand(rsp, -kPointerSize));
#endif
ExternalReference stack_check =
- ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ ExternalReference::re_check_stack_guard_state(masm_.isolate());
__ CallCFunction(stack_check, num_arguments);
}
@@ -1299,8 +1298,8 @@
// Patch the relative offset to be relative to the Code object pointer
// instead.
int patch_position = position - kIntSize;
- int offset = masm_->long_at(patch_position);
- masm_->long_at_put(patch_position,
+ int offset = masm_.long_at(patch_position);
+ masm_.long_at_put(patch_position,
offset
+ position
+ Code::kHeaderSize
@@ -1334,7 +1333,7 @@
// Check for preemption.
Label no_preempt;
ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm_->isolate());
+ ExternalReference::address_of_stack_limit(masm_.isolate());
__ load_rax(stack_limit);
__ cmpq(rsp, rax);
__ j(above, &no_preempt);
@@ -1348,7 +1347,7 @@
void RegExpMacroAssemblerX64::CheckStackLimit() {
Label no_stack_overflow;
ExternalReference stack_limit =
- ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+ ExternalReference::address_of_regexp_stack_limit(masm_.isolate());
__ load_rax(stack_limit);
__ cmpq(backtrack_stackpointer(), rax);
__ j(above, &no_stack_overflow);
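The regexp-macro-assembler changes above all follow one refactor: masm_ stops being a heap-allocated MacroAssembler* (newed in the constructor, deleted in the destructor) and becomes an embedded value member, so every masm_-> becomes masm_. and the ACCESS_MASM macro now takes &masm_. The matching header change follows in the next hunk. Below is a minimal standalone sketch of that pattern, not V8 code; the class names are hypothetical stand-ins.

// Sketch only: illustrates turning a pointer member into a value member.
#include <cstddef>

class Assembler {                  // stand-in for MacroAssembler
 public:
  Assembler(void* buffer, int size) : buffer_(buffer), size_(size) {}
  int size() const { return size_; }
 private:
  void* buffer_;
  int size_;
};

// Before: the owner news the assembler and must remember to delete it.
class RegExpCompilerBefore {
 public:
  explicit RegExpCompilerBefore(int code_size)
      : masm_(new Assembler(NULL, code_size)) {}
  ~RegExpCompilerBefore() { delete masm_; }
  int CodeSize() const { return masm_->size(); }
 private:
  Assembler* masm_;
};

// After: the assembler is embedded; no manual delete, '.' access,
// and &masm_ is what gets handed to helpers that want a pointer.
class RegExpCompilerAfter {
 public:
  explicit RegExpCompilerAfter(int code_size) : masm_(NULL, code_size) {}
  int CodeSize() const { return masm_.size(); }
 private:
  Assembler masm_;               // constructed in the initializer list
};

int main() {
  RegExpCompilerBefore before(1024);
  RegExpCompilerAfter after(1024);
  return before.CodeSize() == after.CodeSize() ? 0 : 1;
}

Note that in the real change the member declaration order matters: masm_ is declared before no_root_array_scope_ so it is fully constructed by the time the scope object takes &masm_.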
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 564669e..a83f8cb 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -218,7 +218,7 @@
void BranchOrBacktrack(Condition condition, Label* to);
void MarkPositionForCodeRelativeFixup() {
- code_relative_fixup_positions_.Add(masm_->pc_offset());
+ code_relative_fixup_positions_.Add(masm_.pc_offset());
}
void FixupCodeRelativePositions();
@@ -250,7 +250,7 @@
// Increments the stack pointer (rcx) by a word size.
inline void Drop();
- MacroAssembler* masm_;
+ MacroAssembler masm_;
MacroAssembler::NoRootArrayScope no_root_array_scope_;
ZoneList<int> code_relative_fixup_positions_;
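The stub-cache changes that follow apply a different but related cleanup: process-global HEAP and FACTORY macro uses, and verbose masm()->isolate() chains, are replaced by the compiler's own heap(), factory() and isolate() accessors so everything is routed through the isolate the stub compiler belongs to. The sketch below is a simplified illustration of that accessor pattern, not V8 code; all class names are hypothetical.

// Sketch only: per-instance isolate accessors instead of global macros.
#include <cassert>

class Heap {};
class Factory {};

class Isolate {
 public:
  Heap* heap() { return &heap_; }
  Factory* factory() { return &factory_; }
 private:
  Heap heap_;
  Factory factory_;
};

class StubCompiler {
 public:
  explicit StubCompiler(Isolate* isolate) : isolate_(isolate) {}
  // Convenience accessors mirroring the heap()/factory()/isolate()
  // calls that replace the old global lookups in the diff below.
  Isolate* isolate() { return isolate_; }
  Heap* heap() { return isolate_->heap(); }
  Factory* factory() { return isolate_->factory(); }
 private:
  Isolate* isolate_;
};

int main() {
  Isolate isolate;
  StubCompiler compiler(&isolate);
  // The compiler never reaches for a process-wide singleton; everything
  // goes through the isolate it was created with.
  assert(compiler.heap() == isolate.heap());
  assert(compiler.factory() == isolate.factory());
  return 0;
}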
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index 403a1dc..7494fe0 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -153,7 +153,7 @@
ASSERT_EQ(kSmiTagSize, 1);
__ movq(entity_name, Operand(properties, index, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
- __ Cmp(entity_name, FACTORY->undefined_value());
+ __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
// __ jmp(miss_label);
if (i != kProbes - 1) {
__ j(equal, &done);
@@ -380,7 +380,7 @@
JSObject* holder_obj) {
__ push(name);
InterceptorInfo* interceptor = holder_obj->GetNamedInterceptor();
- ASSERT(!HEAP->InNewSpace(interceptor));
+ ASSERT(!masm->isolate()->heap()->InNewSpace(interceptor));
__ Move(kScratchRegister, Handle<Object>(interceptor));
__ push(kScratchRegister);
__ push(receiver);
@@ -472,7 +472,7 @@
__ movq(Operand(rsp, 2 * kPointerSize), rdi);
Object* call_data = optimization.api_call_info()->data();
Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
- if (HEAP->InNewSpace(call_data)) {
+ if (masm->isolate()->heap()->InNewSpace(call_data)) {
__ Move(rcx, api_call_info_handle);
__ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset));
__ movq(Operand(rsp, 3 * kPointerSize), rbx);
@@ -567,7 +567,7 @@
name,
holder,
miss);
- return HEAP->undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
}
@@ -667,7 +667,7 @@
FreeSpaceForFastApiCall(masm, scratch1);
}
- return HEAP->undefined_value(); // Success.
+ return masm->isolate()->heap()->undefined_value(); // Success.
}
void CompileRegular(MacroAssembler* masm,
@@ -847,7 +847,7 @@
ASSERT(cell->value()->IsTheHole());
__ Move(scratch, Handle<Object>(cell));
__ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- FACTORY->the_hole_value());
+ masm->isolate()->factory()->the_hole_value());
__ j(not_equal, miss);
return cell;
}
@@ -896,7 +896,7 @@
!current->IsJSGlobalObject() &&
!current->IsJSGlobalProxy()) {
if (!name->IsSymbol()) {
- MaybeObject* lookup_result = HEAP->LookupSymbol(name);
+ MaybeObject* lookup_result = heap()->LookupSymbol(name);
if (lookup_result->IsFailure()) {
set_failure(Failure::cast(lookup_result));
return reg;
@@ -916,7 +916,7 @@
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
reg = holder_reg; // from now the object is in holder_reg
__ movq(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
- } else if (HEAP->InNewSpace(prototype)) {
+ } else if (heap()->InNewSpace(prototype)) {
// Get the map of the current object.
__ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
__ Cmp(scratch1, Handle<Map>(current->map()));
@@ -1050,7 +1050,7 @@
__ push(receiver); // receiver
__ push(reg); // holder
- if (HEAP->InNewSpace(callback_handle->data())) {
+ if (heap()->InNewSpace(callback_handle->data())) {
__ Move(scratch1, callback_handle);
__ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data
} else {
@@ -1242,7 +1242,7 @@
ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty),
- masm()->isolate());
+ isolate());
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
@@ -1257,7 +1257,7 @@
__ push(scratch2); // restore old return address
ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate());
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate());
__ TailCallExternalReference(ref, 5, 1);
}
}
@@ -1303,7 +1303,7 @@
__ movq(rdi, FieldOperand(rdi, JSGlobalPropertyCell::kValueOffset));
// Check that the cell contains the same function.
- if (HEAP->InNewSpace(function)) {
+ if (heap()->InNewSpace(function)) {
// We can't embed a pointer to a function in new space so we have
// to verify that the shared function info is unchanged. This has
// the nice side effect that multiple closures based on the same
@@ -1325,7 +1325,7 @@
MaybeObject* CallStubCompiler::GenerateMissBranch() {
- MaybeObject* maybe_obj = masm()->isolate()->stub_cache()->ComputeCallMiss(
+ MaybeObject* maybe_obj = isolate()->stub_cache()->ComputeCallMiss(
arguments().immediate(), kind_);
Object* obj;
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
@@ -1403,7 +1403,7 @@
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss;
@@ -1437,7 +1437,7 @@
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- FACTORY->fixed_array_map());
+ factory()->fixed_array_map());
__ j(not_equal, &call_builtin);
if (argc == 1) { // Otherwise fall through to call builtin.
@@ -1486,11 +1486,10 @@
__ jmp(&call_builtin);
}
- Isolate* isolate = masm()->isolate();
ExternalReference new_space_allocation_top =
- ExternalReference::new_space_allocation_top_address(isolate);
+ ExternalReference::new_space_allocation_top_address(isolate());
ExternalReference new_space_allocation_limit =
- ExternalReference::new_space_allocation_limit_address(isolate);
+ ExternalReference::new_space_allocation_limit_address(isolate());
const int kAllocationDelta = 4;
// Load top.
@@ -1537,7 +1536,7 @@
__ bind(&call_builtin);
__ TailCallExternalReference(ExternalReference(Builtins::c_ArrayPush,
- masm()->isolate()),
+ isolate()),
argc + 1,
1);
}
@@ -1565,7 +1564,7 @@
// -----------------------------------
// If object is not an array, bail out to regular call.
- if (!object->IsJSArray() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsJSArray() || cell != NULL) return heap()->undefined_value();
Label miss, return_undefined, call_builtin;
@@ -1621,7 +1620,7 @@
__ bind(&call_builtin);
__ TailCallExternalReference(
- ExternalReference(Builtins::c_ArrayPop, masm()->isolate()),
+ ExternalReference(Builtins::c_ArrayPop, isolate()),
argc + 1,
1);
@@ -1649,7 +1648,7 @@
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1731,7 +1730,7 @@
// -----------------------------------
// If object is not a string, bail out to regular call.
- if (!object->IsString() || cell != NULL) return HEAP->undefined_value();
+ if (!object->IsString() || cell != NULL) return heap()->undefined_value();
const int argc = arguments().immediate();
@@ -1818,7 +1817,7 @@
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -1875,7 +1874,7 @@
JSFunction* function,
String* name) {
// TODO(872): implement this.
- return HEAP->undefined_value();
+ return heap()->undefined_value();
}
@@ -1896,7 +1895,7 @@
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
- if (!object->IsJSObject() || argc != 1) return HEAP->undefined_value();
+ if (!object->IsJSObject() || argc != 1) return heap()->undefined_value();
Label miss;
GenerateNameCheck(name, &miss);
@@ -1945,7 +1944,7 @@
// Check if the argument is a heap number and load its value.
__ bind(&not_smi);
- __ CheckMap(rax, FACTORY->heap_number_map(), &slow, true);
+ __ CheckMap(rax, factory()->heap_number_map(), &slow, true);
__ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
// Check the sign of the argument. If the argument is positive,
@@ -1992,11 +1991,11 @@
ASSERT(optimization.is_simple_api_call());
// Bail out if object is a global object as we don't want to
// repatch it to global receiver.
- if (object->IsGlobalObject()) return HEAP->undefined_value();
- if (cell != NULL) return HEAP->undefined_value();
+ if (object->IsGlobalObject()) return heap()->undefined_value();
+ if (cell != NULL) return heap()->undefined_value();
int depth = optimization.GetPrototypeDepthOfExpectedType(
JSObject::cast(object), holder);
- if (depth == kInvalidProtoDepth) return HEAP->undefined_value();
+ if (depth == kInvalidProtoDepth) return heap()->undefined_value();
Label miss, miss_before_stack_reserved;
@@ -2009,7 +2008,7 @@
// Check that the receiver isn't a smi.
__ JumpIfSmi(rdx, &miss_before_stack_reserved);
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_const(), 1);
__ IncrementCounter(counters->call_const_fast_api(), 1);
@@ -2081,7 +2080,7 @@
// unless we're doing a receiver map check.
ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
SharedFunctionInfo* function_info = function->shared();
switch (check) {
case RECEIVER_MAP_CHECK:
@@ -2287,7 +2286,7 @@
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Jump to the cached code (tail call).
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->call_global_inline(), 1);
ASSERT(function->is_compiled());
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -2335,7 +2334,7 @@
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2380,13 +2379,12 @@
// Do tail-call to the runtime system.
ExternalReference store_callback_property =
- ExternalReference(IC_Utility(IC::kStoreCallbackProperty),
- masm()->isolate());
+ ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
__ TailCallExternalReference(store_callback_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2430,13 +2428,12 @@
// Do tail-call to the runtime system.
ExternalReference store_ic_property =
- ExternalReference(IC_Utility(IC::kStoreInterceptorProperty),
- masm()->isolate());
+ ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
__ TailCallExternalReference(store_ic_property, 4, 1);
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2473,14 +2470,14 @@
__ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rax);
// Return the value (register rax).
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
// Handle store cache miss.
__ bind(&miss);
__ IncrementCounter(counters->named_store_global_inline_miss(), 1);
- Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss();
+ Handle<Code> ic = isolate()->builtins()->StoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2500,7 +2497,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_store_field(), 1);
// Check that the name has not changed.
@@ -2518,7 +2515,7 @@
// Handle store cache miss.
__ bind(&miss);
__ DecrementCounter(counters->keyed_store_field(), 1);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ Jump(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2550,7 +2547,7 @@
// Get the elements array and make sure it is a fast element array, not 'cow'.
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- FACTORY->fixed_array_map());
+ factory()->fixed_array_map());
__ j(not_equal, &miss);
// Check that the key is within bounds.
@@ -2575,7 +2572,7 @@
// Handle store cache miss.
__ bind(&miss);
- Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss();
+ Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic, RelocInfo::CODE_TARGET);
// Return the generated code.
@@ -2624,7 +2621,7 @@
GenerateLoadMiss(masm(), Code::LOAD_IC);
// Return the generated code.
- return GetCode(NONEXISTENT, HEAP->empty_string());
+ return GetCode(NONEXISTENT, heap()->empty_string());
}
@@ -2763,7 +2760,7 @@
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_load_global_stub(), 1);
__ movq(rax, rbx);
__ ret(0);
@@ -2788,7 +2785,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_field(), 1);
// Check that the name has not changed.
@@ -2818,7 +2815,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_callback(), 1);
// Check that the name has not changed.
@@ -2853,7 +2850,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_constant_function(), 1);
// Check that the name has not changed.
@@ -2881,7 +2878,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_interceptor(), 1);
// Check that the name has not changed.
@@ -2917,7 +2914,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_array_length(), 1);
// Check that the name has not changed.
@@ -2942,7 +2939,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_string_length(), 1);
// Check that the name has not changed.
@@ -2967,7 +2964,7 @@
// -----------------------------------
Label miss;
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_function_prototype(), 1);
// Check that the name has not changed.
@@ -3042,7 +3039,7 @@
Label generic_stub_call;
// Use r8 for holding undefined which is used in several places below.
- __ Move(r8, FACTORY->undefined_value());
+ __ Move(r8, factory()->undefined_value());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Check to see whether there are any break points in the function code. If
@@ -3086,7 +3083,7 @@
// rbx: initial map
// rdx: JSObject (untagged)
__ movq(Operand(rdx, JSObject::kMapOffset), rbx);
- __ Move(rbx, FACTORY->empty_fixed_array());
+ __ Move(rbx, factory()->empty_fixed_array());
__ movq(Operand(rdx, JSObject::kPropertiesOffset), rbx);
__ movq(Operand(rdx, JSObject::kElementsOffset), rbx);
@@ -3145,7 +3142,7 @@
__ pop(rcx);
__ lea(rsp, Operand(rsp, rbx, times_pointer_size, 1 * kPointerSize));
__ push(rcx);
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->constructed_objects(), 1);
__ IncrementCounter(counters->constructed_objects_stub(), 1);
__ ret(0);
@@ -3154,7 +3151,7 @@
// construction.
__ bind(&generic_stub_call);
Code* code =
- masm()->isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
+ isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
@@ -3269,7 +3266,7 @@
// Slow case: Jump to runtime.
__ bind(&slow);
- Counters* counters = masm()->isolate()->counters();
+ Counters* counters = isolate()->counters();
__ IncrementCounter(counters->keyed_load_external_array_slow(), 1);
// ----------- S t a t e -------------