Version 2.2.14.
Fixed a crash in code generated for String.charCodeAt.
Fixed a compilation issue with some GCC versions (issue 727).
Performance optimizations on x64 and ARM platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@4779 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index a6de794..c2c1d35 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2010-06-02: Version 2.2.14
+
+ Fixed a crash in code generated for String.charCodeAt.
+
+ Fixed a compilation issue with some GCC versions (issue 727).
+
+ Performance optimizations on x64 and ARM platforms.
+
+
2010-05-31: Version 2.2.13
Implement Object.getOwnPropertyDescriptor for element indices and
diff --git a/include/v8.h b/include/v8.h
index 5b5dabe..02abb4c 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -3177,7 +3177,7 @@
static const int kProxyProxyOffset = sizeof(void*);
static const int kJSObjectHeaderSize = 3 * sizeof(void*);
static const int kFullStringRepresentationMask = 0x07;
- static const int kExternalTwoByteRepresentationTag = 0x03;
+ static const int kExternalTwoByteRepresentationTag = 0x02;
// These constants are compiler dependent so their values must be
// defined within the implementation.
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 6dd381f..846464a 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1213,31 +1213,32 @@
// Both instructions can be eliminated if ry = rx.
// If ry != rx, a register copy from ry to rx is inserted
// after eliminating the push and the pop instructions.
- Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
- Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
+ if (can_peephole_optimize(2)) {
+ Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
+ Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
- if (can_peephole_optimize(2) &&
- IsPush(push_instr) &&
- IsPop(pop_instr)) {
- if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
- // For consecutive push and pop on different registers,
- // we delete both the push & pop and insert a register move.
- // push ry, pop rx --> mov rx, ry
- Register reg_pushed, reg_popped;
- reg_pushed = GetRd(push_instr);
- reg_popped = GetRd(pop_instr);
- pc_ -= 2 * kInstrSize;
- // Insert a mov instruction, which is better than a pair of push & pop
- mov(reg_popped, reg_pushed);
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (diff reg) replaced by a reg move\n", pc_offset());
- }
- } else {
- // For consecutive push and pop on the same register,
- // both the push and the pop can be deleted.
- pc_ -= 2 * kInstrSize;
- if (FLAG_print_peephole_optimization) {
- PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ if (IsPush(push_instr) && IsPop(pop_instr)) {
+ if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
+ // For consecutive push and pop on different registers,
+ // we delete both the push & pop and insert a register move.
+ // push ry, pop rx --> mov rx, ry
+ Register reg_pushed, reg_popped;
+ reg_pushed = GetRd(push_instr);
+ reg_popped = GetRd(pop_instr);
+ pc_ -= 2 * kInstrSize;
+ // Insert a mov instruction, which is better than a pair of push & pop
+ mov(reg_popped, reg_pushed);
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (diff reg) replaced by a reg move\n",
+ pc_offset());
+ }
+ } else {
+ // For consecutive push and pop on the same register,
+ // both the push and the pop can be deleted.
+ pc_ -= 2 * kInstrSize;
+ if (FLAG_print_peephole_optimization) {
+ PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
+ }
}
}
}
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 947c363..d0dee56 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -690,6 +690,10 @@
void add(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
+ void add(Register dst, Register src1, Register src2,
+ SBit s = LeaveCC, Condition cond = al) {
+ add(dst, src1, Operand(src2), s, cond);
+ }
void adc(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index d550cbd..800723b 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -9283,7 +9283,7 @@
__ CheckMap(index_, scratch_,
Factory::heap_number_map(), index_not_number_, true);
call_helper.BeforeCall(masm);
- __ Push(object_, index_, result_);
+ __ Push(object_, index_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -9297,9 +9297,11 @@
// have a chance to overwrite it.
__ mov(scratch_, r0);
}
- __ pop(result_);
__ pop(index_);
__ pop(object_);
+ // Reload the instance type.
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ BranchOnNotSmi(scratch_, index_out_of_range_);
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 80c8765..a7c436d 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -48,60 +48,70 @@
#define __ ACCESS_MASM(masm)
// Helper function used from LoadIC/CallIC GenerateNormal.
+// receiver: Receiver. It is not clobbered if a jump to the miss label is
+// done
+// name: Property name. It is not clobbered if a jump to the miss label is
+// done
+// result: Register for the result. It is only updated if a jump to the miss
+// label is not done. Can be the same as receiver or name clobbering
+// one of these in the case of not jumping to the miss label.
+// The three scratch registers need to be different from the receiver, name and
+// result.
static void GenerateDictionaryLoad(MacroAssembler* masm,
Label* miss,
- Register t0,
- Register t1) {
- // Register use:
- //
- // t0 - used to hold the property dictionary.
- //
- // t1 - initially the receiver
- // - used for the index into the property dictionary
- // - holds the result on exit.
- //
- // r3 - used as temporary and to hold the capacity of the property
- // dictionary.
- //
- // r2 - holds the name of the property and is unchanged.
- // r4 - used as temporary.
+ Register receiver,
+ Register name,
+ Register result,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ DictionaryCheck check_dictionary) {
+ // Main use of the scratch registers.
+ // scratch1: Used to hold the property dictionary.
+ // scratch2: Used as temporary and to hold the capacity of the property
+ // dictionary.
+ // scratch3: Used as temporary.
Label done;
// Check for the absence of an interceptor.
- // Load the map into t0.
- __ ldr(t0, FieldMemOperand(t1, JSObject::kMapOffset));
+ // Load the map into scratch1.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kMapOffset));
// Bail out if the receiver has a named interceptor.
- __ ldrb(r3, FieldMemOperand(t0, Map::kBitFieldOffset));
- __ tst(r3, Operand(1 << Map::kHasNamedInterceptor));
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
+ __ tst(scratch2, Operand(1 << Map::kHasNamedInterceptor));
__ b(nz, miss);
// Bail out if we have a JS global proxy object.
- __ ldrb(r3, FieldMemOperand(t0, Map::kInstanceTypeOffset));
- __ cmp(r3, Operand(JS_GLOBAL_PROXY_TYPE));
+ __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ cmp(scratch2, Operand(JS_GLOBAL_PROXY_TYPE));
__ b(eq, miss);
// Possible work-around for http://crbug.com/16276.
// See also: http://codereview.chromium.org/155418.
- __ cmp(r3, Operand(JS_GLOBAL_OBJECT_TYPE));
+ __ cmp(scratch2, Operand(JS_GLOBAL_OBJECT_TYPE));
__ b(eq, miss);
- __ cmp(r3, Operand(JS_BUILTINS_OBJECT_TYPE));
+ __ cmp(scratch2, Operand(JS_BUILTINS_OBJECT_TYPE));
__ b(eq, miss);
+ // Load the properties array.
+ __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
// Check that the properties array is a dictionary.
- __ ldr(t0, FieldMemOperand(t1, JSObject::kPropertiesOffset));
- __ ldr(r3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r3, ip);
- __ b(ne, miss);
+ if (check_dictionary == CHECK_DICTIONARY) {
+ __ ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(scratch2, ip);
+ __ b(ne, miss);
+ }
// Compute the capacity mask.
const int kCapacityOffset = StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ ldr(r3, FieldMemOperand(t0, kCapacityOffset));
- __ mov(r3, Operand(r3, ASR, kSmiTagSize)); // convert smi to int
- __ sub(r3, r3, Operand(1));
+ __ ldr(scratch2, FieldMemOperand(scratch1, kCapacityOffset));
+ __ mov(scratch2, Operand(scratch2, ASR, kSmiTagSize)); // convert smi to int
+ __ sub(scratch2, scratch2, Operand(1));
const int kElementsStartOffset = StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
@@ -112,26 +122,27 @@
static const int kProbes = 4;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ ldr(r4, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ ldr(scratch3, FieldMemOperand(name, String::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
ASSERT(StringDictionary::GetProbeOffset(i) <
1 << (32 - String::kHashFieldOffset));
- __ add(r4, r4, Operand(
+ __ add(scratch3, scratch3, Operand(
StringDictionary::GetProbeOffset(i) << String::kHashShift));
}
- __ and_(r4, r3, Operand(r4, LSR, String::kHashShift));
+ __ and_(scratch3, scratch2, Operand(scratch3, LSR, String::kHashShift));
// Scale the index by multiplying by the element size.
ASSERT(StringDictionary::kEntrySize == 3);
- __ add(r4, r4, Operand(r4, LSL, 1)); // r4 = r4 * 3
+ // scratch3 = scratch3 * 3.
+ __ add(scratch3, scratch3, Operand(scratch3, LSL, 1));
// Check if the key is identical to the name.
- __ add(r4, t0, Operand(r4, LSL, 2));
- __ ldr(ip, FieldMemOperand(r4, kElementsStartOffset));
- __ cmp(r2, Operand(ip));
+ __ add(scratch3, scratch1, Operand(scratch3, LSL, 2));
+ __ ldr(ip, FieldMemOperand(scratch3, kElementsStartOffset));
+ __ cmp(name, Operand(ip));
if (i != kProbes - 1) {
__ b(eq, &done);
} else {
@@ -140,13 +151,15 @@
}
// Check that the value is a normal property.
- __ bind(&done); // r4 == t0 + 4*index
- __ ldr(r3, FieldMemOperand(r4, kElementsStartOffset + 2 * kPointerSize));
- __ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
+ __ bind(&done); // scratch3 == scratch1 + 4 * index
+ __ ldr(scratch2,
+ FieldMemOperand(scratch3, kElementsStartOffset + 2 * kPointerSize));
+ __ tst(scratch2, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
__ b(ne, miss);
// Get the value at the masked, scaled index and return.
- __ ldr(t1, FieldMemOperand(r4, kElementsStartOffset + 1 * kPointerSize));
+ __ ldr(result,
+ FieldMemOperand(scratch3, kElementsStartOffset + 1 * kPointerSize));
}
@@ -354,7 +367,7 @@
Label* miss,
Register scratch) {
// Search dictionary - put result in register r1.
- GenerateDictionaryLoad(masm, miss, r0, r1);
+ GenerateDictionaryLoad(masm, miss, r1, r2, r1, r0, r3, r4, CHECK_DICTIONARY);
// Check that the value isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
@@ -534,7 +547,7 @@
__ b(ne, &miss);
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, r1, r0);
+ GenerateDictionaryLoad(masm, &miss, r0, r2, r0, r1, r3, r4, CHECK_DICTIONARY);
__ Ret();
// Global object access: Check access rights.
@@ -542,7 +555,7 @@
__ CheckAccessGlobalProxy(r0, r1, &miss);
__ b(&probe);
- // Cache miss: Restore receiver from stack and jump to runtime.
+ // Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
}
@@ -715,7 +728,7 @@
__ Push(r1, r0);
- __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+ __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
@@ -725,7 +738,8 @@
// -- r0 : key
// -- r1 : receiver
// -----------------------------------
- Label slow, fast, check_pixel_array, check_number_dictionary;
+ Label slow, check_string, index_smi, index_string;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
Register key = r0;
Register receiver = r1;
@@ -748,8 +762,10 @@
__ b(lt, &slow);
// Check that the key is a smi.
- __ BranchOnNotSmi(key, &slow);
- // Get the elements array of the object.
+ __ BranchOnNotSmi(key, &check_string);
+ __ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r4, HeapObject::kMapOffset));
@@ -771,6 +787,7 @@
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ mov(r0, r2);
+ __ IncrementCounter(&Counters::keyed_load_generic_smi, 1, r2, r3);
__ Ret();
// Check whether the elements is a pixel array.
@@ -806,6 +823,107 @@
__ bind(&slow);
__ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r2, r3);
GenerateRuntimeGetProperty(masm);
+
+ __ bind(&check_string);
+ // The key is not a smi.
+ // Is it a string?
+ // r0: key
+ // r1: receiver
+ __ CompareObjectType(r0, r2, r3, FIRST_NONSTRING_TYPE);
+ __ b(ge, &slow);
+
+ // Is the string an array index, with cached numeric value?
+ __ ldr(r3, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ tst(r3, Operand(String::kIsArrayIndexMask));
+ __ b(ne, &index_string);
+
+ // Is the string a symbol?
+ // r2: key map
+ __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
+ __ tst(r3, Operand(kIsSymbolMask));
+ __ b(eq, &slow);
+
+ // If the receiver is a fast-case object, check the keyed lookup
+ // cache. Otherwise probe the dictionary.
+ __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
+ __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+ __ cmp(r3, ip);
+ __ b(eq, &probe_dictionary);
+
+ // Load the map of the receiver, compute the keyed lookup cache hash
+ // based on 32 bits of the map pointer and the string hash.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
+ __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
+ __ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
+ __ and_(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+
+ // Load the key (consisting of map and symbol) from the cache and
+ // check for match.
+ ExternalReference cache_keys = ExternalReference::keyed_lookup_cache_keys();
+ __ mov(r4, Operand(cache_keys));
+ __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
+ __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
+ __ cmp(r2, r5);
+ __ b(ne, &slow);
+ __ ldr(r5, MemOperand(r4));
+ __ cmp(r0, r5);
+ __ b(ne, &slow);
+
+ // Get field offset and check that it is an in-object property.
+ // r0 : key
+ // r1 : receiver
+ // r2 : receiver's map
+ // r3 : lookup cache index
+ ExternalReference cache_field_offsets
+ = ExternalReference::keyed_lookup_cache_field_offsets();
+ __ mov(r4, Operand(cache_field_offsets));
+ __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+ __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
+ __ cmp(r5, r6);
+ __ b(ge, &slow);
+
+ // Load in-object property.
+ __ sub(r5, r5, r6); // Index from end of object.
+ __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
+ __ add(r6, r6, r5); // Index from start of object.
+ __ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
+ __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
+ __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1, r2, r3);
+ __ Ret();
+
+ // Do a quick inline probe of the receiver's dictionary, if it
+ // exists.
+ __ bind(&probe_dictionary);
+ // Load the property to r0.
+ GenerateDictionaryLoad(
+ masm, &slow, r1, r0, r0, r2, r3, r4, DICTIONARY_CHECK_DONE);
+ __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1, r2, r3);
+ __ Ret();
+
+ __ b(&slow);
+ // If the hash field contains an array index pick it out. The assert checks
+ // that the constants for the maximum number of digits for an array index
+ // cached in the hash field and the number of bits reserved for it does not
+ // conflict.
+ ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ (1 << String::kArrayIndexValueBits));
+ __ bind(&index_string);
+ // r0: key (string)
+ // r1: receiver
+ // r3: hash field
+ // We want the smi-tagged index in r0. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ and_(r3, r3, Operand(String::kArrayIndexValueMask));
+ // Here we actually clobber the key (r0) which will be used if calling into
+ // runtime later. However as the new key is the numeric value of a string key
+ // there is no difference in using either key.
+ __ mov(r0, Operand(r3, ASR, String::kHashShift - kSmiTagSize));
+ // Now jump to the place where smi keys are handled.
+ __ jmp(&index_smi);
}
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 31c4658..2968081 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -31,6 +31,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
+#include "frames-inl.h"
#include "log-inl.h"
#include "../include/v8-profiler.h"
@@ -49,7 +50,8 @@
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
- enqueue_order_(0) { }
+ enqueue_order_(0) {
+}
void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
@@ -181,6 +183,24 @@
}
+void ProfilerEventsProcessor::AddCurrentStack() {
+ TickSampleEventRecord record;
+ TickSample* sample = &record.sample;
+ sample->state = VMState::current_state();
+ sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
+ sample->frames_count = 0;
+ for (StackTraceFrameIterator it;
+ !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
+ it.Advance()) {
+ JavaScriptFrame* frame = it.frame();
+ sample->stack[sample->frames_count++] =
+ reinterpret_cast<Address>(frame->function());
+ }
+ record.order = enqueue_order_;
+ ticks_from_vm_buffer_.Enqueue(record);
+}
+
+
bool ProfilerEventsProcessor::ProcessCodeEvent(unsigned* dequeue_order) {
if (!events_buffer_.IsEmpty()) {
CodeEventsContainer record;
@@ -205,9 +225,16 @@
bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
while (true) {
+ if (!ticks_from_vm_buffer_.IsEmpty()
+ && ticks_from_vm_buffer_.Peek()->order == dequeue_order) {
+ TickSampleEventRecord record;
+ ticks_from_vm_buffer_.Dequeue(&record);
+ generator_->RecordTickSample(record.sample);
+ }
+
const TickSampleEventRecord* rec =
TickSampleEventRecord::cast(ticks_buffer_.StartDequeue());
- if (rec == NULL) return false;
+ if (rec == NULL) return !ticks_from_vm_buffer_.IsEmpty();
if (rec->order == dequeue_order) {
generator_->RecordTickSample(rec->sample);
ticks_buffer_.FinishDequeue();
@@ -416,13 +443,12 @@
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
}
+ processor_->AddCurrentStack();
}
void CpuProfiler::StartCollectingProfile(String* title) {
- if (profiles_->StartProfiling(title, next_profile_uid_++)) {
- StartProcessorIfNotStarted();
- }
+ StartCollectingProfile(profiles_->GetName(title));
}
@@ -434,10 +460,6 @@
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
processor_->Start();
- // Enable stack sampling.
- // It is important to have it started prior to logging, see issue 683:
- // http://code.google.com/p/v8/issues/detail?id=683
- reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
Logger::LogCodeObjects();
@@ -445,6 +467,8 @@
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
+ // Enable stack sampling.
+ reinterpret_cast<Sampler*>(Logger::ticker_)->Start();
}
}
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 81f9ae3..03b8176 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -105,6 +105,11 @@
class TickSampleEventRecord BASE_EMBEDDED {
public:
+ TickSampleEventRecord()
+ : filler(1) {
+ ASSERT(filler != SamplingCircularQueue::kClear);
+ }
+
// The first machine word of a TickSampleEventRecord must not ever
// become equal to SamplingCircularQueue::kClear. As both order and
// TickSample's first field are not reliable in this sense (order
@@ -119,9 +124,6 @@
}
INLINE(static TickSampleEventRecord* init(void* value));
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(TickSampleEventRecord);
};
@@ -159,6 +161,8 @@
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
+ // Puts current stack into tick sample events buffer.
+ void AddCurrentStack();
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@@ -184,6 +188,7 @@
bool running_;
UnboundQueue<CodeEventsContainer> events_buffer_;
SamplingCircularQueue ticks_buffer_;
+ UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
};
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 95aeba6..c883a48 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -12638,7 +12638,6 @@
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
- __ push(result_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -12652,9 +12651,11 @@
// have a chance to overwrite it.
__ mov(scratch_, eax);
}
- __ pop(result_);
__ pop(index_);
__ pop(object_);
+ // Reload the instance type.
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
ASSERT(kSmiTag == 0);
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
index 2ba64dc..6a77f99 100644
--- a/src/ia32/ic-ia32.cc
+++ b/src/ia32/ic-ia32.cc
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -305,8 +305,7 @@
// -- esp[0] : return address
// -----------------------------------
Label slow, check_string, index_smi, index_string;
- Label check_pixel_array, probe_dictionary;
- Label check_number_dictionary;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
// Check that the object isn't a smi.
__ test(edx, Immediate(kSmiTagMask));
@@ -329,8 +328,9 @@
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
- // Get the elements array of the object.
__ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
@@ -409,6 +409,7 @@
__ j(not_zero, &index_string, not_taken);
// Is the string a symbol?
+ // ecx: key map.
__ movzx_b(ebx, FieldOperand(ecx, Map::kInstanceTypeOffset));
ASSERT(kSymbolTag != 0);
__ test(ebx, Immediate(kIsSymbolMask));
@@ -461,6 +462,7 @@
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ add(ecx, Operand(edi));
__ mov(eax, FieldOperand(edx, ecx, times_pointer_size, 0));
+ __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@@ -487,10 +489,17 @@
__ bind(&index_string);
// We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
// the low kHashShift bits.
+ // eax: key (string).
+ // ebx: hash field.
+ // edx: receiver.
ASSERT(String::kHashShift >= kSmiTagSize);
__ and_(ebx, String::kArrayIndexValueMask);
__ shr(ebx, String::kHashShift - kSmiTagSize);
+ // Here we actually clobber the key (eax) which will be used if calling into
+ // runtime later. However as the new key is the numeric value of a string key
+ // there is no difference in using either key.
__ mov(eax, ebx);
+ // Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1322,7 +1331,7 @@
__ CheckAccessGlobalProxy(eax, edx, &miss);
__ jmp(&probe);
- // Cache miss: Restore receiver from stack and jump to runtime.
+ // Cache miss: Jump to runtime.
__ bind(&miss);
GenerateMiss(masm);
}
diff --git a/src/objects.cc b/src/objects.cc
index e2c5bc9..74f3e80 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2958,7 +2958,7 @@
// Make the lookup and include prototypes.
int accessor_index = is_getter ? kGetterIndex : kSetterIndex;
- uint32_t index;
+ uint32_t index = 0; // Initialize index to make compiler happy.
if (name->AsArrayIndex(&index)) {
for (Object* obj = this;
obj != Heap::null_value();
diff --git a/src/objects.h b/src/objects.h
index 622bc26..7de8b2a 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -408,7 +408,7 @@
enum StringRepresentationTag {
kSeqStringTag = 0x0,
kConsStringTag = 0x1,
- kExternalStringTag = 0x3
+ kExternalStringTag = 0x2
};
const uint32_t kIsConsStringMask = 0x1;
diff --git a/src/profile-generator.h b/src/profile-generator.h
index 7830787..18265f1 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -260,6 +260,7 @@
CpuProfile* GetProfile(int security_token_id, unsigned uid);
inline bool is_last_profile();
+ const char* GetName(String* name);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag,
String* name, String* resource_name, int line_number);
CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name);
@@ -274,7 +275,6 @@
private:
INLINE(const char* GetFunctionName(String* name));
INLINE(const char* GetFunctionName(const char* name));
- const char* GetName(String* name);
const char* GetName(int args_count);
List<CpuProfile*>* GetProfilesList(int security_token_id);
int TokenToIndex(int security_token_id);
diff --git a/src/unbound-queue-inl.h b/src/unbound-queue-inl.h
index ff5d833..fffb1db 100644
--- a/src/unbound-queue-inl.h
+++ b/src/unbound-queue-inl.h
@@ -82,6 +82,14 @@
while (first_ != reinterpret_cast<Node*>(divider_)) DeleteFirst();
}
+
+template<typename Record>
+Record* UnboundQueue<Record>::Peek() {
+ ASSERT(divider_ != last_);
+ Node* next = reinterpret_cast<Node*>(divider_)->next;
+ return &next->value;
+}
+
} } // namespace v8::internal
#endif // V8_UNBOUND_QUEUE_INL_H_
diff --git a/src/unbound-queue.h b/src/unbound-queue.h
index 7bc314b..443d5ce 100644
--- a/src/unbound-queue.h
+++ b/src/unbound-queue.h
@@ -47,6 +47,7 @@
INLINE(void Dequeue(Record* rec));
INLINE(void Enqueue(const Record& rec));
INLINE(bool IsEmpty()) { return divider_ == last_; }
+ INLINE(Record* Peek());
private:
INLINE(void DeleteFirst());
diff --git a/src/v8-counters.h b/src/v8-counters.h
index bd671a1..8c69cf3 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -123,6 +123,7 @@
/* How is the generic keyed-load stub used? */ \
SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi) \
SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol) \
+ SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow) \
SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow) \
/* Count how much the monomorphic keyed-load stubs are hit. */ \
diff --git a/src/version.cc b/src/version.cc
index 6d8c2fe..2bdfe5e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 13
+#define BUILD_NUMBER 14
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 182e94b..6467734 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -794,6 +794,7 @@
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Load(applicand);
+ frame()->Dup();
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame()->Push(name);
Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
@@ -5791,8 +5792,6 @@
// property case was inlined. Ensure that there is not a test rax
// instruction here.
masm_->nop();
- // Discard the global object. The result is in answer.
- frame_->Drop();
return answer;
}
@@ -6740,7 +6739,9 @@
void DeferredReferenceGetNamedValue::Generate() {
- __ push(receiver_);
+ if (!receiver_.is(rax)) {
+ __ movq(rax, receiver_);
+ }
__ Move(rcx, name_);
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
@@ -6757,7 +6758,6 @@
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
if (!dst_.is(rax)) __ movq(dst_, rax);
- __ pop(receiver_);
}
@@ -7418,9 +7418,8 @@
__ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit();
- frame()->Push(&receiver);
}
- ASSERT(frame()->height() == original_height);
+ ASSERT(frame()->height() == original_height - 1);
return result;
}
@@ -7569,10 +7568,13 @@
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
+ if (persist_after_get_) {
+ cgen_->frame()->Dup();
+ }
Result result = cgen_->EmitNamedLoad(GetName(), is_global);
cgen_->frame()->Push(&result);
if (!persist_after_get_) {
- cgen_->UnloadReference(this);
+ set_unloaded();
}
break;
}
@@ -10920,7 +10922,6 @@
call_helper.BeforeCall(masm);
__ push(object_);
__ push(index_);
- __ push(result_);
__ push(index_); // Consumed by runtime conversion function.
if (index_flags_ == STRING_INDEX_IS_NUMBER) {
__ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
@@ -10934,9 +10935,11 @@
// have a chance to overwrite it.
__ movq(scratch_, rax);
}
- __ pop(result_);
__ pop(index_);
__ pop(object_);
+ // Reload the instance type.
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ JumpIfNotSmi(scratch_, index_out_of_range_);
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 89b98f1..b9d062d 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -144,9 +144,10 @@
void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Register state for IC load call (from ic-x64.cc).
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -----------------------------------
- Generate_DebugBreakCallHelper(masm, rcx.bit(), false);
+ Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), false);
}
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index e4e6a0b..5007b8e 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1127,15 +1127,15 @@
Comment cmnt(masm_, "Global variable");
// Use inline caching. Variable name is passed in rcx and the global
// object on the stack.
- __ push(CodeGenerator::GlobalObject());
__ Move(rcx, var->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
// A test rax instruction following the call is used by the IC to
// indicate that the inobject property case was inlined. Ensure there
// is no test rax instruction here.
__ nop();
- DropAndApply(1, context, rax);
+ Apply(context, rax);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
Comment cmnt(masm_, "Lookup slot");
@@ -1693,14 +1693,12 @@
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- // Evaluate receiver.
- VisitForValue(expr->obj(), kStack);
-
if (key->IsPropertyName()) {
+ VisitForValue(expr->obj(), kAccumulator);
EmitNamedPropertyLoad(expr);
- // Drop receiver left on the stack by IC.
- DropAndApply(1, context_, rax);
+ Apply(context_, rax);
} else {
+ VisitForValue(expr->obj(), kStack);
VisitForValue(expr->key(), kStack);
EmitKeyedPropertyLoad(expr);
// Drop key and receiver left on the stack by IC.
@@ -2745,13 +2743,13 @@
!proxy->var()->is_this() &&
proxy->var()->is_global()) {
Comment cmnt(masm_, "Global variable");
- __ push(CodeGenerator::GlobalObject());
__ Move(rcx, proxy->name());
+ __ movq(rax, CodeGenerator::GlobalObject());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
// Use a regular load, not a contextual load, to avoid a reference
// error.
__ Call(ic, RelocInfo::CODE_TARGET);
- __ movq(Operand(rsp, 0), rax);
+ __ push(rax);
} else if (proxy != NULL &&
proxy->var()->slot() != NULL &&
proxy->var()->slot()->type() == Slot::LOOKUP) {
@@ -2861,10 +2859,12 @@
if (expr->is_postfix() && context_ != Expression::kEffect) {
__ Push(Smi::FromInt(0));
}
- VisitForValue(prop->obj(), kStack);
if (assign_type == NAMED_PROPERTY) {
+ VisitForValue(prop->obj(), kAccumulator);
+ __ push(rax); // Copy of receiver, needed for later store.
EmitNamedPropertyLoad(prop);
} else {
+ VisitForValue(prop->obj(), kStack);
VisitForValue(prop->key(), kStack);
EmitKeyedPropertyLoad(prop);
}
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 773d262..c146a4c 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -56,18 +56,20 @@
Register r1,
Register r2,
Register name,
+ Register r4,
DictionaryCheck check_dictionary) {
// Register use:
//
// r0 - used to hold the property dictionary.
//
- // r1 - initially the receiver
- // - used for the index into the property dictionary
+ // r1 - initially the receiver.
+ // - unchanged on any jump to miss_label.
// - holds the result on exit.
//
// r2 - used to hold the capacity of the property dictionary.
//
// name - holds the name of the property and is unchanged.
+ // r4 - used to hold the index into the property dictionary.
Label done;
@@ -116,19 +118,19 @@
StringDictionary::kElementsStartIndex * kPointerSize;
for (int i = 0; i < kProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
- __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
- __ shrl(r1, Immediate(String::kHashShift));
+ __ movl(r4, FieldOperand(name, String::kHashFieldOffset));
+ __ shrl(r4, Immediate(String::kHashShift));
if (i > 0) {
- __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
+ __ addl(r4, Immediate(StringDictionary::GetProbeOffset(i)));
}
- __ and_(r1, r2);
+ __ and_(r4, r2);
// Scale the index by multiplying by the entry size.
ASSERT(StringDictionary::kEntrySize == 3);
- __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3
+ __ lea(r4, Operand(r4, r4, times_2, 0)); // r4 = r4 * 3
// Check if the key is identical to the name.
- __ cmpq(name, Operand(r0, r1, times_pointer_size,
+ __ cmpq(name, Operand(r0, r4, times_pointer_size,
kElementsStartOffset - kHeapObjectTag));
if (i != kProbes - 1) {
__ j(equal, &done);
@@ -140,14 +142,14 @@
// Check that the value is a normal property.
__ bind(&done);
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ Test(Operand(r0, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag),
+ __ Test(Operand(r0, r4, times_pointer_size, kDetailsOffset - kHeapObjectTag),
Smi::FromInt(PropertyDetails::TypeField::mask()));
__ j(not_zero, miss_label);
// Get the value at the masked, scaled index.
const int kValueOffset = kElementsStartOffset + kPointerSize;
__ movq(r1,
- Operand(r0, r1, times_pointer_size, kValueOffset - kHeapObjectTag));
+ Operand(r0, r4, times_pointer_size, kValueOffset - kHeapObjectTag));
}
@@ -351,8 +353,7 @@
// -- rsp[16] : receiver
// -----------------------------------
Label slow, check_string, index_smi, index_string;
- Label check_pixel_array, probe_dictionary;
- Label check_number_dictionary;
+ Label check_pixel_array, probe_dictionary, check_number_dictionary;
// Load name and receiver.
__ movq(rax, Operand(rsp, kPointerSize));
@@ -376,9 +377,9 @@
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
-
- // Get the elements array of the object.
__ bind(&index_smi);
+ // Now the key is known to be a smi. This place is also jumped to from below
+ // where a numeric string is converted to a smi.
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
@@ -440,6 +441,7 @@
__ testl(rbx, Immediate(String::kIsArrayIndexMask));
// Is the string a symbol?
+ // rcx: key map.
__ j(not_zero, &index_string); // The value in rbx is used at jump target.
ASSERT(kSymbolTag != 0);
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
@@ -490,6 +492,7 @@
__ movzxbq(rdx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ addq(rax, rdx);
__ movq(rax, FieldOperand(rcx, rax, times_pointer_size, 0));
+ __ IncrementCounter(&Counters::keyed_load_generic_lookup_cache, 1);
__ ret(0);
// Do a quick inline probe of the receiver's dictionary, if it
@@ -501,6 +504,7 @@
rcx,
rdx,
rax,
+ rdi,
DICTIONARY_CHECK_DONE);
__ movq(rax, rcx);
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
@@ -513,9 +517,16 @@
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
// We want the smi-tagged index in rax.
+ // rax: key (string).
+ // rbx: hash field.
+ // rdx: receiver.
__ and_(rbx, Immediate(String::kArrayIndexValueMask));
__ shr(rbx, Immediate(String::kHashShift));
+ // Here we actually clobber the key (rax) which will be used if calling into
+ // runtime later. However as the new key is the numeric value of a string key
+ // there is no difference in using either key.
__ Integer32ToSmi(rax, rbx);
+ // Now jump to the place where smi keys are handled.
__ jmp(&index_smi);
}
@@ -1228,7 +1239,7 @@
// rsp[(argc + 1) * 8] : argument 0 = receiver
// -----------------------------------
// Search dictionary - put result in register rdx.
- GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, miss, rax, rdx, rbx, rcx, rdi, CHECK_DICTIONARY);
// Move the result to register rdi and check that it isn't a smi.
__ movq(rdi, rdx);
@@ -1333,13 +1344,13 @@
void LoadIC::GenerateMiss(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
__ pop(rbx);
- __ push(Operand(rsp, 0)); // receiver
+ __ push(rax); // receiver
__ push(rcx); // name
__ push(rbx); // return address
@@ -1351,14 +1362,12 @@
void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
-
StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -1367,14 +1376,12 @@
void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
-
StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -1383,13 +1390,11 @@
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
- __ movq(rax, Operand(rsp, kPointerSize));
-
// Probe the stub cache.
Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
NOT_IN_LOOP,
@@ -1403,14 +1408,12 @@
void LoadIC::GenerateNormal(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss, probe, global;
- __ movq(rax, Operand(rsp, kPointerSize));
-
// Check that the receiver isn't a smi.
__ JumpIfSmi(rax, &miss);
@@ -1432,7 +1435,8 @@
// Search the dictionary placing the result in rax.
__ bind(&probe);
- GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx, rcx, CHECK_DICTIONARY);
+ GenerateDictionaryLoad(masm, &miss, rdx, rax, rbx,
+ rcx, rdi, CHECK_DICTIONARY);
__ ret(0);
// Global object access: Check access rights.
@@ -1440,23 +1444,20 @@
__ CheckAccessGlobalProxy(rax, rdx, &miss);
__ jmp(&probe);
- // Cache miss: Restore receiver from stack and jump to runtime.
+ // Cache miss: Jump to runtime.
__ bind(&miss);
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
GenerateMiss(masm);
}
void LoadIC::GenerateStringLength(MacroAssembler* masm) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
-
StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss);
__ bind(&miss);
StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
index fbd95d9..0340d8f 100644
--- a/src/x64/stub-cache-x64.cc
+++ b/src/x64/stub-cache-x64.cc
@@ -1318,13 +1318,12 @@
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, rax, rcx, rbx, rdx,
callback, name, &miss, &failure);
@@ -1343,13 +1342,12 @@
Object* value,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadConstant(object, holder, rax, rbx, rdx, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1363,15 +1361,12 @@
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Load receiver.
- __ movq(rax, Operand(rsp, kPointerSize));
-
// Chech that receiver is not a smi.
__ JumpIfSmi(rax, &miss);
@@ -1409,13 +1404,12 @@
int index,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- __ movq(rax, Operand(rsp, kPointerSize));
GenerateLoadField(object, holder, rax, rbx, rdx, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);
@@ -1429,16 +1423,15 @@
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
- __ movq(rax, Operand(rsp, kPointerSize));
// TODO(368): Compile in the whole chain: all the interceptors in
// prototypes and ultimate answer.
GenerateLoadInterceptor(receiver,
@@ -1465,15 +1458,12 @@
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
+ // -- rax : receiver
// -- rcx : name
// -- rsp[0] : return address
- // -- rsp[8] : receiver
// -----------------------------------
Label miss;
- // Get the receiver from the stack.
- __ movq(rax, Operand(rsp, kPointerSize));
-
// If the object is the holder then we know that it's a global
// object which can only happen for contextual loads. In this case,
// the receiver cannot be a smi.
@@ -1485,19 +1475,20 @@
CheckPrototypes(object, rax, holder, rbx, rdx, name, &miss);
// Get the value from the cell.
- __ Move(rax, Handle<JSGlobalPropertyCell>(cell));
- __ movq(rax, FieldOperand(rax, JSGlobalPropertyCell::kValueOffset));
+ __ Move(rbx, Handle<JSGlobalPropertyCell>(cell));
+ __ movq(rbx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
// Check for deleted property if property can actually be deleted.
if (!is_dont_delete) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ j(equal, &miss);
} else if (FLAG_debug_code) {
- __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
__ Check(not_equal, "DontDelete cells can't contain the hole");
}
__ IncrementCounter(&Counters::named_load_global_inline, 1);
+ __ movq(rax, rbx);
__ ret(0);
__ bind(&miss);
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index db316bb..0846604 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -1072,14 +1072,14 @@
Result VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
- // Name and receiver are on the top of the frame. The IC expects
- // name in rcx and receiver on the stack. It does not drop the
- // receiver.
+ // Name and receiver are on the top of the frame. Both are dropped.
+ // The IC expects name in rcx and receiver in rax.
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
Result name = Pop();
- PrepareForCall(1, 0); // One stack arg, not callee-dropped.
- name.ToRegister(rcx);
- name.Unuse();
+ Result receiver = Pop();
+ PrepareForCall(0, 0); // No stack arguments.
+ MoveResultsToRegisters(&name, &receiver, rcx, rax);
+
return RawCallCodeObject(ic, mode);
}
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index b438d25..418fd36 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -7,12 +7,14 @@
#include "v8.h"
#include "profile-generator-inl.h"
#include "cctest.h"
+#include "../include/v8-profiler.h"
namespace i = v8::internal;
using i::CodeEntry;
using i::CodeMap;
using i::CpuProfile;
+using i::CpuProfiler;
using i::CpuProfilesCollection;
using i::ProfileNode;
using i::ProfileTree;
@@ -668,4 +670,109 @@
CHECK_EQ(kSamplingIntervalMs * 0.66666, calc3.ticks_per_ms());
}
+
+// --- P r o f i l e r E x t e n s i o n ---
+
+class ProfilerExtension : public v8::Extension {
+ public:
+ ProfilerExtension() : v8::Extension("v8/profiler", kSource) { }
+ virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
+ v8::Handle<v8::String> name);
+ static v8::Handle<v8::Value> StartProfiling(const v8::Arguments& args);
+ static v8::Handle<v8::Value> StopProfiling(const v8::Arguments& args);
+ private:
+ static const char* kSource;
+};
+
+
+const char* ProfilerExtension::kSource =
+ "native function startProfiling();"
+ "native function stopProfiling();";
+
+v8::Handle<v8::FunctionTemplate> ProfilerExtension::GetNativeFunction(
+ v8::Handle<v8::String> name) {
+ if (name->Equals(v8::String::New("startProfiling"))) {
+ return v8::FunctionTemplate::New(ProfilerExtension::StartProfiling);
+ } else if (name->Equals(v8::String::New("stopProfiling"))) {
+ return v8::FunctionTemplate::New(ProfilerExtension::StopProfiling);
+ } else {
+ CHECK(false);
+ return v8::Handle<v8::FunctionTemplate>();
+ }
+}
+
+
+v8::Handle<v8::Value> ProfilerExtension::StartProfiling(
+ const v8::Arguments& args) {
+ if (args.Length() > 0)
+ v8::CpuProfiler::StartProfiling(args[0].As<v8::String>());
+ else
+ v8::CpuProfiler::StartProfiling(v8::String::New(""));
+ return v8::Undefined();
+}
+
+
+v8::Handle<v8::Value> ProfilerExtension::StopProfiling(
+ const v8::Arguments& args) {
+ if (args.Length() > 0)
+ v8::CpuProfiler::StopProfiling(args[0].As<v8::String>());
+ else
+ v8::CpuProfiler::StopProfiling(v8::String::New(""));
+ return v8::Undefined();
+}
+
+
+static ProfilerExtension kProfilerExtension;
+v8::DeclareExtension kProfilerExtensionDeclaration(&kProfilerExtension);
+static v8::Persistent<v8::Context> env;
+
+static const ProfileNode* PickChild(const ProfileNode* parent,
+ const char* name) {
+ for (int i = 0; i < parent->children()->length(); ++i) {
+ const ProfileNode* child = parent->children()->at(i);
+ if (strcmp(child->entry()->name(), name) == 0) return child;
+ }
+ return NULL;
+}
+
+
+TEST(RecordStackTraceAtStartProfiling) {
+ if (env.IsEmpty()) {
+ v8::HandleScope scope;
+ const char* extensions[] = { "v8/profiler" };
+ v8::ExtensionConfiguration config(1, extensions);
+ env = v8::Context::New(&config);
+ }
+ v8::HandleScope scope;
+ env->Enter();
+
+ CHECK_EQ(0, CpuProfiler::GetProfilesCount());
+ CompileRun(
+ "function c() { startProfiling(); }\n"
+ "function b() { c(); }\n"
+ "function a() { b(); }\n"
+ "a();\n"
+ "stopProfiling();");
+ CHECK_EQ(1, CpuProfiler::GetProfilesCount());
+ CpuProfile* profile =
+ CpuProfiler::GetProfile(NULL, 0);
+ const ProfileTree* topDown = profile->top_down();
+ const ProfileNode* current = topDown->root();
+ // The tree should look like this:
+ // (root)
+ // (anonymous function)
+ // a
+ // b
+ // c
+ current = PickChild(current, "(anonymous function)");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ current = PickChild(current, "a");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ current = PickChild(current, "b");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ current = PickChild(current, "c");
+ CHECK_NE(NULL, const_cast<ProfileNode*>(current));
+ CHECK_EQ(0, current->children()->length());
+}
+
#endif // ENABLE_LOGGING_AND_PROFILING
diff --git a/test/mjsunit/delete.js b/test/mjsunit/delete.js
index 6fc15e9..8d4636a 100644
--- a/test/mjsunit/delete.js
+++ b/test/mjsunit/delete.js
@@ -44,16 +44,11 @@
assertTrue(delete x);
assertTrue(typeof x === 'undefined', "x is gone");
-/****
- * This test relies on DontDelete attributes. This is not
- * working yet.
-
var y = 87; // should have DontDelete attribute
assertEquals(87, y);
assertFalse(delete y, "don't delete");
assertFalse(typeof y === 'undefined');
assertEquals(87, y);
-*/
var o = { x: 42, y: 87 };
assertTrue(has(o, 'x'));
@@ -161,3 +156,25 @@
assertFalse(has(a, Math.pow(2,30)-1), "delete 2^30-1");
assertFalse(has(a, Math.pow(2,31)-1), "delete 2^31-1");
assertEquals(Math.pow(2,31), a.length);
+
+// Check that a LoadIC for a dictionary field works, even
+// when the dictionary probe misses.
+function load_deleted_property_using_IC() {
+ var x = new Object();
+ x.a = 3;
+ x.b = 4;
+ x.c = 5;
+
+ delete x.c;
+ assertEquals(3, load_a(x));
+ assertEquals(3, load_a(x));
+ delete x.a;
+ assertTrue(typeof load_a(x) === 'undefined', "x.a is gone");
+ assertTrue(typeof load_a(x) === 'undefined', "x.a is gone");
+}
+
+function load_a(x) {
+ return x.a;
+}
+
+load_deleted_property_using_IC();