Version 3.21.7
Fixed casts of eternal handles.
Turned on global handle zapping.
Always visit branches during HGraph building (Chromium issue 280333).
Profiler changes: removed the deprecated profiler API, added support for a higher sampling rate on Windows.
Performance and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@16446 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index c5c6f36..2922fc3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,17 @@
+2013-08-30: Version 3.21.7
+
+ Fixed casts of eternal handles.
+
+ Turned on global handle zapping.
+
+ Always visit branches during HGraph building (Chromium issue 280333).
+
+ Profiler changes: removed the deprecated profiler API, added
+ support for a higher sampling rate on Windows.
+
+ Performance and stability improvements on all platforms.
+
+
2013-08-29: Version 3.21.6
Fixed inlined 'throw' statements interfering with live range
diff --git a/include/v8.h b/include/v8.h
index 51bd362..cfc1de6 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -4547,28 +4547,6 @@
intptr_t change_in_bytes);
/**
- * Suspends recording of tick samples in the profiler.
- * When the V8 profiling mode is enabled (usually via command line
- * switches) this function suspends recording of tick samples.
- * Profiling ticks are discarded until ResumeProfiler() is called.
- *
- * See also the --prof and --prof_auto command line switches to
- * enable V8 profiling.
- */
- V8_DEPRECATED(static void PauseProfiler());
-
- /**
- * Resumes recording of tick samples in the profiler.
- * See also PauseProfiler().
- */
- V8_DEPRECATED(static void ResumeProfiler());
-
- /**
- * Return whether profiler is currently paused.
- */
- V8_DEPRECATED(static bool IsProfilerPaused());
-
- /**
* Retrieve the V8 thread id of the calling thread.
*
* The thread id for a thread should only be retrieved after the V8
@@ -5600,13 +5578,13 @@
template<class S>
void Eternal<T>::Set(Isolate* isolate, Local<S> handle) {
TYPE_CHECK(T, S);
- V8::Eternalize(isolate, Value::Cast(*handle), &this->index_);
+ V8::Eternalize(isolate, reinterpret_cast<Value*>(*handle), &this->index_);
}
template<class T>
Local<T> Eternal<T>::Get(Isolate* isolate) {
- return Local<T>::Cast(V8::GetEternal(isolate, index_));
+ return Local<T>(reinterpret_cast<T*>(*V8::GetEternal(isolate, index_)));
}
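
For context, a minimal usage sketch of the Eternal API fixed above. The helper names are hypothetical; the Set/Get signatures come from this header, and the old Value::Cast/Local<T>::Cast paths are the broken casts the ChangeLog refers to:

    // Hypothetical embedder code exercising Eternal<T>::Set and ::Get.
    static v8::Eternal<v8::Value> cached;

    void CacheValue(v8::Isolate* isolate, v8::Local<v8::Value> value) {
      cached.Set(isolate, value);  // previously routed through Value::Cast
    }

    v8::Local<v8::Value> GetCached(v8::Isolate* isolate) {
      return cached.Get(isolate);  // previously routed through Local<T>::Cast
    }
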
diff --git a/src/api.cc b/src/api.cc
index 9c0ac7b..643a5b4 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3571,7 +3571,7 @@
ENTER_V8(isolate);
HandleScope scope(reinterpret_cast<Isolate*>(isolate));
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- return i::JSObject::DeleteElement(self, index)->IsTrue();
+ return i::JSReceiver::DeleteElement(self, index)->IsTrue();
}
@@ -6735,24 +6735,6 @@
}
-void V8::PauseProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->PauseProfiler();
-}
-
-
-void V8::ResumeProfiler() {
- i::Isolate* isolate = i::Isolate::Current();
- isolate->logger()->ResumeProfiler();
-}
-
-
-bool V8::IsProfilerPaused() {
- i::Isolate* isolate = i::Isolate::Current();
- return isolate->logger()->IsProfilerPaused();
-}
-
-
int V8::GetCurrentThreadId() {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "V8::GetCurrentThreadId()");
@@ -7310,13 +7292,13 @@
int64_t CpuProfile::GetStartTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return profile->start_time_us();
+ return (profile->start_time() - i::Time::UnixEpoch()).InMicroseconds();
}
int64_t CpuProfile::GetEndTime() const {
const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
- return profile->end_time_us();
+ return (profile->end_time() - i::Time::UnixEpoch()).InMicroseconds();
}
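
Both getters now report microseconds since the Unix epoch, so a profile's wall-clock duration reduces to a plain difference (a minimal sketch, assuming profile points at a finished CpuProfile):

    int64_t duration_us = profile->GetEndTime() - profile->GetStartTime();
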
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 9a4d6e5..310cf3e 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -2491,16 +2491,6 @@
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = r1;
const Register exponent = r2;
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 5b42116..e49e5bc 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -101,12 +101,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -125,12 +120,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -150,10 +140,10 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
@@ -164,17 +154,22 @@
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return true;
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
- ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
+ ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 3400248..2a0d102 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -296,8 +296,7 @@
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -366,8 +365,7 @@
}
EmitProfilingCounterDecrement(weight);
__ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -416,8 +414,8 @@
__ push(r2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(r0);
EmitProfilingCounterReset();
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 9cb92e8..ae24210 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -5643,9 +5643,10 @@
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &done);
- StackCheckStub stub;
PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
diff --git a/src/assembler.cc b/src/assembler.cc
index ae8a0b5..a9587f3 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -891,7 +891,7 @@
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
- math_exp_data_mutex = OS::CreateMutex();
+ math_exp_data_mutex = new Mutex();
}
@@ -899,7 +899,7 @@
// Early return?
if (math_exp_data_initialized) return;
- math_exp_data_mutex->Lock();
+ LockGuard<Mutex> lock_guard(math_exp_data_mutex);
if (!math_exp_data_initialized) {
// If this is changed, generated code must be adapted too.
const int kTableSizeBits = 11;
@@ -935,7 +935,6 @@
math_exp_data_initialized = true;
}
- math_exp_data_mutex->Unlock();
}
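
This is one instance of a pattern applied throughout this patch: heap-allocated OS::CreateMutex()/Lock()/Unlock() triples become a value-type Mutex plus a scoped LockGuard. A minimal sketch of the new idiom (the function name is illustrative):

    static Mutex mutex;  // value type; no OS::CreateMutex()/delete pair

    void TouchSharedState() {
      LockGuard<Mutex> lock_guard(&mutex);
      // ... critical section; the guard unlocks on every exit path ...
    }
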
diff --git a/src/ast.cc b/src/ast.cc
index 38c6ddd..8734171 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -708,7 +708,9 @@
void AstVisitor::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
- Visit(statements->at(i));
+ Statement* stmt = statements->at(i);
+ Visit(stmt);
+ if (stmt->IsJump()) break;
}
}
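
The early break is sound because IsJump() holds for statements that unconditionally transfer control, such as return and break, making everything after them in the same list unreachable. Annotated sketch of the loop above:

    for (int i = 0; i < statements->length(); i++) {
      Statement* stmt = statements->at(i);
      Visit(stmt);
      // In { return x; y = 1; } the assignment is dead code, so the
      // visitor intentionally never reaches it.
      if (stmt->IsJump()) break;
    }
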
diff --git a/src/builtins.cc b/src/builtins.cc
index 1bc0a72..f1ee0a4 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1813,6 +1813,16 @@
}
+void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
+}
+
+
+void Builtins::Generate_StackCheck(MacroAssembler* masm) {
+ masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
+}
+
+
#define DEFINE_BUILTIN_ACCESSOR_C(name, ignore) \
Handle<Code> Builtins::name() { \
Code** code_address = \
diff --git a/src/builtins.h b/src/builtins.h
index 11494c6..a7c774a 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -211,6 +211,10 @@
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
+ V(InterruptCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
+ V(StackCheck, BUILTIN, UNINITIALIZED, \
+ Code::kNoExtraICState) \
CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -395,6 +399,9 @@
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
+ static void Generate_InterruptCheck(MacroAssembler* masm);
+ static void Generate_StackCheck(MacroAssembler* masm);
+
#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
static void Generate_Make##C##CodeYoungAgainEvenMarking( \
MacroAssembler* masm); \
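
Together with the stub removal in code-stubs.h below, stack and interrupt checks become ordinary builtins. A hedged sketch of the resulting call site, mirroring the full-codegen changes above:

    // Fetch the shared code object from the builtins table instead of
    // instantiating a StackCheckStub/InterruptStub:
    __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
    __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
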
diff --git a/src/code-stubs.h b/src/code-stubs.h
index ccd2caf..7c70583 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -449,30 +449,6 @@
};
-class StackCheckStub : public PlatformCodeStub {
- public:
- StackCheckStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return StackCheck; }
- int MinorKey() { return 0; }
-};
-
-
-class InterruptStub : public PlatformCodeStub {
- public:
- InterruptStub() { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Major MajorKey() { return Interrupt; }
- int MinorKey() { return 0; }
-};
-
-
class ToNumberStub: public HydrogenCodeStub {
public:
ToNumberStub() { }
diff --git a/src/compiler.cc b/src/compiler.cc
index 6c2bdce..1fba20f 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -260,10 +260,9 @@
Handle<JSFunction> function = info()->closure();
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
- double ms_creategraph =
- static_cast<double>(time_taken_to_create_graph_) / 1000;
- double ms_optimize = static_cast<double>(time_taken_to_optimize_) / 1000;
- double ms_codegen = static_cast<double>(time_taken_to_codegen_) / 1000;
+ double ms_creategraph = time_taken_to_create_graph_.InMillisecondsF();
+ double ms_optimize = time_taken_to_optimize_.InMillisecondsF();
+ double ms_codegen = time_taken_to_codegen_.InMillisecondsF();
if (FLAG_trace_opt) {
PrintF("[optimizing ");
function->ShortPrint();
@@ -373,9 +372,9 @@
// performance of the hydrogen-based compiler.
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
- int64_t start_ticks = 0;
+ ElapsedTimer timer;
if (FLAG_hydrogen_stats) {
- start_ticks = OS::Ticks();
+ timer.Start();
}
CompilationInfoWithZone unoptimized(info()->shared_info());
// Note that we use the same AST that we will use for generating the
@@ -394,8 +393,7 @@
Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
if (FLAG_hydrogen_stats) {
- int64_t ticks = OS::Ticks() - start_ticks;
- isolate()->GetHStatistics()->IncrementFullCodeGen(ticks);
+ isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
}
}
@@ -1026,13 +1024,7 @@
// aborted optimization. In either case we want to continue executing
// the unoptimized code without running into OSR. If the unoptimized
// code has been patched for OSR, unpatch it.
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code =
- isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(shared->code(),
- *interrupt_code,
- *replacement_code);
+ Deoptimizer::RevertInterruptCode(isolate, shared->code());
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
@@ -1244,7 +1236,7 @@
: name_(name), info_(info), zone_(info->isolate()) {
if (FLAG_hydrogen_stats) {
info_zone_start_allocation_size_ = info->zone()->allocation_size();
- start_ticks_ = OS::Ticks();
+ timer_.Start();
}
}
@@ -1253,8 +1245,7 @@
if (FLAG_hydrogen_stats) {
unsigned size = zone()->allocation_size();
size += info_->zone()->allocation_size() - info_zone_start_allocation_size_;
- int64_t ticks = OS::Ticks() - start_ticks_;
- isolate()->GetHStatistics()->SaveTiming(name_, ticks, size);
+ isolate()->GetHStatistics()->SaveTiming(name_, timer_.Elapsed(), size);
}
}
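
The timing changes above follow one pattern used across this patch: raw OS::Ticks() arithmetic becomes the typed ElapsedTimer/TimeDelta pair. A minimal sketch:

    ElapsedTimer timer;
    timer.Start();
    // ... measured work ...
    TimeDelta elapsed = timer.Elapsed();
    double ms = elapsed.InMillisecondsF();  // replaces ticks / 1000
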
diff --git a/src/compiler.h b/src/compiler.h
index 469698e..bdb168f 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -501,9 +501,6 @@
graph_builder_(NULL),
graph_(NULL),
chunk_(NULL),
- time_taken_to_create_graph_(0),
- time_taken_to_optimize_(0),
- time_taken_to_codegen_(0),
last_status_(FAILED) { }
enum Status {
@@ -529,9 +526,9 @@
HOptimizedGraphBuilder* graph_builder_;
HGraph* graph_;
LChunk* chunk_;
- int64_t time_taken_to_create_graph_;
- int64_t time_taken_to_optimize_;
- int64_t time_taken_to_codegen_;
+ TimeDelta time_taken_to_create_graph_;
+ TimeDelta time_taken_to_optimize_;
+ TimeDelta time_taken_to_codegen_;
Status last_status_;
MUST_USE_RESULT Status SetLastStatus(Status status) {
@@ -541,18 +538,20 @@
void RecordOptimizationStats();
struct Timer {
- Timer(OptimizingCompiler* compiler, int64_t* location)
+ Timer(OptimizingCompiler* compiler, TimeDelta* location)
: compiler_(compiler),
- start_(OS::Ticks()),
- location_(location) { }
+ location_(location) {
+ ASSERT(location_ != NULL);
+ timer_.Start();
+ }
~Timer() {
- *location_ += (OS::Ticks() - start_);
+ *location_ += timer_.Elapsed();
}
OptimizingCompiler* compiler_;
- int64_t start_;
- int64_t* location_;
+ ElapsedTimer timer_;
+ TimeDelta* location_;
};
};
@@ -644,7 +643,7 @@
CompilationInfo* info_;
Zone zone_;
unsigned info_zone_start_allocation_size_;
- int64_t start_ticks_;
+ ElapsedTimer timer_;
DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
};
diff --git a/src/counters.cc b/src/counters.cc
index 1839412..e2530a8 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -60,8 +60,7 @@
// Start the timer.
void HistogramTimer::Start() {
if (Enabled()) {
- stop_time_ = 0;
- start_time_ = OS::Ticks();
+ timer_.Start();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::START, name()));
@@ -72,10 +71,9 @@
// Stop the timer and record the results.
void HistogramTimer::Stop() {
if (Enabled()) {
- stop_time_ = OS::Ticks();
// Compute the delta between start and stop, in milliseconds.
- int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
- AddSample(milliseconds);
+ AddSample(static_cast<int>(timer_.Elapsed().InMilliseconds()));
+ timer_.Stop();
}
if (FLAG_log_internal_timer_events) {
LOG(isolate(), TimerEvent(Logger::END, name()));
diff --git a/src/counters.h b/src/counters.h
index a633fea..8cfe6c5 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -245,9 +245,7 @@
int max,
int num_buckets,
Isolate* isolate)
- : Histogram(name, min, max, num_buckets, isolate),
- start_time_(0),
- stop_time_(0) { }
+ : Histogram(name, min, max, num_buckets, isolate) {}
// Start the timer.
void Start();
@@ -257,12 +255,11 @@
// Returns true if the timer is running.
bool Running() {
- return Enabled() && (start_time_ != 0) && (stop_time_ == 0);
+ return Enabled() && timer_.IsStarted();
}
private:
- int64_t start_time_;
- int64_t stop_time_;
+ ElapsedTimer timer_;
};
// Helper class for scoping a HistogramTimer.
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index f8698b3..34bebb8 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -46,12 +46,12 @@
ProfilerEventsProcessor::ProfilerEventsProcessor(
ProfileGenerator* generator,
Sampler* sampler,
- int period_in_useconds)
+ TimeDelta period)
: Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
generator_(generator),
sampler_(sampler),
running_(true),
- period_in_useconds_(period_in_useconds),
+ period_(period),
last_code_event_id_(0), last_processed_code_event_id_(0) {
}
@@ -124,9 +124,10 @@
void ProfilerEventsProcessor::ProcessEventsAndDoSample() {
- int64_t stop_time = OS::Ticks() + period_in_useconds_;
+ ElapsedTimer timer;
+ timer.Start();
// Keep processing existing events until we need to do next sample.
- while (OS::Ticks() < stop_time) {
+ while (!timer.HasExpired(period_)) {
if (ProcessTicks()) {
// All ticks of the current dequeue_order are processed,
// proceed to the next code event.
@@ -138,24 +139,9 @@
}
-void ProfilerEventsProcessor::ProcessEventsAndYield() {
- // Process ticks until we have any.
- if (ProcessTicks()) {
- // All ticks of the current dequeue_order are processed,
- // proceed to the next code event.
- ProcessCodeEvent();
- }
- YieldCPU();
-}
-
-
void ProfilerEventsProcessor::Run() {
while (running_) {
- if (Sampler::CanSampleOnProfilerEventsProcessorThread()) {
- ProcessEventsAndDoSample();
- } else {
- ProcessEventsAndYield();
- }
+ ProcessEventsAndDoSample();
}
// Process remaining tick events.
@@ -381,7 +367,6 @@
next_profile_uid_(1),
generator_(NULL),
processor_(NULL),
- need_to_stop_sampler_(false),
is_profiling_(false) {
}
@@ -395,7 +380,6 @@
next_profile_uid_(1),
generator_(test_generator),
processor_(test_processor),
- need_to_stop_sampler_(false),
is_profiling_(false) {
}
@@ -429,12 +413,13 @@
if (processor_ == NULL) {
Logger* logger = isolate_->logger();
// Disable logging when using the new implementation.
- saved_logging_nesting_ = logger->logging_nesting_;
- logger->logging_nesting_ = 0;
+ saved_is_logging_ = logger->is_logging_;
+ logger->is_logging_ = false;
generator_ = new ProfileGenerator(profiles_);
Sampler* sampler = logger->sampler();
processor_ = new ProfilerEventsProcessor(
- generator_, sampler, FLAG_cpu_profiler_sampling_interval);
+ generator_, sampler,
+ TimeDelta::FromMicroseconds(FLAG_cpu_profiler_sampling_interval));
is_profiling_ = true;
// Enumerate stuff we already have in the heap.
ASSERT(isolate_->heap()->HasBeenSetUp());
@@ -445,14 +430,8 @@
logger->LogAccessorCallbacks();
LogBuiltins();
// Enable stack sampling.
- if (Sampler::CanSampleOnProfilerEventsProcessorThread()) {
- sampler->SetHasProcessingThread(true);
- }
+ sampler->SetHasProcessingThread(true);
sampler->IncreaseProfilingDepth();
- if (!sampler->IsActive()) {
- sampler->Start();
- need_to_stop_sampler_ = true;
- }
processor_->StartSynchronously();
}
}
@@ -485,21 +464,15 @@
void CpuProfiler::StopProcessor() {
Logger* logger = isolate_->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
- sampler->DecreaseProfilingDepth();
is_profiling_ = false;
processor_->StopSynchronously();
delete processor_;
delete generator_;
processor_ = NULL;
generator_ = NULL;
- if (Sampler::CanSampleOnProfilerEventsProcessorThread()) {
- sampler->SetHasProcessingThread(false);
- }
- if (need_to_stop_sampler_) {
- sampler->Stop();
- need_to_stop_sampler_ = false;
- }
- logger->logging_nesting_ = saved_logging_nesting_;
+ sampler->SetHasProcessingThread(false);
+ sampler->DecreaseProfilingDepth();
+ logger->is_logging_ = saved_is_logging_;
}
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index 84e34e4..a6eccff 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -138,7 +138,7 @@
public:
ProfilerEventsProcessor(ProfileGenerator* generator,
Sampler* sampler,
- int period_in_useconds);
+ TimeDelta period);
virtual ~ProfilerEventsProcessor() {}
// Thread control.
@@ -163,13 +163,12 @@
bool ProcessTicks();
void ProcessEventsAndDoSample();
- void ProcessEventsAndYield();
ProfileGenerator* generator_;
Sampler* sampler_;
bool running_;
// Sampling period in microseconds.
- const int period_in_useconds_;
+ const TimeDelta period_;
UnboundQueue<CodeEventsContainer> events_buffer_;
static const size_t kTickSampleBufferSize = 1 * MB;
static const size_t kTickSampleQueueLength =
@@ -265,8 +264,7 @@
unsigned next_profile_uid_;
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
- int saved_logging_nesting_;
- bool need_to_stop_sampler_;
+ bool saved_is_logging_;
bool is_profiling_;
DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
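
A hedged sketch of how the processor thread now paces itself, condensed from ProcessEventsAndDoSample above:

    TimeDelta period =
        TimeDelta::FromMicroseconds(FLAG_cpu_profiler_sampling_interval);
    ElapsedTimer timer;
    timer.Start();
    while (!timer.HasExpired(period)) {
      // Drain queued code events, then fall through to take the next sample.
    }
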
diff --git a/src/d8-debug.cc b/src/d8-debug.cc
index 9a72518..3adeb71 100644
--- a/src/d8-debug.cc
+++ b/src/d8-debug.cc
@@ -248,7 +248,7 @@
void RemoteDebugger::AddEvent(RemoteDebuggerEvent* event) {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
if (head_ == NULL) {
ASSERT(tail_ == NULL);
head_ = event;
@@ -263,7 +263,7 @@
RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
- i::ScopedLock lock(event_access_);
+ i::LockGuard<i::Mutex> lock_guard(&event_access_);
ASSERT(head_ != NULL);
RemoteDebuggerEvent* result = head_;
head_ = head_->next();
diff --git a/src/d8-debug.h b/src/d8-debug.h
index 2386b6b..276cbd8 100644
--- a/src/d8-debug.h
+++ b/src/d8-debug.h
@@ -53,7 +53,6 @@
explicit RemoteDebugger(Isolate* isolate, int port)
: isolate_(isolate),
port_(port),
- event_access_(i::OS::CreateMutex()),
event_available_(i::OS::CreateSemaphore(0)),
head_(NULL), tail_(NULL) {}
void Run();
@@ -84,7 +83,7 @@
// Linked list of events from debugged V8 and from keyboard input. Access to
// the list is guarded by a mutex and a semaphore signals new items in the
// list.
- i::Mutex* event_access_;
+ i::Mutex event_access_;
i::Semaphore* event_available_;
RemoteDebuggerEvent* head_;
RemoteDebuggerEvent* tail_;
diff --git a/src/d8.cc b/src/d8.cc
index fe4d744..4a10550 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -157,7 +157,7 @@
i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
CounterCollection Shell::local_counters_;
CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
+i::Mutex Shell::context_mutex_;
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
@@ -925,7 +925,7 @@
Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
#ifndef V8_SHARED
// This needs to be a critical section since this is not thread-safe
- i::ScopedLock lock(context_mutex_);
+ i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
@@ -1011,7 +1011,6 @@
"-------------+\n");
delete [] counters;
}
- delete context_mutex_;
delete counters_file_;
delete counter_map_;
#endif // V8_SHARED
diff --git a/src/d8.h b/src/d8.h
index 6008d35..fbc7a10 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -390,7 +390,7 @@
static CounterCollection local_counters_;
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
- static i::Mutex* context_mutex_;
+ static i::Mutex context_mutex_;
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript(Isolate* isolate);
diff --git a/src/debug-agent.cc b/src/debug-agent.cc
index 811c00e..b390cc5 100644
--- a/src/debug-agent.cc
+++ b/src/debug-agent.cc
@@ -106,7 +106,7 @@
"Remote debugging session already active\r\n";
void DebuggerAgent::CreateSession(Socket* client) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// If another session is already established terminate this one.
if (session_ != NULL) {
@@ -123,7 +123,7 @@
void DebuggerAgent::CloseSession() {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Terminate the session.
if (session_ != NULL) {
@@ -136,7 +136,7 @@
void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
// Forward the message handling to the session.
if (session_ != NULL) {
@@ -154,7 +154,7 @@
}
// Terminate the session.
- ScopedLock with(session_access_);
+ LockGuard<RecursiveMutex> session_access_guard(&session_access_);
ASSERT(session == session_);
if (session == session_) {
session_->Shutdown();
diff --git a/src/debug-agent.h b/src/debug-agent.h
index 6115190..e78ed67 100644
--- a/src/debug-agent.h
+++ b/src/debug-agent.h
@@ -48,7 +48,7 @@
isolate_(Isolate::Current()),
name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
- session_access_(OS::CreateMutex()), session_(NULL),
+ session_(NULL),
terminate_now_(OS::CreateSemaphore(0)),
listening_(OS::CreateSemaphore(0)) {
ASSERT(isolate_->debugger_agent_instance() == NULL);
@@ -76,7 +76,7 @@
int port_; // Port to use for the agent.
Socket* server_; // Server socket for listen/accept.
bool terminate_; // Termination flag.
- Mutex* session_access_; // Mutex guarging access to session_.
+ RecursiveMutex session_access_; // Mutex guarding access to session_.
DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination.
Semaphore* listening_;
diff --git a/src/debug.cc b/src/debug.cc
index bf208b2..dfe7b97 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -2612,7 +2612,6 @@
message_handler_(NULL),
debugger_unload_pending_(false),
host_dispatch_handler_(NULL),
- dispatch_handler_access_(OS::CreateMutex()),
debug_message_dispatch_handler_(NULL),
message_dispatch_helper_thread_(NULL),
host_dispatch_micros_(100 * 1000),
@@ -2625,8 +2624,6 @@
Debugger::~Debugger() {
- delete dispatch_handler_access_;
- dispatch_handler_access_ = 0;
delete command_received_;
command_received_ = 0;
}
@@ -3272,7 +3269,7 @@
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
message_handler_ = handler;
ListenersChanged();
@@ -3309,7 +3306,7 @@
void Debugger::SetDebugMessageDispatchHandler(
v8::Debug::DebugMessageDispatchHandler handler, bool provide_locker) {
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
debug_message_dispatch_handler_ = handler;
if (provide_locker && message_dispatch_helper_thread_ == NULL) {
@@ -3322,7 +3319,7 @@
// Calls the registered debug message handler. This callback is part of the
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
if (message_handler_ != NULL) {
message_handler_(message);
@@ -3352,7 +3349,7 @@
MessageDispatchHelperThread* dispatch_thread;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
dispatch_thread = message_dispatch_helper_thread_;
}
@@ -3381,7 +3378,7 @@
bool Debugger::IsDebuggerActive() {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> with(debugger_access_);
return message_handler_ != NULL ||
!event_listener_.is_null() ||
@@ -3472,7 +3469,7 @@
void Debugger::CallMessageDispatchHandler() {
v8::Debug::DebugMessageDispatchHandler handler;
{
- ScopedLock with(dispatch_handler_access_);
+ LockGuard<Mutex> lock_guard(&dispatch_handler_access_);
handler = Debugger::debug_message_dispatch_handler_;
}
if (handler != NULL) {
@@ -3793,24 +3790,17 @@
LockingCommandMessageQueue::LockingCommandMessageQueue(Logger* logger, int size)
- : logger_(logger), queue_(size) {
- lock_ = OS::CreateMutex();
-}
-
-
-LockingCommandMessageQueue::~LockingCommandMessageQueue() {
- delete lock_;
-}
+ : logger_(logger), queue_(size) {}
bool LockingCommandMessageQueue::IsEmpty() const {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
return queue_.IsEmpty();
}
CommandMessage LockingCommandMessageQueue::Get() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
CommandMessage result = queue_.Get();
logger_->DebugEvent("Get", result.text());
return result;
@@ -3818,14 +3808,14 @@
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Put(message);
logger_->DebugEvent("Put", message.text());
}
void LockingCommandMessageQueue::Clear() {
- ScopedLock sl(lock_);
+ LockGuard<Mutex> lock_guard(&mutex_);
queue_.Clear();
}
@@ -3833,19 +3823,18 @@
MessageDispatchHelperThread::MessageDispatchHelperThread(Isolate* isolate)
: Thread("v8:MsgDispHelpr"),
isolate_(isolate), sem_(OS::CreateSemaphore(0)),
- mutex_(OS::CreateMutex()), already_signalled_(false) {
+ already_signalled_(false) {
}
MessageDispatchHelperThread::~MessageDispatchHelperThread() {
- delete mutex_;
delete sem_;
}
void MessageDispatchHelperThread::Schedule() {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
if (already_signalled_) {
return;
}
@@ -3859,7 +3848,7 @@
while (true) {
sem_->Wait();
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(&mutex_);
already_signalled_ = false;
}
{
diff --git a/src/debug.h b/src/debug.h
index 67debc7..2333b07 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -762,7 +762,6 @@
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
LockingCommandMessageQueue(Logger* logger, int size);
- ~LockingCommandMessageQueue();
bool IsEmpty() const;
CommandMessage Get();
void Put(const CommandMessage& message);
@@ -770,7 +769,7 @@
private:
Logger* logger_;
CommandMessageQueue queue_;
- Mutex* lock_;
+ mutable Mutex mutex_;
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
@@ -863,7 +862,7 @@
friend void ForceUnloadDebugger(); // In test-debug.cc
inline bool EventActive(v8::DebugEvent event) {
- ScopedLock with(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access_);
// Check whether the message handler has been cleared.
if (debugger_unload_pending_) {
@@ -918,7 +917,7 @@
Handle<Object> event_data);
void ListenersChanged();
- Mutex* debugger_access_; // Mutex guarding debugger variables.
+ RecursiveMutex* debugger_access_; // Mutex guarding debugger variables.
Handle<Object> event_listener_; // Global handle to listener.
Handle<Object> event_listener_data_;
bool compiling_natives_; // Are we compiling natives?
@@ -929,7 +928,7 @@
v8::Debug::MessageHandler2 message_handler_;
bool debugger_unload_pending_; // Was message handler cleared?
v8::Debug::HostDispatchHandler host_dispatch_handler_;
- Mutex* dispatch_handler_access_; // Mutex guarding dispatch handler.
+ Mutex dispatch_handler_access_; // Mutex guarding dispatch handler.
v8::Debug::DebugMessageDispatchHandler debug_message_dispatch_handler_;
MessageDispatchHelperThread* message_dispatch_helper_thread_;
int host_dispatch_micros_;
@@ -1056,7 +1055,7 @@
Isolate* isolate_;
Semaphore* const sem_;
- Mutex* const mutex_;
+ Mutex mutex_;
bool already_signalled_;
DISALLOW_COPY_AND_ASSIGN(MessageDispatchHelperThread);
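
One detail worth noting: LockingCommandMessageQueue::IsEmpty() is const, so the embedded mutex is declared mutable to keep locking legal from const members. Condensed sketch:

    class LockingCommandMessageQueue {
     public:
      bool IsEmpty() const {
        LockGuard<Mutex> lock_guard(&mutex_);  // fine: mutex_ is mutable
        return queue_.IsEmpty();
      }
     private:
      mutable Mutex mutex_;
    };
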
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index dc9ffc5..49d046b 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -784,12 +784,13 @@
}
// Print some helpful diagnostic information.
- int64_t start = OS::Ticks();
if (FLAG_log_timer_events &&
compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
LOG(isolate(), CodeDeoptEvent(compiled_code_));
}
+ ElapsedTimer timer;
if (trace_) {
+ timer.Start();
PrintF("[deoptimizing (DEOPT %s): begin 0x%08" V8PRIxPTR " ",
MessageFor(bailout_type_),
reinterpret_cast<intptr_t>(function_));
@@ -870,7 +871,7 @@
// Print some helpful diagnostic information.
if (trace_) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
JSFunction* function = output_[index]->GetFunction();
PrintF("[deoptimizing (%s): end 0x%08" V8PRIxPTR " ",
@@ -1696,13 +1697,25 @@
Handle<Object> properties = MaterializeNextValue();
Handle<Object> elements = MaterializeNextValue();
object->set_properties(FixedArray::cast(*properties));
- object->set_elements(FixedArray::cast(*elements));
+ object->set_elements(FixedArrayBase::cast(*elements));
for (int i = 0; i < length - 3; ++i) {
Handle<Object> value = MaterializeNextValue();
object->FastPropertyAtPut(i, *value);
}
break;
}
+ case JS_ARRAY_TYPE: {
+ Handle<JSArray> object =
+ isolate_->factory()->NewJSArray(0, map->elements_kind());
+ materialized_objects_->Add(object);
+ Handle<Object> properties = MaterializeNextValue();
+ Handle<Object> elements = MaterializeNextValue();
+ Handle<Object> length = MaterializeNextValue();
+ object->set_properties(FixedArray::cast(*properties));
+ object->set_elements(FixedArrayBase::cast(*elements));
+ object->set_length(*length);
+ break;
+ }
default:
PrintF("[couldn't handle instance type %d]\n", map->instance_type());
UNREACHABLE();
@@ -2577,9 +2590,12 @@
}
-void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
+void Deoptimizer::PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized_code) {
+ DisallowHeapAllocation no_gc;
+ Code* replacement_code =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
@@ -2588,9 +2604,11 @@
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
+ ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
+ unoptimized_code,
+ back_edges.pc()));
PatchInterruptCodeAt(unoptimized_code,
back_edges.pc(),
- interrupt_code,
replacement_code);
}
}
@@ -2598,14 +2616,17 @@
unoptimized_code->set_back_edges_patched_for_osr(true);
#ifdef DEBUG
Deoptimizer::VerifyInterruptCode(
- unoptimized_code, interrupt_code, replacement_code, loop_nesting_level);
+ isolate, unoptimized_code, loop_nesting_level);
#endif // DEBUG
}
-void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
+void Deoptimizer::RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized_code) {
+ DisallowHeapAllocation no_gc;
+ Code* interrupt_code =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+
// Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized_code->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
@@ -2614,10 +2635,10 @@
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
- RevertInterruptCodeAt(unoptimized_code,
- back_edges.pc(),
- interrupt_code,
- replacement_code);
+ ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
+ unoptimized_code,
+ back_edges.pc()));
+ RevertInterruptCodeAt(unoptimized_code, back_edges.pc(), interrupt_code);
}
}
@@ -2625,16 +2646,14 @@
unoptimized_code->set_allow_osr_at_loop_nesting_level(0);
#ifdef DEBUG
// Assert that none of the back edges are patched anymore.
- Deoptimizer::VerifyInterruptCode(
- unoptimized_code, interrupt_code, replacement_code, -1);
+ Deoptimizer::VerifyInterruptCode(isolate, unoptimized_code, -1);
#endif // DEBUG
}
#ifdef DEBUG
-void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+void Deoptimizer::VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized_code,
int loop_nesting_level) {
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized_code);
!back_edges.Done();
@@ -2644,10 +2663,9 @@
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
- InterruptCodeIsPatched(unoptimized_code,
- back_edges.pc(),
- interrupt_code,
- replacement_code));
+ GetInterruptPatchState(isolate,
+ unoptimized_code,
+ back_edges.pc()) != NOT_PATCHED);
}
}
#endif // DEBUG
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index e5afd1a..0d62bd0 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -144,6 +144,11 @@
DEBUGGER
};
+ enum InterruptPatchState {
+ NOT_PATCHED,
+ PATCHED_FOR_OSR
+ };
+
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -231,40 +236,34 @@
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
- static void PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code);
// Change all patched interrupts patched in the unoptimized code
// back to normal interrupts.
- static void RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
// Change patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ Code* interrupt_code);
#ifdef DEBUG
- static bool InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
- static void VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+ static void VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
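
With interrupt_code/replacement_code dropped from these signatures, debug code now matches against the builtins cache through the new enum. A hedged sketch of a caller, mirroring deoptimizer.cc above (names taken from this patch):

    #ifdef DEBUG
      ASSERT_EQ(Deoptimizer::NOT_PATCHED,
                Deoptimizer::GetInterruptPatchState(isolate,
                                                    unoptimized_code,
                                                    pc_after));
    #endif
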
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 0ea5a32..5c170ac 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -775,9 +775,6 @@
DEFINE_bool(log_suspect, false, "Log suspect operations.")
DEFINE_bool(prof, false,
"Log statistical profiling information (implies --log-code).")
-DEFINE_bool(prof_lazy, false,
- "Used with --prof, only does sampling and logging"
- " when profiler is active.")
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 664673f..21cfd22 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -2063,7 +2063,7 @@
CompilationInfo* info) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
DisallowHeapAllocation no_gc;
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
@@ -2149,7 +2149,7 @@
void GDBJITInterface::RemoveCode(Code* code) {
if (!FLAG_gdbjit) return;
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
HashMap::Entry* e = GetEntries()->Lookup(code,
HashForCodeObject(code),
false);
@@ -2187,7 +2187,7 @@
void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
GDBJITLineInfo* line_info) {
- ScopedLock lock(mutex.Pointer());
+ LockGuard<Mutex> lock_guard(mutex.Pointer());
ASSERT(!IsLineInfoTagged(line_info));
HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
ASSERT(e->value == NULL);
diff --git a/src/global-handles.cc b/src/global-handles.cc
index 5632c7c..4a2aad3 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -118,14 +118,12 @@
void Release() {
ASSERT(state() != FREE);
set_state(FREE);
-#ifdef ENABLE_EXTRA_CHECKS
// Zap the values for eager trapping.
object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
set_independent(false);
set_partially_dependent(false);
weak_reference_callback_ = NULL;
-#endif
DecreaseBlockUses();
}
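
This implements the ChangeLog's "global handle zapping" item: the zap now runs unconditionally rather than only under ENABLE_EXTRA_CHECKS, so a released node always ends up as:

    // A stale dereference after Release() now hits the zap pattern
    // instead of a possibly recycled live object:
    object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
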
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 6caa742..9d04797 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -718,13 +718,11 @@
#ifdef DEBUG
for (int i = 0; i < new_space_strings_.length(); ++i) {
Object* obj = Object::cast(new_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
ASSERT(obj != HEAP->the_hole_value());
}
for (int i = 0; i < old_space_strings_.length(); ++i) {
Object* obj = Object::cast(old_space_strings_[i]);
- // TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
ASSERT(obj != HEAP->the_hole_value());
}
diff --git a/src/heap.cc b/src/heap.cc
index 4fd478f..d4425ea 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -6927,7 +6927,7 @@
store_buffer()->SetUp();
- if (FLAG_concurrent_recompilation) relocation_mutex_ = OS::CreateMutex();
+ if (FLAG_concurrent_recompilation) relocation_mutex_ = new Mutex;
#ifdef DEBUG
relocation_mutex_locked_by_optimizer_thread_ = false;
#endif // DEBUG
@@ -7916,6 +7916,7 @@
if (new_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(new_space_strings_[i]->IsExternalString());
if (heap_->InNewSpace(new_space_strings_[i])) {
new_space_strings_[last++] = new_space_strings_[i];
} else {
@@ -7930,6 +7931,7 @@
if (old_space_strings_[i] == heap_->the_hole_value()) {
continue;
}
+ ASSERT(old_space_strings_[i]->IsExternalString());
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
old_space_strings_[last++] = old_space_strings_[i];
}
@@ -8032,7 +8034,7 @@
void Heap::CheckpointObjectStats() {
- ScopedLock lock(checkpoint_object_stats_mutex.Pointer());
+ LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
Counters* counters = isolate()->counters();
#define ADJUST_LAST_TIME_OBJECT_COUNT(name) \
counters->count_of_##name()->Increment( \
diff --git a/src/heap.h b/src/heap.h
index f0920b3..14b395f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1882,7 +1882,7 @@
void CheckpointObjectStats();
- // We don't use a ScopedLock here since we want to lock the heap
+ // We don't use a LockGuard here since we want to lock the heap
// only when FLAG_concurrent_recompilation is true.
class RelocationLock {
public:
diff --git a/src/hydrogen-escape-analysis.cc b/src/hydrogen-escape-analysis.cc
index 145a779..0e70bcd 100644
--- a/src/hydrogen-escape-analysis.cc
+++ b/src/hydrogen-escape-analysis.cc
@@ -31,21 +31,25 @@
namespace internal {
-void HEscapeAnalysisPhase::CollectIfNoEscapingUses(HInstruction* instr) {
- for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
+bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value) {
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
if (use->HasEscapingOperandAt(it.index())) {
if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) escapes through #%d (%s) @%d\n", instr->id(),
- instr->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
}
- return;
+ return false;
+ }
+ if (use->RedefinedOperandIndex() == it.index() && !HasNoEscapingUses(use)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(),
+ value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+ }
+ return false;
}
}
- if (FLAG_trace_escape_analysis) {
- PrintF("#%d (%s) is being captured\n", instr->id(), instr->Mnemonic());
- }
- captured_.Add(instr, zone());
+ return true;
}
@@ -55,8 +59,12 @@
HBasicBlock* block = graph()->blocks()->at(i);
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
HInstruction* instr = it.Current();
- if (instr->IsAllocate()) {
- CollectIfNoEscapingUses(instr);
+ if (instr->IsAllocate() && HasNoEscapingUses(instr)) {
+ if (FLAG_trace_escape_analysis) {
+ PrintF("#%d (%s) is being captured\n", instr->id(),
+ instr->Mnemonic());
+ }
+ captured_.Add(instr, zone());
}
}
}
diff --git a/src/hydrogen-escape-analysis.h b/src/hydrogen-escape-analysis.h
index 9db46cb..639f5a9 100644
--- a/src/hydrogen-escape-analysis.h
+++ b/src/hydrogen-escape-analysis.h
@@ -52,7 +52,7 @@
private:
void CollectCapturedValues();
- void CollectIfNoEscapingUses(HInstruction* instr);
+ bool HasNoEscapingUses(HValue* value);
void PerformScalarReplacement();
void AnalyzeDataFlow(HInstruction* instr);
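
The analysis is now phrased as a predicate that also follows redefinitions: when a use merely redefines the allocation (its RedefinedOperandIndex() matches the operand index), escapes are checked transitively through that use. Condensed, annotated sketch:

    bool HasNoEscapingUses(HValue* value) {
      for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
        HValue* use = it.value();
        if (use->HasEscapingOperandAt(it.index())) return false;  // direct escape
        if (use->RedefinedOperandIndex() == it.index() &&
            !HasNoEscapingUses(use)) {
          return false;  // escape through a redefinition (e.g. a check)
        }
      }
      return true;  // safe to capture for scalar replacement
    }
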
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index b19ca8f..feacefd 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -1537,14 +1537,15 @@
HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
- HValue* half_old_capacity = Add<HShr>(old_capacity, graph_->GetConstant1());
+ HValue* half_old_capacity = AddUncasted<HShr>(old_capacity,
+ graph_->GetConstant1());
- HValue* new_capacity = Add<HAdd>(half_old_capacity, old_capacity);
+ HValue* new_capacity = AddUncasted<HAdd>(half_old_capacity, old_capacity);
new_capacity->ClearFlag(HValue::kCanOverflow);
HValue* min_growth = Add<HConstant>(16);
- new_capacity = Add<HAdd>(new_capacity, min_growth);
+ new_capacity = AddUncasted<HAdd>(new_capacity, min_growth);
new_capacity->ClearFlag(HValue::kCanOverflow);
return new_capacity;
@@ -2768,16 +2769,6 @@
if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
builder->Bailout(kArgumentsObjectValueInATestContext);
}
- if (value->IsConstant()) {
- HConstant* constant_value = HConstant::cast(value);
- if (constant_value->BooleanValue()) {
- builder->current_block()->Goto(if_true(), builder->function_state());
- } else {
- builder->current_block()->Goto(if_false(), builder->function_state());
- }
- builder->set_current_block(NULL);
- return;
- }
HBasicBlock* empty_true = builder->graph()->CreateBasicBlock();
HBasicBlock* empty_false = builder->graph()->CreateBasicBlock();
ToBooleanStub::Types expected(condition()->to_boolean_types());
@@ -3122,7 +3113,9 @@
void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
- CHECK_ALIVE(Visit(statements->at(i)));
+ Statement* stmt = statements->at(i);
+ CHECK_ALIVE(Visit(stmt));
+ if (stmt->IsJump()) break;
}
}
@@ -4096,9 +4089,7 @@
// size of all objects that are part of the graph.
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
- int* max_properties,
- int* data_size,
- int* pointer_size) {
+ int* max_properties) {
if (boilerplate->map()->is_deprecated()) {
Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
if (result->IsSmi()) return false;
@@ -4111,9 +4102,7 @@
Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
- if (boilerplate->HasFastDoubleElements()) {
- *data_size += FixedDoubleArray::SizeFor(elements->length());
- } else if (boilerplate->HasFastObjectElements()) {
+ if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
for (int i = 0; i < length; i++) {
@@ -4123,15 +4112,12 @@
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- data_size,
- pointer_size)) {
+ max_properties)) {
return false;
}
}
}
- *pointer_size += FixedArray::SizeFor(length);
- } else {
+ } else if (!boilerplate->HasFastDoubleElements()) {
return false;
}
}
@@ -4146,7 +4132,6 @@
for (int i = 0; i < limit; i++) {
PropertyDetails details = descriptors->GetDetails(i);
if (details.type() != FIELD) continue;
- Representation representation = details.representation();
int index = descriptors->GetFieldIndex(i);
if ((*max_properties)-- == 0) return false;
Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
@@ -4154,18 +4139,12 @@
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
if (!IsFastLiteral(value_object,
max_depth - 1,
- max_properties,
- data_size,
- pointer_size)) {
+ max_properties)) {
return false;
}
- } else if (representation.IsDouble()) {
- *data_size += HeapNumber::kSize;
}
}
}
-
- *pointer_size += boilerplate->map()->instance_size();
return true;
}
@@ -4175,32 +4154,21 @@
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Handle<JSFunction> closure = function_state()->compilation_info()->closure();
- HValue* context = environment()->context();
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
- int data_size = 0;
- int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
- Handle<Object> original_boilerplate(closure->literals()->get(
+ Handle<Object> boilerplate(closure->literals()->get(
expr->literal_index()), isolate());
- if (original_boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(original_boilerplate),
+ if (boilerplate->IsJSObject() &&
+ IsFastLiteral(Handle<JSObject>::cast(boilerplate),
kMaxFastLiteralDepth,
- &max_properties,
- &data_size,
- &pointer_size)) {
- Handle<JSObject> original_boilerplate_object =
- Handle<JSObject>::cast(original_boilerplate);
+ &max_properties)) {
Handle<JSObject> boilerplate_object =
- DeepCopy(original_boilerplate_object);
+ Handle<JSObject>::cast(boilerplate);
- literal = BuildFastLiteral(context,
- boilerplate_object,
- original_boilerplate_object,
+ literal = BuildFastLiteral(boilerplate_object,
Handle<Object>::null(),
- data_size,
- pointer_size,
DONT_TRACK_ALLOCATION_SITE);
} else {
NoObservableSideEffectsScope no_effects(this);
@@ -4302,7 +4270,6 @@
ASSERT(current_block()->HasPredecessor());
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
- HValue* context = environment()->context();
HInstruction* literal;
Handle<AllocationSite> site;
@@ -4336,10 +4303,10 @@
ASSERT(!raw_boilerplate.is_null());
ASSERT(site->IsLiteralSite());
- Handle<JSObject> original_boilerplate_object =
+ Handle<JSObject> boilerplate_object =
Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(original_boilerplate_object)->GetElementsKind();
+ Handle<JSObject>::cast(boilerplate_object)->GetElementsKind();
// TODO(mvstanton): This heuristic is only a temporary solution. In the
// end, we want to quit creating allocation site info after a certain number
@@ -4348,25 +4315,12 @@
boilerplate_elements_kind);
// Check whether to use fast or slow deep-copying for boilerplate.
- int data_size = 0;
- int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
- if (IsFastLiteral(original_boilerplate_object,
+ if (IsFastLiteral(boilerplate_object,
kMaxFastLiteralDepth,
- &max_properties,
- &data_size,
- &pointer_size)) {
- if (mode == TRACK_ALLOCATION_SITE) {
- pointer_size += AllocationMemento::kSize;
- }
-
- Handle<JSObject> boilerplate_object = DeepCopy(original_boilerplate_object);
- literal = BuildFastLiteral(context,
- boilerplate_object,
- original_boilerplate_object,
+ &max_properties)) {
+ literal = BuildFastLiteral(boilerplate_object,
site,
- data_size,
- pointer_size,
mode);
} else {
NoObservableSideEffectsScope no_effects(this);
@@ -4386,8 +4340,7 @@
3);
// De-opt if elements kind changed from boilerplate_elements_kind.
- Handle<Map> map = Handle<Map>(original_boilerplate_object->map(),
- isolate());
+ Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
literal = Add<HCheckMaps>(literal, map, top_info());
}
@@ -8273,59 +8226,15 @@
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
- HValue* context,
Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- Handle<Object> allocation_site,
- int data_size,
- int pointer_size,
+ Handle<Object> allocation_site_object,
AllocationSiteMode mode) {
NoObservableSideEffectsScope no_effects(this);
- HInstruction* target = NULL;
- HInstruction* data_target = NULL;
+ Handle<FixedArrayBase> elements(boilerplate_object->elements());
+ int object_size = boilerplate_object->map()->instance_size();
+ int object_offset = object_size;
- if (isolate()->heap()->GetPretenureMode() == TENURED) {
- if (data_size != 0) {
- HValue* size_in_bytes = Add<HConstant>(data_size);
- data_target = Add<HAllocate>(size_in_bytes, HType::JSObject(), TENURED,
- FIXED_DOUBLE_ARRAY_TYPE);
- Handle<Map> free_space_map = isolate()->factory()->free_space_map();
- AddStoreMapConstant(data_target, free_space_map);
- HObjectAccess access =
- HObjectAccess::ForJSObjectOffset(FreeSpace::kSizeOffset);
- Add<HStoreNamedField>(data_target, access, size_in_bytes);
- }
- if (pointer_size != 0) {
- HValue* size_in_bytes = Add<HConstant>(pointer_size);
- target = Add<HAllocate>(size_in_bytes, HType::JSObject(), TENURED,
- JS_OBJECT_TYPE);
- }
- } else {
- InstanceType instance_type = boilerplate_object->map()->instance_type();
- HValue* size_in_bytes = Add<HConstant>(data_size + pointer_size);
- target = Add<HAllocate>(size_in_bytes, HType::JSObject(), NOT_TENURED,
- instance_type);
- }
-
- int offset = 0;
- int data_offset = 0;
- BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object,
- allocation_site, target, &offset, data_target,
- &data_offset, mode);
- return target;
-}
-
-
-void HOptimizedGraphBuilder::BuildEmitDeepCopy(
- Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- Handle<Object> allocation_site_object,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset,
- AllocationSiteMode mode) {
bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
AllocationSite::CanTrack(boilerplate_object->map()->instance_type());
@@ -8334,99 +8243,67 @@
ASSERT(!create_allocation_site_info ||
AllocationSite::cast(*allocation_site_object)->IsLiteralSite());
- HInstruction* allocation_site = NULL;
-
if (create_allocation_site_info) {
- allocation_site = Add<HConstant>(allocation_site_object);
+ object_size += AllocationMemento::kSize;
}
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(boilerplate_object->elements());
- Handle<FixedArrayBase> original_elements(
- original_boilerplate_object->elements());
- ElementsKind kind = boilerplate_object->map()->elements_kind();
+ HValue* object_size_constant = Add<HConstant>(object_size);
+ HInstruction* object = Add<HAllocate>(object_size_constant, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), JS_OBJECT_TYPE);
- int object_offset = *offset;
- int object_size = boilerplate_object->map()->instance_size();
+
+ BuildEmitObjectHeader(boilerplate_object, object);
+
+ if (create_allocation_site_info) {
+ HInstruction* allocation_site = Add<HConstant>(allocation_site_object);
+ BuildCreateAllocationMemento(object, object_offset, allocation_site);
+ }
+
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
- int elements_offset = 0;
- if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
- elements_offset = *data_offset;
- *data_offset += elements_size;
- } else {
- // Place elements right after this object.
- elements_offset = *offset + object_size;
- *offset += elements_size;
+ HInstruction* object_elements = NULL;
+ if (elements_size > 0) {
+ HValue* object_elements_size = Add<HConstant>(elements_size);
+ if (boilerplate_object->HasFastDoubleElements()) {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ } else {
+ object_elements = Add<HAllocate>(object_elements_size, HType::JSObject(),
+ isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ }
}
- // Increase the offset so that subsequent objects end up right after this
- // object (and it's elements if they are allocated in the same space).
- *offset += object_size;
+ BuildInitElementsInObjectHeader(boilerplate_object, object, object_elements);
+
// Copy object elements if non-COW.
- HValue* object_elements = BuildEmitObjectHeader(boilerplate_object, target,
- data_target, object_offset, elements_offset, elements_size);
if (object_elements != NULL) {
- BuildEmitElements(elements, original_elements, kind, object_elements,
- target, offset, data_target, data_offset);
+ BuildEmitElements(boilerplate_object, elements, object_elements);
}
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- HValue* object_properties =
- Add<HInnerAllocatedObject>(target, object_offset);
- BuildEmitInObjectProperties(boilerplate_object, original_boilerplate_object,
- object_properties, target, offset, data_target, data_offset);
+ BuildEmitInObjectProperties(boilerplate_object, object);
}
-
- // Create allocation site info.
- if (mode == TRACK_ALLOCATION_SITE &&
- AllocationSite::CanTrack(boilerplate_object->map()->instance_type())) {
- elements_offset += AllocationMemento::kSize;
- *offset += AllocationMemento::kSize;
- BuildCreateAllocationMemento(target, JSArray::kSize, allocation_site);
- }
+ return object;
}
-HValue* HOptimizedGraphBuilder::BuildEmitObjectHeader(
+void HOptimizedGraphBuilder::BuildEmitObjectHeader(
Handle<JSObject> boilerplate_object,
- HInstruction* target,
- HInstruction* data_target,
- int object_offset,
- int elements_offset,
- int elements_size) {
+ HInstruction* object) {
ASSERT(boilerplate_object->properties()->length() == 0);
- HValue* result = NULL;
- HValue* object_header = Add<HInnerAllocatedObject>(target, object_offset);
Handle<Map> boilerplate_object_map(boilerplate_object->map());
- AddStoreMapConstant(object_header, boilerplate_object_map);
-
- HInstruction* elements;
- if (elements_size == 0) {
- Handle<Object> elements_field =
- Handle<Object>(boilerplate_object->elements(), isolate());
- elements = Add<HConstant>(elements_field);
- } else {
- if (data_target != NULL && boilerplate_object->HasFastDoubleElements()) {
- elements = Add<HInnerAllocatedObject>(data_target, elements_offset);
- } else {
- elements = Add<HInnerAllocatedObject>(target, elements_offset);
- }
- result = elements;
- }
- Add<HStoreNamedField>(object_header, HObjectAccess::ForElementsPointer(),
- elements);
+ AddStoreMapConstant(object, boilerplate_object_map);
Handle<Object> properties_field =
Handle<Object>(boilerplate_object->properties(), isolate());
ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
HInstruction* properties = Add<HConstant>(properties_field);
HObjectAccess access = HObjectAccess::ForPropertiesPointer();
- Add<HStoreNamedField>(object_header, access, properties);
+ Add<HStoreNamedField>(object, access, properties);
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
@@ -8436,22 +8313,30 @@
HInstruction* length = Add<HConstant>(length_field);
ASSERT(boilerplate_array->length()->IsSmi());
- Add<HStoreNamedField>(object_header, HObjectAccess::ForArrayLength(
+ Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
boilerplate_array->GetElementsKind()), length);
}
+}
- return result;
+
+void HOptimizedGraphBuilder::BuildInitElementsInObjectHeader(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements) {
+ ASSERT(boilerplate_object->properties()->length() == 0);
+ if (object_elements == NULL) {
+ Handle<Object> elements_field =
+ Handle<Object>(boilerplate_object->elements(), isolate());
+ object_elements = Add<HConstant>(elements_field);
+ }
+ Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+ object_elements);
}
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- HValue* object_properties,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HInstruction* object) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -8475,31 +8360,20 @@
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_boilerplate_object->InObjectPropertyAt(index),
- isolate()));
- HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
- *offset);
-
- Add<HStoreNamedField>(object_properties, access, value_instruction);
- BuildEmitDeepCopy(value_object, original_value_object,
- Handle<Object>::null(), target,
- offset, data_target, data_offset,
- DONT_TRACK_ALLOCATION_SITE);
+ HInstruction* result =
+ BuildFastLiteral(value_object,
+ Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ Add<HStoreNamedField>(object, access, result);
} else {
Representation representation = details.representation();
HInstruction* value_instruction = Add<HConstant>(value);
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
- HInstruction* double_box;
- if (data_target != NULL) {
- double_box = Add<HInnerAllocatedObject>(data_target, *data_offset);
- *data_offset += HeapNumber::kSize;
- } else {
- double_box = Add<HInnerAllocatedObject>(target, *offset);
- *offset += HeapNumber::kSize;
- }
+ HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
+ HInstruction* double_box =
+ Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
+ isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
@@ -8507,7 +8381,7 @@
value_instruction = double_box;
}
- Add<HStoreNamedField>(object_properties, access, value_instruction);
+ Add<HStoreNamedField>(object, access, value_instruction);
}
}
@@ -8518,31 +8392,25 @@
ASSERT(boilerplate_object->IsJSObject());
int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
HObjectAccess access = HObjectAccess::ForJSObjectOffset(property_offset);
- Add<HStoreNamedField>(object_properties, access, value_instruction);
+ Add<HStoreNamedField>(object, access, value_instruction);
}
}
void HOptimizedGraphBuilder::BuildEmitElements(
+ Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
- ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HValue* object_elements) {
+ ElementsKind kind = boilerplate_object->map()->elements_kind();
int elements_length = elements->length();
HValue* object_elements_length = Add<HConstant>(elements_length);
-
BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
// Copy elements backing store content.
if (elements->IsFixedDoubleArray()) {
BuildEmitFixedDoubleArray(elements, kind, object_elements);
} else if (elements->IsFixedArray()) {
- BuildEmitFixedArray(elements, original_elements, kind, object_elements,
- target, offset, data_target, data_offset);
+ BuildEmitFixedArray(elements, kind, object_elements);
} else {
UNREACHABLE();
}
@@ -8570,32 +8438,20 @@
void HOptimizedGraphBuilder::BuildEmitFixedArray(
Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset) {
+ HValue* object_elements) {
HInstruction* boilerplate_elements = Add<HConstant>(elements);
int elements_length = elements->length();
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- Handle<FixedArray> original_fast_elements =
- Handle<FixedArray>::cast(original_elements);
for (int i = 0; i < elements_length; i++) {
Handle<Object> value(fast_elements->get(i), isolate());
HValue* key_constant = Add<HConstant>(i);
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- Handle<JSObject> original_value_object = Handle<JSObject>::cast(
- Handle<Object>(original_fast_elements->get(i), isolate()));
- HInstruction* value_instruction = Add<HInnerAllocatedObject>(target,
- *offset);
- Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
- BuildEmitDeepCopy(value_object, original_value_object,
- Handle<Object>::null(), target,
- offset, data_target, data_offset,
- DONT_TRACK_ALLOCATION_SITE);
+ HInstruction* result =
+ BuildFastLiteral(value_object,
+ Handle<Object>::null(), DONT_TRACK_ALLOCATION_SITE);
+ Add<HStoreKeyed>(object_elements, key_constant, result, kind);
} else {
HInstruction* value_instruction =
Add<HLoadKeyed>(boilerplate_elements, key_constant,
@@ -9813,15 +9669,15 @@
void HStatistics::Print() {
PrintF("Timing results:\n");
- int64_t sum = 0;
- for (int i = 0; i < timing_.length(); ++i) {
- sum += timing_[i];
+ TimeDelta sum;
+ for (int i = 0; i < times_.length(); ++i) {
+ sum += times_[i];
}
for (int i = 0; i < names_.length(); ++i) {
PrintF("%32s", names_[i]);
- double ms = static_cast<double>(timing_[i]) / 1000;
- double percent = static_cast<double>(timing_[i]) * 100 / sum;
+ double ms = times_[i].InMillisecondsF();
+ double percent = times_[i].PercentOf(sum);
PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
unsigned size = sizes_[i];
@@ -9831,29 +9687,29 @@
PrintF("----------------------------------------"
"---------------------------------------\n");
- int64_t total = create_graph_ + optimize_graph_ + generate_code_;
+ TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Create graph",
- static_cast<double>(create_graph_) / 1000,
- static_cast<double>(create_graph_) * 100 / total);
+ create_graph_.InMillisecondsF(),
+ create_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Optimize graph",
- static_cast<double>(optimize_graph_) / 1000,
- static_cast<double>(optimize_graph_) * 100 / total);
+ optimize_graph_.InMillisecondsF(),
+ optimize_graph_.PercentOf(total));
PrintF("%32s %8.3f ms / %4.1f %% \n",
"Generate and install code",
- static_cast<double>(generate_code_) / 1000,
- static_cast<double>(generate_code_) * 100 / total);
+ generate_code_.InMillisecondsF(),
+ generate_code_.PercentOf(total));
PrintF("----------------------------------------"
"---------------------------------------\n");
PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
"Total",
- static_cast<double>(total) / 1000,
- static_cast<double>(total) / full_code_gen_);
+ total.InMillisecondsF(),
+ total.TimesOf(full_code_gen_));
double source_size_in_kb = static_cast<double>(source_size_) / 1024;
double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(total) / 1000) / source_size_in_kb
+ ? total.InMillisecondsF() / source_size_in_kb
: 0;
double normalized_size_in_kb = source_size_in_kb > 0
? total_size_ / 1024 / source_size_in_kb
@@ -9864,17 +9720,17 @@
}
-void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
+void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
if (strcmp(names_[i], name) == 0) {
- timing_[i] += ticks;
+ times_[i] += time;
sizes_[i] += size;
return;
}
}
names_.Add(name);
- timing_.Add(ticks);
+ times_.Add(time);
sizes_.Add(size);
}
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 22bffd1..cdca1bd 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -2063,60 +2063,31 @@
HInstruction* BuildThisFunction();
- HInstruction* BuildFastLiteral(HValue* context,
- Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
+ HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
Handle<Object> allocation_site,
- int data_size,
- int pointer_size,
AllocationSiteMode mode);
- void BuildEmitDeepCopy(Handle<JSObject> boilerplat_object,
- Handle<JSObject> object,
- Handle<Object> allocation_site,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset,
- AllocationSiteMode mode);
+ void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object);
- MUST_USE_RESULT HValue* BuildEmitObjectHeader(
- Handle<JSObject> boilerplat_object,
- HInstruction* target,
- HInstruction* data_target,
- int object_offset,
- int elements_offset,
- int elements_size);
+ void BuildInitElementsInObjectHeader(Handle<JSObject> boilerplate_object,
+ HInstruction* object,
+ HInstruction* object_elements);
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
- Handle<JSObject> original_boilerplate_object,
- HValue* object_properties,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
+ HInstruction* object);
- void BuildEmitElements(Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
- ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
+ void BuildEmitElements(Handle<JSObject> boilerplate_object,
+ Handle<FixedArrayBase> elements,
+ HValue* object_elements);
void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
ElementsKind kind,
HValue* object_elements);
void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
- Handle<FixedArrayBase> original_elements,
ElementsKind kind,
- HValue* object_elements,
- HInstruction* target,
- int* offset,
- HInstruction* data_target,
- int* data_offset);
+ HValue* object_elements);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
@@ -2165,41 +2136,37 @@
class HStatistics V8_FINAL: public Malloced {
public:
HStatistics()
- : timing_(5),
+ : times_(5),
names_(5),
sizes_(5),
- create_graph_(0),
- optimize_graph_(0),
- generate_code_(0),
total_size_(0),
- full_code_gen_(0),
source_size_(0) { }
void Initialize(CompilationInfo* info);
void Print();
- void SaveTiming(const char* name, int64_t ticks, unsigned size);
+ void SaveTiming(const char* name, TimeDelta time, unsigned size);
- void IncrementFullCodeGen(int64_t full_code_gen) {
+ void IncrementFullCodeGen(TimeDelta full_code_gen) {
full_code_gen_ += full_code_gen;
}
- void IncrementSubtotals(int64_t create_graph,
- int64_t optimize_graph,
- int64_t generate_code) {
+ void IncrementSubtotals(TimeDelta create_graph,
+ TimeDelta optimize_graph,
+ TimeDelta generate_code) {
create_graph_ += create_graph;
optimize_graph_ += optimize_graph;
generate_code_ += generate_code;
}
private:
- List<int64_t> timing_;
+ List<TimeDelta> times_;
List<const char*> names_;
List<unsigned> sizes_;
- int64_t create_graph_;
- int64_t optimize_graph_;
- int64_t generate_code_;
+ TimeDelta create_graph_;
+ TimeDelta optimize_graph_;
+ TimeDelta generate_code_;
unsigned total_size_;
- int64_t full_code_gen_;
+ TimeDelta full_code_gen_;
double source_size_;
};
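
HStatistics now accumulates TimeDelta values rather than raw tick counts. A hedged sketch of feeding it, combining the ElapsedTimer API used elsewhere in this patch (RunPhase and code_size are placeholders):

    ElapsedTimer timer;
    timer.Start();
    RunPhase();  // the compilation phase being measured
    isolate()->GetHStatistics()->SaveTiming("MyPhase", timer.Elapsed(),
                                            code_size);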
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 64f36b3..42b1296 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -4229,16 +4229,6 @@
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index a9bd8c5..a4f7ee8 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -200,12 +200,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -221,12 +216,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -241,23 +231,28 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
- ASSERT_EQ(interrupt_code->entry(),
+ // Fetch the interrupt builtin from the builtins cache to compare against.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
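
GetInterruptPatchState looks the expected code objects up in the isolate's builtins cache instead of taking them as parameters. A hypothetical debug-only call site (the signature and enum values come from the hunk above; the ASSERT itself is illustrative):

    // Debug-only, mirroring the #ifdef DEBUG guard above:
    ASSERT(Deoptimizer::GetInterruptPatchState(isolate, unoptimized_code,
                                               pc_after) ==
           Deoptimizer::NOT_PATCHED);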
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 775a168..09966c9 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -288,8 +288,7 @@
ExternalReference::address_of_stack_limit(isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -347,8 +346,7 @@
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -395,8 +393,8 @@
__ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(eax);
EmitProfilingCounterReset();
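
With StackCheckStub and InterruptStub deleted from code-stubs-ia32.cc above, every call site now targets the builtins directly, as these full-codegen hunks show. The replacement pattern, for reference:

    __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
    __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);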
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index e7e0327..15b0990 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -6430,8 +6430,9 @@
ASSERT(instr->context()->IsRegister());
ASSERT(ToRegister(instr->context()).is(esi));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
diff --git a/src/isolate.cc b/src/isolate.cc
index 54329b7..d0b6d46 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -343,7 +343,7 @@
#ifdef DEBUG
Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
#endif // DEBUG
-Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex();
+RecursiveMutex Isolate::process_wide_mutex_;
Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
Atomic32 Isolate::isolate_counter_ = 0;
@@ -352,7 +352,7 @@
ASSERT(!thread_id.Equals(ThreadId::Invalid()));
PerIsolateThreadData* per_thread = new PerIsolateThreadData(this, thread_id);
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
ASSERT(thread_data_table_->Lookup(this, thread_id) == NULL);
thread_data_table_->Insert(per_thread);
ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
@@ -366,7 +366,7 @@
ThreadId thread_id = ThreadId::Current();
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
if (per_thread == NULL) {
per_thread = AllocatePerIsolateThreadData(thread_id);
@@ -386,7 +386,7 @@
ThreadId thread_id) {
PerIsolateThreadData* per_thread = NULL;
{
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
per_thread = thread_data_table_->Lookup(this, thread_id);
}
return per_thread;
@@ -394,7 +394,7 @@
void Isolate::EnsureDefaultIsolate() {
- ScopedLock lock(process_wide_mutex_);
+ LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
if (default_isolate_ == NULL) {
isolate_key_ = Thread::CreateThreadLocalKey();
thread_id_key_ = Thread::CreateThreadLocalKey();
@@ -1750,11 +1750,7 @@
compilation_cache_(NULL),
counters_(NULL),
code_range_(NULL),
- // Must be initialized early to allow v8::SetResourceConstraints calls.
- break_access_(OS::CreateMutex()),
debugger_initialized_(false),
- // Must be initialized early to allow v8::Debug calls.
- debugger_access_(OS::CreateMutex()),
logger_(NULL),
stats_table_(NULL),
stub_cache_(NULL),
@@ -1854,7 +1850,7 @@
Deinit();
- { ScopedLock lock(process_wide_mutex_);
+ { LockGuard<RecursiveMutex> lock_guard(&process_wide_mutex_);
thread_data_table_->RemoveAllThreads(this);
}
@@ -2025,10 +2021,6 @@
delete handle_scope_implementer_;
handle_scope_implementer_ = NULL;
- delete break_access_;
- break_access_ = NULL;
- delete debugger_access_;
- debugger_access_ = NULL;
delete compilation_cache_;
compilation_cache_ = NULL;
@@ -2127,7 +2119,7 @@
void Isolate::InitializeDebugger() {
#ifdef ENABLE_DEBUGGER_SUPPORT
- ScopedLock lock(debugger_access_);
+ LockGuard<RecursiveMutex> lock_guard(debugger_access());
if (NoBarrier_Load(&debugger_initialized_)) return;
InitializeLoggingAndCounters();
debug_ = new Debug(this);
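
The isolate.cc changes follow one mechanical pattern: Mutex* members created via OS::CreateMutex() become value-type RecursiveMutex members, and ScopedLock becomes a typed RAII guard. In sketch form:

    RecursiveMutex mutex;  // plain member; no OS::CreateMutex()/delete pair
    {
      LockGuard<RecursiveMutex> lock_guard(&mutex);
      // ... critical section ...
    }  // unlocked automatically on scope exit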
diff --git a/src/isolate.h b/src/isolate.h
index 751f995..8eace12 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -541,10 +541,10 @@
static void EnterDefaultIsolate();
// Mutex for serializing access to break control structures.
- Mutex* break_access() { return break_access_; }
+ RecursiveMutex* break_access() { return &break_access_; }
// Mutex for serializing access to debugger.
- Mutex* debugger_access() { return debugger_access_; }
+ RecursiveMutex* debugger_access() { return &debugger_access_; }
Address get_address_from_id(AddressId id);
@@ -1185,7 +1185,7 @@
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
- static Mutex* process_wide_mutex_;
+ static RecursiveMutex process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
@@ -1253,9 +1253,9 @@
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
- Mutex* break_access_;
+ RecursiveMutex break_access_;
Atomic32 debugger_initialized_;
- Mutex* debugger_access_;
+ RecursiveMutex debugger_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
@@ -1414,12 +1414,30 @@
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
- AssertNoContextChange() :
+ AssertNoContextChange() : context_(Isolate::Current()->context()) { }
+ ~AssertNoContextChange() {
+ ASSERT(Isolate::Current()->context() == *context_);
+ }
+
+ private:
+ Handle<Context> context_;
+#else
+ public:
+ AssertNoContextChange() { }
+#endif
+};
+
+
+// TODO(mstarzinger): Deprecate as soon as everything is handlified.
+class AssertNoContextChangeWithHandleScope BASE_EMBEDDED {
+#ifdef DEBUG
+ public:
+ AssertNoContextChangeWithHandleScope() :
scope_(Isolate::Current()),
context_(Isolate::Current()->context(), Isolate::Current()) {
}
- ~AssertNoContextChange() {
+ ~AssertNoContextChangeWithHandleScope() {
ASSERT(Isolate::Current()->context() == *context_);
}
@@ -1428,7 +1446,7 @@
Handle<Context> context_;
#else
public:
- AssertNoContextChange() { }
+ AssertNoContextChangeWithHandleScope() { }
#endif
};
@@ -1440,11 +1458,11 @@
}
~ExecutionAccess() { Unlock(isolate_); }
- static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
- static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
+ static void Lock(Isolate* isolate) { isolate->break_access()->Lock(); }
+ static void Unlock(Isolate* isolate) { isolate->break_access()->Unlock(); }
static bool TryLock(Isolate* isolate) {
- return isolate->break_access_->TryLock();
+ return isolate->break_access()->TryLock();
}
private:
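
On the AssertNoContextChange split above: the slimmed-down class only snapshots the current context, so it assumes a HandleScope is already open, while the ...WithHandleScope variant keeps its own scope for code that is not yet handlified (hence the TODO). A sketch of the intended division, with placeholder function names:

    void HandlifiedPath() {
      AssertNoContextChange ncc;                 // caller provides the scope
      // ... operates on handles passed in ...
    }
    void LegacyPath() {
      AssertNoContextChangeWithHandleScope ncc;  // owns its own HandleScope
      // ... pre-handlification code ...
    }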
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index 2e2f802..3c5abd1 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -2189,7 +2189,7 @@
if (FLAG_hydrogen_stats) {
unsigned size = allocator_->zone()->allocation_size() -
allocator_zone_start_allocation_size_;
- isolate()->GetHStatistics()->SaveTiming(name(), 0, size);
+ isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
}
if (ShouldProduceTraceOutput()) {
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 6bba882..909d4a5 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -41,14 +41,12 @@
Log::Log(Logger* logger)
: is_stopped_(false),
output_handle_(NULL),
- mutex_(NULL),
message_buffer_(NULL),
logger_(logger) {
}
void Log::Initialize(const char* log_file_name) {
- mutex_ = OS::CreateMutex();
message_buffer_ = NewArray<char>(kMessageBufferSize);
// --log-all enables all the log flags.
@@ -66,11 +64,6 @@
// --prof implies --log-code.
if (FLAG_prof) FLAG_log_code = true;
- // --prof_lazy controls --log-code.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- }
-
// If we're logging anything, we need to open the log file.
if (Log::InitLogAtStart()) {
if (strcmp(log_file_name, kLogToConsole) == 0) {
@@ -116,9 +109,6 @@
DeleteArray(message_buffer_);
message_buffer_ = NULL;
- delete mutex_;
- mutex_ = NULL;
-
is_stopped_ = false;
return result;
}
@@ -126,7 +116,7 @@
Log::MessageBuilder::MessageBuilder(Log* log)
: log_(log),
- sl(log_->mutex_),
+ lock_guard_(&log_->mutex_),
pos_(0) {
ASSERT(log_->message_buffer_ != NULL);
}
diff --git a/src/log-utils.h b/src/log-utils.h
index 861a826..ec8415e 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -107,7 +107,7 @@
private:
Log* log_;
- ScopedLock sl;
+ LockGuard<Mutex> lock_guard_;
int pos_;
};
@@ -142,7 +142,7 @@
// mutex_ is a Mutex used for enforcing exclusive
// access to the formatting buffer and the log file or log memory buffer.
- Mutex* mutex_;
+ Mutex mutex_;
// Buffer used for formatting log messages. This is a singleton buffer and
// mutex_ should be acquired before using it.
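
Log's mutex likewise becomes a by-value member, and MessageBuilder holds it for its whole lifetime through lock_guard_. Typical use, per the constructor and methods shown in log-utils.cc (value is a placeholder):

    Log::MessageBuilder msg(log_);   // constructor acquires log_->mutex_
    msg.Append("my-event,%d\n", value);
    msg.WriteToLogFile();            // lock released when msg is destroyed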
diff --git a/src/log.cc b/src/log.cc
index ff67d10..158d652 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -563,7 +563,6 @@
virtual void Run();
// Pause and Resume TickSample data collection.
- bool paused() const { return paused_; }
void pause() { paused_ = true; }
void resume() { paused_ = false; }
@@ -623,13 +622,13 @@
ASSERT(profiler_ == NULL);
profiler_ = profiler;
IncreaseProfilingDepth();
- if (!FLAG_prof_lazy && !IsActive()) Start();
+ if (!IsActive()) Start();
}
void ClearProfiler() {
- DecreaseProfilingDepth();
profiler_ = NULL;
if (IsActive()) Stop();
+ DecreaseProfilingDepth();
}
private:
@@ -710,14 +709,12 @@
ticker_(NULL),
profiler_(NULL),
log_events_(NULL),
- logging_nesting_(0),
- cpu_profiler_nesting_(0),
+ is_logging_(false),
log_(new Log(this)),
ll_logger_(NULL),
jit_logger_(NULL),
listeners_(5),
- is_initialized_(false),
- epoch_(0) {
+ is_initialized_(false) {
}
@@ -868,7 +865,7 @@
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
msg.WriteToLogFile();
}
@@ -878,7 +875,7 @@
if (!log_->IsEnabled()) return;
ASSERT(FLAG_log_internal_timer_events);
Log::MessageBuilder msg(log_);
- int since_epoch = static_cast<int>(OS::Ticks() - epoch_);
+ int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
: "timer-event-end,\"%s\",%ld\n";
msg.Append(format, name, since_epoch);
@@ -1501,7 +1498,7 @@
Log::MessageBuilder msg(log_);
msg.Append("%s,", kLogEventsNames[TICK_EVENT]);
msg.AppendAddress(sample->pc);
- msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
+ msg.Append(",%ld", static_cast<int>(timer_.Elapsed().InMicroseconds()));
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
@@ -1522,43 +1519,11 @@
}
-bool Logger::IsProfilerPaused() {
- return profiler_ == NULL || profiler_->paused();
-}
-
-
-void Logger::PauseProfiler() {
+void Logger::StopProfiler() {
if (!log_->IsEnabled()) return;
if (profiler_ != NULL) {
- // It is OK to have negative nesting.
- if (--cpu_profiler_nesting_ == 0) {
- profiler_->pause();
- if (FLAG_prof_lazy) {
- ticker_->Stop();
- FLAG_log_code = false;
- LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
- }
- --logging_nesting_;
- }
- }
-}
-
-
-void Logger::ResumeProfiler() {
- if (!log_->IsEnabled()) return;
- if (profiler_ != NULL) {
- if (cpu_profiler_nesting_++ == 0) {
- ++logging_nesting_;
- if (FLAG_prof_lazy) {
- profiler_->Engage();
- LOG(ISOLATE, UncheckedStringEvent("profiler", "resume"));
- FLAG_log_code = true;
- LogCompiledFunctions();
- LogAccessorCallbacks();
- if (!ticker_->IsActive()) ticker_->Start();
- }
- profiler_->resume();
- }
+ profiler_->pause();
+ is_logging_ = false;
}
}
@@ -1566,7 +1531,7 @@
// This function can be called when Log's mutex is acquired,
// either from main or Profiler's thread.
void Logger::LogFailure() {
- PauseProfiler();
+ StopProfiler();
}
@@ -1866,11 +1831,6 @@
FLAG_log_snapshot_positions = true;
}
- // --prof_lazy controls --log-code.
- if (FLAG_prof_lazy) {
- FLAG_log_code = false;
- }
-
SmartArrayPointer<const char> log_file_name =
PrepareLogFileName(FLAG_logfile);
log_->Initialize(*log_file_name);
@@ -1883,20 +1843,16 @@
ticker_ = new Ticker(isolate, kSamplingIntervalMs);
if (Log::InitLogAtStart()) {
- logging_nesting_ = 1;
+ is_logging_ = true;
}
if (FLAG_prof) {
profiler_ = new Profiler(isolate);
- if (FLAG_prof_lazy) {
- profiler_->pause();
- } else {
- logging_nesting_ = 1;
- profiler_->Engage();
- }
+ is_logging_ = true;
+ profiler_->Engage();
}
- if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks();
+ if (FLAG_log_internal_timer_events || FLAG_prof) timer_.Start();
return true;
}
diff --git a/src/log.h b/src/log.h
index 76e3cee..81d45e5 100644
--- a/src/log.h
+++ b/src/log.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "objects.h"
#include "platform.h"
+#include "platform/elapsed-timer.h"
namespace v8 {
namespace internal {
@@ -340,19 +341,16 @@
void LogRuntime(Vector<const char> format, JSArray* args);
bool is_logging() {
- return logging_nesting_ > 0;
+ return is_logging_;
}
bool is_logging_code_events() {
return is_logging() || jit_logger_ != NULL;
}
- // Pause/Resume collection of profiling data.
- // When data collection is paused, CPU Tick events are discarded until
- // data collection is Resumed.
- void PauseProfiler();
- void ResumeProfiler();
- bool IsProfilerPaused();
+ // Stop collection of profiling data.
+ // Once data collection is stopped, CPU Tick events are discarded.
+ void StopProfiler();
void LogExistingFunction(Handle<SharedFunctionInfo> shared,
Handle<Code> code);
@@ -434,13 +432,9 @@
friend class TimeLog;
friend class Profiler;
template <StateTag Tag> friend class VMState;
-
friend class LoggerTestHelper;
-
- int logging_nesting_;
- int cpu_profiler_nesting_;
-
+ bool is_logging_;
Log* log_;
LowLevelLogger* ll_logger_;
JitLogger* jit_logger_;
@@ -450,7 +444,7 @@
// 'true' between SetUp() and TearDown().
bool is_initialized_;
- int64_t epoch_;
+ ElapsedTimer timer_;
friend class CpuProfiler;
};
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 710901e..8b0fdb2 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -2380,16 +2380,6 @@
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
const Register base = a1;
const Register exponent = a2;
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 57d3880..bed6e12 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -101,12 +101,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -123,12 +118,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -143,23 +133,28 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
if (Assembler::IsAddImmediate(
Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- return true;
+ reinterpret_cast<uint32_t>(osr_builtin->entry()));
+ return PATCHED_FOR_OSR;
} else {
+ // Fetch the interrupt builtin from the builtins cache to compare against.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(interrupt_code->entry()));
- return false;
+ reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
+ return NOT_PATCHED;
}
}
#endif // DEBUG
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index d59820f..37d6bcf 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -298,8 +298,7 @@
Label ok;
__ LoadRoot(t0, Heap::kStackLimitRootIndex);
__ Branch(&ok, hs, sp, Operand(t0));
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -369,9 +368,8 @@
EmitProfilingCounterDecrement(weight);
__ slt(at, a3, zero_reg);
__ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
+ // Call will emit a li t9 first, so it is safe to use the delay slot.
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
@@ -418,8 +416,8 @@
__ push(a2);
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ Call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(v0);
EmitProfilingCounterReset();
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index d2ab06a..11aac0b 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -5670,8 +5670,9 @@
Label done;
__ LoadRoot(at, Heap::kStackLimitRootIndex);
__ Branch(&done, hs, sp, Operand(at));
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt();
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
diff --git a/src/objects.cc b/src/objects.cc
index 452c1d6..8cf74ad 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -828,7 +828,8 @@
PropertyAttributes* attributes) {
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
+
Isolate* isolate = name->GetIsolate();
Heap* heap = isolate->heap();
@@ -3878,9 +3879,10 @@
StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
Isolate* isolate = heap->isolate();
+
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Optimization for 2-byte strings often used as keys in a decompression
// dictionary. We internalize these short keys to avoid constantly
@@ -4039,7 +4041,7 @@
ExtensibilityCheck extensibility_check) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
Isolate* isolate = GetIsolate();
LookupResult lookup(isolate);
LocalLookup(name_raw, &lookup, true);
@@ -4183,12 +4185,12 @@
if (name->IsSymbol()) return ABSENT;
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
Handle<InterceptorInfo> interceptor(GetNamedInterceptor());
Handle<JSObject> receiver_handle(receiver);
Handle<JSObject> holder_handle(this);
@@ -4318,10 +4320,12 @@
PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
JSReceiver* receiver, uint32_t index, bool continue_search) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSReceiver> hreceiver(receiver);
Handle<JSObject> holder(this);
@@ -5027,111 +5031,110 @@
}
-MaybeObject* JSObject::DeleteElementWithInterceptor(uint32_t index) {
- Isolate* isolate = GetIsolate();
- Heap* heap = isolate->heap();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- if (interceptor->deleter()->IsUndefined()) return heap->false_value();
- v8::IndexedPropertyDeleterCallback deleter =
- v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
- Handle<JSObject> this_handle(this);
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-delete", this, index));
- PropertyCallbackArguments args(isolate, interceptor->data(), this, this);
- v8::Handle<v8::Boolean> result = args.Call(deleter, index);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- if (!result.IsEmpty()) {
- ASSERT(result->IsBoolean());
- Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
- result_internal->VerifyApiCallResultType();
- return *result_internal;
- }
- MaybeObject* raw_result = this_handle->GetElementsAccessor()->Delete(
- *this_handle,
- index,
- NORMAL_DELETION);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return raw_result;
-}
-
-
-Handle<Object> JSObject::DeleteElement(Handle<JSObject> obj,
- uint32_t index,
- DeleteMode mode) {
- CALL_HEAP_FUNCTION(obj->GetIsolate(),
- obj->DeleteElement(index, mode),
+// TODO(mstarzinger): Temporary wrapper until handlified.
+static Handle<Object> AccessorDelete(Handle<JSObject> object,
+ uint32_t index,
+ JSObject::DeleteMode mode) {
+ CALL_HEAP_FUNCTION(object->GetIsolate(),
+ object->GetElementsAccessor()->Delete(*object,
+ index,
+ mode),
Object);
}
-MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
- Isolate* isolate = GetIsolate();
+Handle<Object> JSObject::DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc;
+
+ Handle<InterceptorInfo> interceptor(object->GetIndexedInterceptor());
+ if (interceptor->deleter()->IsUndefined()) return factory->false_value();
+ v8::IndexedPropertyDeleterCallback deleter =
+ v8::ToCData<v8::IndexedPropertyDeleterCallback>(interceptor->deleter());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-delete", *object, index));
+ PropertyCallbackArguments args(
+ isolate, interceptor->data(), *object, *object);
+ v8::Handle<v8::Boolean> result = args.Call(deleter, index);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ if (!result.IsEmpty()) {
+ ASSERT(result->IsBoolean());
+ Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+ result_internal->VerifyApiCallResultType();
+ // Rebox CustomArguments::kReturnValueOffset before returning.
+ return handle(*result_internal, isolate);
+ }
+ Handle<Object> delete_result = AccessorDelete(object, index, NORMAL_DELETION);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return delete_result;
+}
+
+
+Handle<Object> JSObject::DeleteElement(Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode) {
+ Isolate* isolate = object->GetIsolate();
+ Factory* factory = isolate->factory();
+
// Check access rights if needed.
- if (IsAccessCheckNeeded() &&
- !isolate->MayIndexedAccess(this, index, v8::ACCESS_DELETE)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_DELETE);
- RETURN_IF_SCHEDULED_EXCEPTION(isolate);
- return isolate->heap()->false_value();
+ if (object->IsAccessCheckNeeded() &&
+ !isolate->MayIndexedAccess(*object, index, v8::ACCESS_DELETE)) {
+ isolate->ReportFailedAccessCheck(*object, v8::ACCESS_DELETE);
+ RETURN_HANDLE_IF_SCHEDULED_EXCEPTION(isolate, Object);
+ return factory->false_value();
}
- if (IsStringObjectWithCharacterAt(index)) {
+ if (object->IsStringObjectWithCharacterAt(index)) {
if (mode == STRICT_DELETION) {
// Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> holder(this, isolate);
- Handle<Object> name = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[2] = { name, holder };
+ Handle<Object> name = factory->NewNumberFromUint(index);
+ Handle<Object> args[2] = { name, object };
Handle<Object> error =
- isolate->factory()->NewTypeError("strict_delete_property",
- HandleVector(args, 2));
- return isolate->Throw(*error);
+ factory->NewTypeError("strict_delete_property",
+ HandleVector(args, 2));
+ isolate->Throw(*error);
+ return Handle<Object>();
}
- return isolate->heap()->false_value();
+ return factory->false_value();
}
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
+ if (object->IsJSGlobalProxy()) {
+ Handle<Object> proto(object->GetPrototype(), isolate);
+ if (proto->IsNull()) return factory->false_value();
ASSERT(proto->IsJSGlobalObject());
- return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
+ return DeleteElement(Handle<JSObject>::cast(proto), index, mode);
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
-
Handle<Object> old_value;
bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && self->map()->is_observed()) {
- should_enqueue_change_record = self->HasLocalElement(index);
+ if (FLAG_harmony_observation && object->map()->is_observed()) {
+ should_enqueue_change_record = object->HasLocalElement(index);
if (should_enqueue_change_record) {
- old_value = self->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(isolate->factory()->the_hole_value())
- : Object::GetElement(self, index);
+ old_value = object->GetLocalElementAccessorPair(index) != NULL
+ ? Handle<Object>::cast(factory->the_hole_value())
+ : Object::GetElement(object, index);
}
}
- MaybeObject* result;
// Skip interceptor if forcing deletion.
- if (self->HasIndexedInterceptor() && mode != FORCE_DELETION) {
- result = self->DeleteElementWithInterceptor(index);
+ Handle<Object> result;
+ if (object->HasIndexedInterceptor() && mode != FORCE_DELETION) {
+ result = DeleteElementWithInterceptor(object, index);
} else {
- result = self->GetElementsAccessor()->Delete(*self, index, mode);
+ result = AccessorDelete(object, index, mode);
}
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (should_enqueue_change_record && !self->HasLocalElement(index)) {
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- EnqueueChangeRecord(self, "deleted", name, old_value);
+ if (should_enqueue_change_record && !object->HasLocalElement(index)) {
+ Handle<String> name = factory->Uint32ToString(index);
+ EnqueueChangeRecord(object, "deleted", name, old_value);
}
- return *hresult;
+ return result;
}
@@ -6144,7 +6147,7 @@
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(*name)->TryFlatten();
@@ -6327,7 +6330,7 @@
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Try to flatten before operating on the string.
if (name->IsString()) String::cast(name)->TryFlatten();
@@ -6395,7 +6398,7 @@
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
- AssertNoContextChange ncc;
+ AssertNoContextChangeWithHandleScope ncc;
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
@@ -11543,10 +11546,12 @@
bool check_prototype,
SetPropertyMode set_mode) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
Handle<JSObject> this_handle(this);
Handle<Object> value_handle(value, isolate);
@@ -12553,10 +12558,12 @@
MaybeObject* JSObject::GetElementWithInterceptor(Object* receiver,
uint32_t index) {
Isolate* isolate = GetIsolate();
+ HandleScope scope(isolate);
+
// Make sure that the top context does not change when doing
// callbacks or interceptor calls.
AssertNoContextChange ncc;
- HandleScope scope(isolate);
+
Handle<InterceptorInfo> interceptor(GetIndexedInterceptor(), isolate);
Handle<Object> this_handle(receiver, isolate);
Handle<JSObject> holder_handle(this, isolate);
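
The handlified DeleteElement changes the error contract: a thrown exception is scheduled on the isolate and an empty handle is returned, rather than a failure MaybeObject. A hedged sketch of the caller-side check (Failure::Exception() is assumed here as the era's usual pending-exception sentinel for callers still returning MaybeObject):

    Handle<Object> result = JSObject::DeleteElement(object, index, mode);
    if (result.is_null()) return Failure::Exception();  // exception pending
    if (result->IsTrue()) {
      // element was removed
    }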
diff --git a/src/objects.h b/src/objects.h
index 040664d..657d245 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1521,10 +1521,7 @@
inline void VerifyApiCallResultType();
// Prints this object without details.
- inline void ShortPrint() {
- ShortPrint(stdout);
- }
- void ShortPrint(FILE* out);
+ void ShortPrint(FILE* out = stdout);
// Prints this object without details to a message accumulator.
void ShortPrint(StringStream* accumulator);
@@ -1563,10 +1560,7 @@
static inline Smi* cast(Object* object);
// Dispatched behavior.
- inline void SmiPrint() {
- SmiPrint(stdout);
- }
- void SmiPrint(FILE* out);
+ void SmiPrint(FILE* out = stdout);
void SmiPrint(StringStream* accumulator);
DECLARE_VERIFIER(Smi)
@@ -1637,10 +1631,7 @@
static inline Failure* cast(MaybeObject* object);
// Dispatched behavior.
- inline void FailurePrint() {
- FailurePrint(stdout);
- }
- void FailurePrint(FILE* out);
+ void FailurePrint(FILE* out = stdout);
void FailurePrint(StringStream* accumulator);
DECLARE_VERIFIER(Failure)
@@ -1769,12 +1760,9 @@
// Dispatched behavior.
void HeapObjectShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void HeapObjectPrint() {
- HeapObjectPrint(stdout);
- }
- void HeapObjectPrint(FILE* out);
void PrintHeader(FILE* out, const char* id);
#endif
+ DECLARE_PRINTER(HeapObject)
DECLARE_VERIFIER(HeapObject)
#ifdef VERIFY_HEAP
inline void VerifyObjectField(int offset);
@@ -1858,10 +1846,7 @@
// Dispatched behavior.
bool HeapNumberBooleanValue();
- inline void HeapNumberPrint() {
- HeapNumberPrint(stdout);
- }
- void HeapNumberPrint(FILE* out);
+ void HeapNumberPrint(FILE* out = stdout);
void HeapNumberPrint(StringStream* accumulator);
DECLARE_VERIFIER(HeapNumber)
@@ -1982,7 +1967,7 @@
DeleteMode mode = NORMAL_DELETION);
static Handle<Object> DeleteElement(Handle<JSReceiver> object,
uint32_t index,
- DeleteMode mode);
+ DeleteMode mode = NORMAL_DELETION);
// Set the index'th array element.
// Can cause GC, or return failure if GC is required.
@@ -2320,11 +2305,6 @@
MUST_USE_RESULT MaybeObject* GetIdentityHash(CreationFlag flag);
MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag);
- static Handle<Object> DeleteElement(Handle<JSObject> obj,
- uint32_t index,
- DeleteMode mode = NORMAL_DELETION);
- MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode);
-
inline void ValidateElements();
// Makes sure that this object can contain HeapObject as elements.
@@ -2647,19 +2627,9 @@
DECLARE_PRINTER(JSObject)
DECLARE_VERIFIER(JSObject)
#ifdef OBJECT_PRINT
- inline void PrintProperties() {
- PrintProperties(stdout);
- }
- void PrintProperties(FILE* out);
-
- inline void PrintElements() {
- PrintElements(stdout);
- }
- void PrintElements(FILE* out);
- inline void PrintTransitions() {
- PrintTransitions(stdout);
- }
- void PrintTransitions(FILE* out);
+ void PrintProperties(FILE* out = stdout);
+ void PrintElements(FILE* out = stdout);
+ void PrintTransitions(FILE* out = stdout);
#endif
void PrintElementsTransition(
@@ -2812,7 +2782,11 @@
Handle<Name> name,
DeleteMode mode);
- MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index);
+ static Handle<Object> DeleteElement(Handle<JSObject> object,
+ uint32_t index,
+ DeleteMode mode);
+ static Handle<Object> DeleteElementWithInterceptor(Handle<JSObject> object,
+ uint32_t index);
MUST_USE_RESULT MaybeObject* DeleteFastElement(uint32_t index);
MUST_USE_RESULT MaybeObject* DeleteDictionaryElement(uint32_t index,
@@ -3250,10 +3224,7 @@
#ifdef OBJECT_PRINT
// Print all the descriptors.
- inline void PrintDescriptors() {
- PrintDescriptors(stdout);
- }
- void PrintDescriptors(FILE* out);
+ void PrintDescriptors(FILE* out = stdout);
#endif
#ifdef DEBUG
@@ -3746,10 +3717,7 @@
MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key);
#ifdef OBJECT_PRINT
- inline void Print() {
- Print(stdout);
- }
- void Print(FILE* out);
+ void Print(FILE* out = stdout);
#endif
// Returns the key (slow).
Object* SlowReverseLookup(Object* value);
@@ -4831,10 +4799,7 @@
static const char* ICState2String(InlineCacheState state);
static const char* StubType2String(StubType type);
static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
- inline void Disassemble(const char* name) {
- Disassemble(name, stdout);
- }
- void Disassemble(const char* name, FILE* out);
+ void Disassemble(const char* name, FILE* out = stdout);
#endif // ENABLE_DISASSEMBLER
// [instruction_size]: Size of the native instructions
@@ -7132,10 +7097,7 @@
DECL_ACCESSORS(next_function_link, Object)
// Prints the name of the function using PrintF.
- inline void PrintName() {
- PrintName(stdout);
- }
- void PrintName(FILE* out);
+ void PrintName(FILE* out = stdout);
// Casting.
static inline JSFunction* cast(Object* obj);
@@ -8380,13 +8342,9 @@
// Dispatched behavior.
void StringShortPrint(StringStream* accumulator);
#ifdef OBJECT_PRINT
- inline void StringPrint() {
- StringPrint(stdout);
- }
- void StringPrint(FILE* out);
-
char* ToAsciiArray();
#endif
+ DECLARE_PRINTER(String)
DECLARE_VERIFIER(String)
inline bool IsFlat();
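
Most of the objects.h churn above is one idiom: the inline Foo() { Foo(stdout); } forwarding overloads collapse into a single declaration with a defaulted stream argument. Both call forms keep working:

    object->ShortPrint();        // defaults to stdout, as before
    object->ShortPrint(stderr);  // explicit stream still accepted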
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 788f027..1f77d5a 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -39,7 +39,7 @@
void OptimizingCompilerThread::Run() {
#ifdef DEBUG
- { ScopedLock lock(thread_id_mutex_);
+ { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
thread_id_ = ThreadId::Current().ToInteger();
}
#endif
@@ -48,8 +48,8 @@
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- int64_t epoch = 0;
- if (FLAG_trace_concurrent_recompilation) epoch = OS::Ticks();
+ ElapsedTimer total_timer;
+ if (FLAG_trace_concurrent_recompilation) total_timer.Start();
while (true) {
input_queue_semaphore_->Wait();
@@ -65,7 +65,7 @@
break;
case STOP:
if (FLAG_trace_concurrent_recompilation) {
- time_spent_total_ = OS::Ticks() - epoch;
+ time_spent_total_ = total_timer.Elapsed();
}
stop_semaphore_->Signal();
return;
@@ -81,13 +81,13 @@
continue;
}
- int64_t compiling_start = 0;
- if (FLAG_trace_concurrent_recompilation) compiling_start = OS::Ticks();
+ ElapsedTimer compiling_timer;
+ if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
CompileNext();
if (FLAG_trace_concurrent_recompilation) {
- time_spent_compiling_ += OS::Ticks() - compiling_start;
+ time_spent_compiling_ += compiling_timer.Elapsed();
}
}
}
@@ -108,7 +108,7 @@
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- ScopedLock mark_and_queue(install_mutex_);
+ LockGuard<Mutex> mark_and_queue(&install_mutex_);
{ Heap::RelocationLock relocation_lock(isolate_->heap());
AllowHandleDereference ahd;
optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
@@ -175,9 +175,7 @@
}
if (FLAG_trace_concurrent_recompilation) {
- double compile_time = static_cast<double>(time_spent_compiling_);
- double total_time = static_cast<double>(time_spent_total_);
- double percentage = (compile_time * 100) / total_time;
+ double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
}
@@ -191,7 +189,7 @@
OptimizingCompiler* compiler;
while (true) {
{ // Memory barrier to ensure marked functions are queued.
- ScopedLock marked_and_queued(install_mutex_);
+ LockGuard<Mutex> marked_and_queued(&install_mutex_);
if (!output_queue_.Dequeue(&compiler)) return;
}
Compiler::InstallOptimizedCode(compiler);
@@ -213,7 +211,7 @@
#ifdef DEBUG
bool OptimizingCompilerThread::IsOptimizerThread() {
if (!FLAG_concurrent_recompilation) return false;
- ScopedLock lock(thread_id_mutex_);
+ LockGuard<Mutex> lock_guard(&thread_id_mutex_);
return ThreadId::Current().ToInteger() == thread_id_;
}
#endif
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index aff94ed..10ed420 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -31,6 +31,8 @@
#include "atomicops.h"
#include "flags.h"
#include "platform.h"
+#include "platform/mutex.h"
+#include "platform/time.h"
#include "unbound-queue-inl.h"
namespace v8 {
@@ -46,14 +48,10 @@
Thread("OptimizingCompilerThread"),
#ifdef DEBUG
thread_id_(0),
- thread_id_mutex_(OS::CreateMutex()),
#endif
isolate_(isolate),
stop_semaphore_(OS::CreateSemaphore(0)),
- input_queue_semaphore_(OS::CreateSemaphore(0)),
- install_mutex_(OS::CreateMutex()),
- time_spent_compiling_(0),
- time_spent_total_(0) {
+ input_queue_semaphore_(OS::CreateSemaphore(0)) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
}
@@ -83,11 +81,9 @@
#endif
~OptimizingCompilerThread() {
- delete install_mutex_;
delete input_queue_semaphore_;
delete stop_semaphore_;
#ifdef DEBUG
- delete thread_id_mutex_;
#endif
}
@@ -101,7 +97,7 @@
#ifdef DEBUG
int thread_id_;
- Mutex* thread_id_mutex_;
+ Mutex thread_id_mutex_;
#endif
Isolate* isolate_;
@@ -109,11 +105,11 @@
Semaphore* input_queue_semaphore_;
UnboundQueue<OptimizingCompiler*> input_queue_;
UnboundQueue<OptimizingCompiler*> output_queue_;
- Mutex* install_mutex_;
+ Mutex install_mutex_;
volatile AtomicWord stop_thread_;
volatile Atomic32 queue_length_;
- int64_t time_spent_compiling_;
- int64_t time_spent_total_;
+ TimeDelta time_spent_compiling_;
+ TimeDelta time_spent_total_;
};
} } // namespace v8::internal
diff --git a/src/parser.cc b/src/parser.cc
index ccab7ec..e4a6bb2 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -569,10 +569,13 @@
FunctionLiteral* Parser::ParseProgram() {
- HistogramTimerScope timer(isolate()->counters()->parse());
+ HistogramTimerScope timer_scope(isolate()->counters()->parse());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
// Initialize parser state.
@@ -593,7 +596,7 @@
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
if (info()->is_eval()) {
PrintF("[parsing eval");
} else if (info()->script()->name()->IsString()) {
@@ -697,10 +700,13 @@
FunctionLiteral* Parser::ParseLazy() {
- HistogramTimerScope timer(isolate()->counters()->parse_lazy());
+ HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
Handle<String> source(String::cast(script_->source()));
isolate()->counters()->total_parse_size()->Increment(source->length());
- int64_t start = FLAG_trace_parse ? OS::Ticks() : 0;
+ ElapsedTimer timer;
+ if (FLAG_trace_parse) {
+ timer.Start();
+ }
Handle<SharedFunctionInfo> shared_info = info()->shared_info();
// Initialize parser state.
@@ -720,7 +726,7 @@
}
if (FLAG_trace_parse && result != NULL) {
- double ms = static_cast<double>(OS::Ticks() - start) / 1000;
+ double ms = timer.Elapsed().InMillisecondsF();
SmartArrayPointer<char> name_chars = result->debug_name()->ToCString();
PrintF("[parsing function: %s - took %0.3f ms]\n", *name_chars, ms);
}
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
index 4c7b017..7bedfe8 100644
--- a/src/platform-cygwin.cc
+++ b/src/platform-cygwin.cc
@@ -87,7 +87,7 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
@@ -471,7 +471,7 @@
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
index e0917fa..0b32b32 100644
--- a/src/platform-freebsd.cc
+++ b/src/platform-freebsd.cc
@@ -95,7 +95,7 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
@@ -436,7 +436,7 @@
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
index ef97449..37b4b11 100644
--- a/src/platform-linux.cc
+++ b/src/platform-linux.cc
@@ -151,7 +151,7 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
@@ -569,9 +569,9 @@
void OS::SetUp() {
// Seed the random number generator. We preserve millisecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
index 6135cd1..7aa02a7 100644
--- a/src/platform-macos.cc
+++ b/src/platform-macos.cc
@@ -93,7 +93,7 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
@@ -441,9 +441,9 @@
void OS::SetUp() {
// Seed the random number generator. We preserve millisecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
index e591601..114b8e2 100644
--- a/src/platform-openbsd.cc
+++ b/src/platform-openbsd.cc
@@ -93,7 +93,7 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
@@ -500,9 +500,9 @@
void OS::SetUp() {
// Seed the random number generator. We preserve millisecond resolution.
- uint64_t seed = Ticks() ^ (getpid() << 16);
+ uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis()) ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
index 58d0a24..aaf0ca7 100644
--- a/src/platform-posix.cc
+++ b/src/platform-posix.cc
@@ -313,19 +313,7 @@
double OS::TimeCurrentMillis() {
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0) return 0.0;
- return (static_cast<double>(tv.tv_sec) * 1000) +
- (static_cast<double>(tv.tv_usec) / 1000);
-}
-
-
-int64_t OS::Ticks() {
- // gettimeofday has microsecond resolution.
- struct timeval tv;
- if (gettimeofday(&tv, NULL) < 0)
- return 0;
- return (static_cast<int64_t>(tv.tv_sec) * 1000000) + tv.tv_usec;
+ return Time::Now().ToJsTime();
}
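
OS::TimeCurrentMillis() is now a thin wrapper over the new Time class. A small sketch of the JS-time round trip, assuming the platform/time.h API added later in this patch:

    #include "checks.h"
    #include "platform/time.h"

    void JsTimeRoundTrip() {
      double ms = Time::Now().ToJsTime();  // Milliseconds since the Unix epoch.
      Time t = Time::FromJsTime(ms);
      // ToJsTime() divides the microsecond count by 1000 and FromJsTime()
      // multiplies back, so the round trip is exact while the microsecond
      // count fits in a double's 53-bit mantissa (about 285 years).
      ASSERT(t.ToJsTime() == ms);
    }
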
@@ -751,48 +739,6 @@
}
-class POSIXMutex : public Mutex {
- public:
- POSIXMutex() {
- pthread_mutexattr_t attr;
- memset(&attr, 0, sizeof(attr));
- int result = pthread_mutexattr_init(&attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
- ASSERT(result == 0);
- result = pthread_mutex_init(&mutex_, &attr);
- ASSERT(result == 0);
- result = pthread_mutexattr_destroy(&attr);
- ASSERT(result == 0);
- USE(result);
- }
-
- virtual ~POSIXMutex() { pthread_mutex_destroy(&mutex_); }
-
- virtual int Lock() { return pthread_mutex_lock(&mutex_); }
-
- virtual int Unlock() { return pthread_mutex_unlock(&mutex_); }
-
- virtual bool TryLock() {
- int result = pthread_mutex_trylock(&mutex_);
- // Return false if the lock is busy and locking failed.
- if (result == EBUSY) {
- return false;
- }
- ASSERT(result == 0); // Verify no other errors.
- return true;
- }
-
- private:
- pthread_mutex_t mutex_; // Pthread mutex for POSIX platforms.
-};
-
-
-Mutex* OS::CreateMutex() {
- return new POSIXMutex();
-}
-
-
// ----------------------------------------------------------------------------
// POSIX socket support.
//
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
index b1d88af..dd5c7a0 100644
--- a/src/platform-solaris.cc
+++ b/src/platform-solaris.cc
@@ -110,7 +110,7 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
@@ -479,7 +479,7 @@
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
index c136631..2775453 100644
--- a/src/platform-win32.cc
+++ b/src/platform-win32.cc
@@ -38,7 +38,6 @@
#endif // MINGW_HAS_SECURE_API
#endif // __MINGW32__
-#define V8_WIN32_HEADERS_FULL
#include "win32-headers.h"
#include "v8.h"
@@ -246,19 +245,15 @@
// timestamps are represented as doubles in milliseconds since 00:00:00 UTC,
// January 1, 1970.
-class Time {
+class Win32Time {
public:
// Constructors.
- Time();
- explicit Time(double jstime);
- Time(int year, int mon, int day, int hour, int min, int sec);
+ explicit Win32Time(double jstime);
+ Win32Time(int year, int mon, int day, int hour, int min, int sec);
// Convert timestamp to JavaScript representation.
double ToJSTime();
- // Set timestamp to current time.
- void SetToCurrentTime();
-
// Returns the local timezone offset in milliseconds east of UTC. This is
// the number of milliseconds you must add to UTC to get local time, i.e.
// LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
@@ -300,10 +295,6 @@
// Return whether or not daylight savings time is in effect at this time.
bool InDST();
- // Return the difference (in milliseconds) between this timestamp and
- // another timestamp.
- int64_t Diff(Time* other);
-
// Accessor for FILETIME representation.
FILETIME& ft() { return time_.ft_; }
@@ -325,26 +316,20 @@
// Static variables.
-bool Time::tz_initialized_ = false;
-TIME_ZONE_INFORMATION Time::tzinfo_;
-char Time::std_tz_name_[kTzNameSize];
-char Time::dst_tz_name_[kTzNameSize];
-
-
-// Initialize timestamp to start of epoc.
-Time::Time() {
- t() = 0;
-}
+bool Win32Time::tz_initialized_ = false;
+TIME_ZONE_INFORMATION Win32Time::tzinfo_;
+char Win32Time::std_tz_name_[kTzNameSize];
+char Win32Time::dst_tz_name_[kTzNameSize];
// Initialize timestamp from a JavaScript timestamp.
-Time::Time(double jstime) {
+Win32Time::Win32Time(double jstime) {
t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
}
// Initialize timestamp from date/time components.
-Time::Time(int year, int mon, int day, int hour, int min, int sec) {
+Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
SYSTEMTIME st;
st.wYear = year;
st.wMonth = mon;
@@ -358,14 +343,14 @@
// Convert timestamp to JavaScript timestamp.
-double Time::ToJSTime() {
+double Win32Time::ToJSTime() {
return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
}
// Guess the name of the timezone from the bias.
// The guess is very biased towards the northern hemisphere.
-const char* Time::GuessTimezoneNameFromBias(int bias) {
+const char* Win32Time::GuessTimezoneNameFromBias(int bias) {
static const int kHour = 60;
switch (-bias) {
case -9*kHour: return "Alaska";
@@ -390,7 +375,7 @@
// Initialize timezone information. The timezone information is obtained from
// Windows. If we cannot get the timezone information, we fall back to CET.
// Please notice that this code is not thread-safe.
-void Time::TzSet() {
+void Win32Time::TzSet() {
// Just return if timezone information has already been initialized.
if (tz_initialized_) return;
@@ -439,78 +424,16 @@
}
-// Return the difference in milliseconds between this and another timestamp.
-int64_t Time::Diff(Time* other) {
- return (t() - other->t()) / kTimeScaler;
-}
-
-
-// Set timestamp to current time.
-void Time::SetToCurrentTime() {
- // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
- // Because we're fast, we like fast timers which have at least a
- // 1ms resolution.
- //
- // timeGetTime() provides 1ms granularity when combined with
- // timeBeginPeriod(). If the host application for v8 wants fast
- // timers, it can use timeBeginPeriod to increase the resolution.
- //
- // Using timeGetTime() has a drawback because it is a 32bit value
- // and hence rolls-over every ~49days.
- //
- // To use the clock, we use GetSystemTimeAsFileTime as our base;
- // and then use timeGetTime to extrapolate current time from the
- // start time. To deal with rollovers, we resync the clock
- // any time when more than kMaxClockElapsedTime has passed or
- // whenever timeGetTime creates a rollover.
-
- static bool initialized = false;
- static TimeStamp init_time;
- static DWORD init_ticks;
- static const int64_t kHundredNanosecondsPerSecond = 10000000;
- static const int64_t kMaxClockElapsedTime =
- 60*kHundredNanosecondsPerSecond; // 1 minute
-
- // If we are uninitialized, we need to resync the clock.
- bool needs_resync = !initialized;
-
- // Get the current time.
- TimeStamp time_now;
- GetSystemTimeAsFileTime(&time_now.ft_);
- DWORD ticks_now = timeGetTime();
-
- // Check if we need to resync due to clock rollover.
- needs_resync |= ticks_now < init_ticks;
-
- // Check if we need to resync due to elapsed time.
- needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
- // Check if we need to resync due to backwards time change.
- needs_resync |= time_now.t_ < init_time.t_;
-
- // Resync the clock if necessary.
- if (needs_resync) {
- GetSystemTimeAsFileTime(&init_time.ft_);
- init_ticks = ticks_now = timeGetTime();
- initialized = true;
- }
-
- // Finally, compute the actual time. Why is this so hard.
- DWORD elapsed = ticks_now - init_ticks;
- this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
-
-
// Return the local timezone offset in milliseconds east of UTC. This
// takes into account whether daylight saving is in effect at the time.
// Only times in the 32-bit Unix range may be passed to this function.
// Also, adding the time-zone offset to the input must not overflow.
// The function EquivalentTime() in date.js guarantees this.
-int64_t Time::LocalOffset() {
+int64_t Win32Time::LocalOffset() {
// Initialize timezone information, if needed.
TzSet();
- Time rounded_to_second(*this);
+ Win32Time rounded_to_second(*this);
rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
1000 * kTimeScaler;
// Convert to local time using POSIX localtime function.
@@ -541,7 +464,7 @@
// Return whether or not daylight savings time is in effect at this time.
-bool Time::InDST() {
+bool Win32Time::InDST() {
// Initialize timezone information, if needed.
TzSet();
@@ -565,14 +488,14 @@
// Return the daylight savings time offset for this time.
-int64_t Time::DaylightSavingsOffset() {
+int64_t Win32Time::DaylightSavingsOffset() {
return InDST() ? 60 * kMsPerMinute : 0;
}
// Returns a string identifying the current timezone for the
// timestamp taking into account daylight saving.
-char* Time::LocalTimezone() {
+char* Win32Time::LocalTimezone() {
// Return the standard or DST time zone name based on whether daylight
// saving is in effect at the given time.
return InDST() ? dst_tz_name_ : std_tz_name_;
@@ -614,22 +537,14 @@
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
double OS::TimeCurrentMillis() {
- Time t;
- t.SetToCurrentTime();
- return t.ToJSTime();
-}
-
-
-// Returns the tickcounter based on timeGetTime.
-int64_t OS::Ticks() {
- return timeGetTime() * 1000; // Convert to microseconds.
+ return Time::Now().ToJsTime();
}
// Returns a string identifying the current timezone taking into
// account daylight saving.
const char* OS::LocalTimezone(double time) {
- return Time(time).LocalTimezone();
+ return Win32Time(time).LocalTimezone();
}
@@ -637,7 +552,7 @@
// taking daylight savings time into account.
double OS::LocalTimeOffset() {
// Use current time, rounded to the millisecond.
- Time t(TimeCurrentMillis());
+ Win32Time t(TimeCurrentMillis());
// Win32Time::LocalOffset includes any daylight savings offset; subtract it.
return static_cast<double>(t.LocalOffset() - t.DaylightSavingsOffset());
}
@@ -646,7 +561,7 @@
// Returns the daylight savings offset in milliseconds for the given
// time.
double OS::DaylightSavingsOffset(double time) {
- int64_t offset = Time(time).DaylightSavingsOffset();
+ int64_t offset = Win32Time(time).DaylightSavingsOffset();
return static_cast<double>(offset);
}
@@ -846,7 +761,7 @@
static void UpdateAllocatedSpaceLimits(void* address, int size) {
ASSERT(limit_mutex != NULL);
- ScopedLock lock(limit_mutex);
+ LockGuard<Mutex> lock_guard(limit_mutex);
lowest_ever_allocated = Min(lowest_ever_allocated, address);
highest_ever_allocated =
@@ -1701,46 +1616,6 @@
// ----------------------------------------------------------------------------
-// Win32 mutex support.
-//
-// On Win32 mutexes are implemented using CRITICAL_SECTION objects. These are
-// faster than Win32 Mutex objects because they are implemented using user mode
-// atomic instructions. Therefore we only do ring transitions if there is lock
-// contention.
-
-class Win32Mutex : public Mutex {
- public:
- Win32Mutex() { InitializeCriticalSection(&cs_); }
-
- virtual ~Win32Mutex() { DeleteCriticalSection(&cs_); }
-
- virtual int Lock() {
- EnterCriticalSection(&cs_);
- return 0;
- }
-
- virtual int Unlock() {
- LeaveCriticalSection(&cs_);
- return 0;
- }
-
-
- virtual bool TryLock() {
- // Returns non-zero if critical section is entered successfully entered.
- return TryEnterCriticalSection(&cs_);
- }
-
- private:
- CRITICAL_SECTION cs_; // Critical section used for mutex
-};
-
-
-Mutex* OS::CreateMutex() {
- return new Win32Mutex();
-}
-
-
-// ----------------------------------------------------------------------------
// Win32 semaphore support.
//
// On Win32 semaphores are implemented using Win32 Semaphore objects. The
@@ -1983,7 +1858,7 @@
// call this setup code within the same millisecond.
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srand(static_cast<unsigned int>(seed));
- limit_mutex = CreateMutex();
+ limit_mutex = new Mutex();
}
diff --git a/src/platform.h b/src/platform.h
index a42bb5a..5f93106 100644
--- a/src/platform.h
+++ b/src/platform.h
@@ -46,7 +46,7 @@
#include <cstdarg>
-#include "lazy-instance.h"
+#include "platform/mutex.h"
#include "utils.h"
#include "v8globals.h"
@@ -94,7 +94,6 @@
namespace internal {
class Semaphore;
-class Mutex;
double ceiling(double x);
double modulo(double x, double y);
@@ -192,10 +191,6 @@
// micro-second resolution.
static int GetUserTime(uint32_t* secs, uint32_t* usecs);
- // Get a tick counter normalized to one tick per microsecond.
- // Used for calculating time intervals.
- static int64_t Ticks();
-
// Returns current time as the number of milliseconds since
// 00:00:00 UTC, January 1, 1970.
static double TimeCurrentMillis();
@@ -293,10 +288,6 @@
static int StackWalk(Vector<StackFrame> frames);
- // Factory method for creating platform dependent Mutex.
- // Please use delete to reclaim the storage for the returned Mutex.
- static Mutex* CreateMutex();
-
// Factory method for creating platform dependent Semaphore.
// Please use delete to reclaim the storage for the returned Semaphore.
static Semaphore* CreateSemaphore(int count);
@@ -688,72 +679,6 @@
// ----------------------------------------------------------------------------
-// Mutex
-//
-// Mutexes are used for serializing access to non-reentrant sections of code.
-// The implementations of mutex should allow for nested/recursive locking.
-
-class Mutex {
- public:
- virtual ~Mutex() {}
-
- // Locks the given mutex. If the mutex is currently unlocked, it becomes
- // locked and owned by the calling thread, and immediately. If the mutex
- // is already locked by another thread, suspends the calling thread until
- // the mutex is unlocked.
- virtual int Lock() = 0;
-
- // Unlocks the given mutex. The mutex is assumed to be locked and owned by
- // the calling thread on entrance.
- virtual int Unlock() = 0;
-
- // Tries to lock the given mutex. Returns whether the mutex was
- // successfully locked.
- virtual bool TryLock() = 0;
-};
-
-struct CreateMutexTrait {
- static Mutex* Create() {
- return OS::CreateMutex();
- }
-};
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-// void my_function() {
-// ScopedLock my_lock(my_mutex.Pointer());
-// // Do something.
-// }
-//
-typedef LazyDynamicInstance<
- Mutex, CreateMutexTrait, ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-// ----------------------------------------------------------------------------
-// ScopedLock
-//
-// Stack-allocated ScopedLocks provide block-scoped locking and
-// unlocking of a mutex.
-class ScopedLock {
- public:
- explicit ScopedLock(Mutex* mutex): mutex_(mutex) {
- ASSERT(mutex_ != NULL);
- mutex_->Lock();
- }
- ~ScopedLock() {
- mutex_->Unlock();
- }
-
- private:
- Mutex* mutex_;
- DISALLOW_COPY_AND_ASSIGN(ScopedLock);
-};
-
-
-// ----------------------------------------------------------------------------
// Socket
//
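
For call sites, the migration is mechanical: OS::CreateMutex() plus ScopedLock become an embedded Mutex plus LockGuard<Mutex>, with no delete needed. A before/after sketch under the new platform/mutex.h API; the Counter class is illustrative only:

    #include "platform/mutex.h"

    // Before: Mutex* mutex_ = OS::CreateMutex();  ...  ScopedLock lock(mutex_);
    class Counter {
     public:
      Counter() : value_(0) {}
      void Increment() {
        LockGuard<Mutex> lock_guard(&mutex_);  // Unlocks on scope exit.
        value_++;
      }
     private:
      Mutex mutex_;  // Embedded by value; no CreateMutex()/delete pair.
      int value_;
    };
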
diff --git a/src/platform/elapsed-timer.h b/src/platform/elapsed-timer.h
new file mode 100644
index 0000000..e5bcf23
--- /dev/null
+++ b/src/platform/elapsed-timer.h
@@ -0,0 +1,120 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
+#define V8_PLATFORM_ELAPSED_TIMER_H_
+
+#include "checks.h"
+#include "platform/time.h"
+
+namespace v8 {
+namespace internal {
+
+class ElapsedTimer V8_FINAL BASE_EMBEDDED {
+ public:
+#ifdef DEBUG
+ ElapsedTimer() : started_(false) {}
+#endif
+
+  // Starts this timer. Once started, a timer can be checked with
+ // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
+ // This method must not be called on an already started timer.
+ void Start() {
+ ASSERT(!IsStarted());
+ start_ticks_ = Now();
+#ifdef DEBUG
+ started_ = true;
+#endif
+ ASSERT(IsStarted());
+ }
+
+ // Stops this timer. Must not be called on a timer that was not
+ // started before.
+ void Stop() {
+ ASSERT(IsStarted());
+ start_ticks_ = TimeTicks();
+#ifdef DEBUG
+ started_ = false;
+#endif
+ ASSERT(!IsStarted());
+ }
+
+ // Returns |true| if this timer was started previously.
+ bool IsStarted() const {
+ ASSERT(started_ || start_ticks_.IsNull());
+ ASSERT(!started_ || !start_ticks_.IsNull());
+ return !start_ticks_.IsNull();
+ }
+
+ // Restarts the timer and returns the time elapsed since the previous start.
+ // This method is equivalent to obtaining the elapsed time with |Elapsed()|
+ // and then starting the timer again, but does so in one single operation,
+ // avoiding the need to obtain the clock value twice. It may only be called
+ // on a previously started timer.
+ TimeDelta Restart() {
+ ASSERT(IsStarted());
+ TimeTicks ticks = Now();
+ TimeDelta elapsed = ticks - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ start_ticks_ = ticks;
+ ASSERT(IsStarted());
+ return elapsed;
+ }
+
+ // Returns the time elapsed since the previous start. This method may only
+ // be called on a previously started timer.
+ MUST_USE_RESULT TimeDelta Elapsed() const {
+ ASSERT(IsStarted());
+ TimeDelta elapsed = Now() - start_ticks_;
+ ASSERT(elapsed.InMicroseconds() >= 0);
+ return elapsed;
+ }
+
+ // Returns |true| if the specified |time_delta| has elapsed since the
+ // previous start, or |false| if not. This method may only be called on
+ // a previously started timer.
+ MUST_USE_RESULT bool HasExpired(TimeDelta time_delta) const {
+ ASSERT(IsStarted());
+ return Elapsed() >= time_delta;
+ }
+
+ private:
+ MUST_USE_RESULT V8_INLINE(static TimeTicks Now()) {
+ TimeTicks now = TimeTicks::HighResNow();
+ ASSERT(!now.IsNull());
+ return now;
+ }
+
+ TimeTicks start_ticks_;
+#ifdef DEBUG
+ bool started_;
+#endif
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_ELAPSED_TIMER_H_
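
A short usage sketch for this header, exercising HasExpired() and Restart(), which the rest of the patch does not demonstrate; the 5 ms budget and ProcessOneItem() are illustrative:

    #include "platform/elapsed-timer.h"
    #include "platform/time.h"

    void ProcessOneItem();  // Hypothetical unit of work.

    void ProcessWithBudget() {
      ElapsedTimer timer;
      timer.Start();
      const TimeDelta budget = TimeDelta::FromMilliseconds(5);
      while (!timer.HasExpired(budget)) {
        ProcessOneItem();
      }
      // Restart() reads the clock once: it returns the elapsed time and
      // immediately begins a new measurement.
      TimeDelta spent = timer.Restart();
      USE(spent);
    }
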
diff --git a/src/platform/mutex.cc b/src/platform/mutex.cc
new file mode 100644
index 0000000..c8d75c7
--- /dev/null
+++ b/src/platform/mutex.cc
@@ -0,0 +1,223 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/mutex.h"
+
+#include <cerrno>
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_POSIX
+
+static V8_INLINE(void InitializeNativeHandle(pthread_mutex_t* mutex)) {
+ int result;
+#if defined(DEBUG)
+ // Use an error checking mutex in debug mode.
+ pthread_mutexattr_t attr;
+ result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+#else
+ // Use a fast mutex (default attributes).
+ result = pthread_mutex_init(mutex, NULL);
+#endif // defined(DEBUG)
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex)) {
+ pthread_mutexattr_t attr;
+ int result = pthread_mutexattr_init(&attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ ASSERT_EQ(0, result);
+ result = pthread_mutex_init(mutex, &attr);
+ ASSERT_EQ(0, result);
+ result = pthread_mutexattr_destroy(&attr);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void DestroyNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_destroy(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void LockNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_lock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(void UnlockNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_unlock(mutex);
+ ASSERT_EQ(0, result);
+ USE(result);
+}
+
+
+static V8_INLINE(bool TryLockNativeHandle(pthread_mutex_t* mutex)) {
+ int result = pthread_mutex_trylock(mutex);
+ if (result == EBUSY) {
+ return false;
+ }
+ ASSERT_EQ(0, result);
+ return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE(void InitializeNativeHandle(CRITICAL_SECTION* cs)) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE(void InitializeRecursiveNativeHandle(CRITICAL_SECTION* cs)) {
+ InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE(void DestroyNativeHandle(CRITICAL_SECTION* cs)) {
+ DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE(void LockNativeHandle(CRITICAL_SECTION* cs)) {
+ EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE(void UnlockNativeHandle(CRITICAL_SECTION* cs)) {
+ LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE(bool TryLockNativeHandle(CRITICAL_SECTION* cs)) {
+ return TryEnterCriticalSection(cs);
+}
+
+#endif // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+ InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+}
+
+
+void Mutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_EQ(1, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_EQ(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+ InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+ level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+ DestroyNativeHandle(&native_handle_);
+ ASSERT_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+ LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+ ASSERT_LT(0, level_);
+ level_--;
+#endif
+ UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+ if (!TryLockNativeHandle(&native_handle_)) {
+ return false;
+ }
+#ifdef DEBUG
+ ASSERT_LE(0, level_);
+ level_++;
+#endif
+ return true;
+}
+
+} } // namespace v8::internal
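
The debug-only level_ bookkeeping gives the two classes different contracts: Lock() on a plain Mutex asserts level_ == 0, so recursive acquisition aborts in debug builds (POSIX debug builds additionally use PTHREAD_MUTEX_ERRORCHECK), while RecursiveMutex only requires matching unlocks. A sketch of the distinction:

    #include "platform/mutex.h"

    static Mutex flat_mutex;
    static RecursiveMutex nested_mutex;

    void OwnershipDemo() {
      flat_mutex.Lock();
      // flat_mutex.Lock();  // Would trip ASSERT_EQ(0, level_) in debug mode.
      flat_mutex.Unlock();

      nested_mutex.Lock();
      nested_mutex.Lock();    // Fine: the ownership level is now 2.
      nested_mutex.Unlock();
      nested_mutex.Unlock();  // Released after a matching number of unlocks.
    }
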
diff --git a/src/platform/mutex.h b/src/platform/mutex.h
new file mode 100644
index 0000000..1940542
--- /dev/null
+++ b/src/platform/mutex.h
@@ -0,0 +1,223 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_MUTEX_H_
+#define V8_PLATFORM_MUTEX_H_
+
+#include "lazy-instance.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_POSIX
+#include <pthread.h> // NOLINT
+#endif
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+// either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+// |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+// attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
+
+class Mutex V8_FINAL {
+ public:
+ Mutex();
+ ~Mutex();
+
+ // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread immediately. If the mutex
+ // is already locked by another thread, suspends the calling thread until
+ // the mutex is unlocked.
+ void Lock();
+
+ // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+ // the calling thread on entrance.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+#if V8_OS_POSIX
+ typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+ typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+ NativeHandle& native_handle() V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<Mutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<Mutex,
+ DefaultConstructTrait<Mutex>,
+ ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+// when it successfully calls either |Lock()| or |TryLock()|. During this
+// period, the thread may make additional calls to |Lock()| or |TryLock()|.
+// The period of ownership ends when the thread makes a matching number of
+// calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+// calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+// they attempt to claim ownership of the recursive mutex.
+// - The maximum number of times that a recursive mutex may be locked is
+// unspecified, but after that number is reached, calls to |Lock()| will
+//    probably abort the process and calls to |TryLock()| will return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex V8_FINAL {
+ public:
+ RecursiveMutex();
+ ~RecursiveMutex();
+
+ // Locks the mutex. If another thread has already locked the mutex, a call to
+ // |Lock()| will block execution until the lock is acquired. A thread may call
+ // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+ // after the thread makes a matching number of calls to |Unlock()|.
+ // The behavior is undefined if the mutex is not unlocked before being
+ // destroyed, i.e. some thread still owns it.
+ void Lock();
+
+  // Unlocks the mutex if its level of ownership is 1 (there was exactly one
+  // more call to |Lock()| than there were calls to |Unlock()| made by this
+  // thread); otherwise, reduces the level of ownership by 1. The mutex must be
+  // locked by the current thread of execution, otherwise the behavior is
+  // undefined.
+ void Unlock();
+
+ // Tries to lock the given mutex. Returns whether the mutex was
+ // successfully locked.
+ bool TryLock() V8_WARN_UNUSED_RESULT;
+
+ // The implementation-defined native handle type.
+ typedef Mutex::NativeHandle NativeHandle;
+
+ NativeHandle& native_handle() V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+ const NativeHandle& native_handle() const V8_WARN_UNUSED_RESULT {
+ return native_handle_;
+ }
+
+ private:
+ NativeHandle native_handle_;
+#ifdef DEBUG
+ int level_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
+
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+// static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+// void my_function() {
+// LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+// // Do something.
+// }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+ DefaultConstructTrait<RecursiveMutex>,
+ ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard V8_FINAL {
+ public:
+ explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+ ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+ Mutex* mutex_;
+
+ LockGuard(const LockGuard<Mutex>& other) V8_DELETE;
+ LockGuard<Mutex>& operator=(const LockGuard<Mutex>& other) V8_DELETE;
+};
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_MUTEX_H_
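
Since TryLock() is declared V8_WARN_UNUSED_RESULT, its result has to be consumed. A sketch combining it with LazyMutex and the RAII LockGuard defined above; the counter is illustrative:

    #include "platform/mutex.h"

    static LazyMutex stats_mutex = LAZY_MUTEX_INITIALIZER;
    static int stats_counter = 0;

    void BumpIfUncontended() {
      if (stats_mutex.Pointer()->TryLock()) {  // Result must not be discarded.
        stats_counter++;
        stats_mutex.Pointer()->Unlock();
      }
    }

    void BumpAlways() {
      LockGuard<Mutex> lock_guard(stats_mutex.Pointer());
      stats_counter++;
    }
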
diff --git a/src/platform/time.cc b/src/platform/time.cc
new file mode 100644
index 0000000..653eb14
--- /dev/null
+++ b/src/platform/time.cc
@@ -0,0 +1,526 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "platform/time.h"
+
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+
+#include <cstring>
+
+#include "checks.h"
+#include "cpu.h"
+#include "platform.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+#if V8_OS_WIN
+// Prototype for GetTickCount64() procedure.
+extern "C" {
+typedef ULONGLONG (WINAPI *GETTICKCOUNT64PROC)(void);
+}
+#endif
+
+namespace v8 {
+namespace internal {
+
+TimeDelta TimeDelta::FromDays(int days) {
+ return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+
+TimeDelta TimeDelta::FromHours(int hours) {
+ return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+
+TimeDelta TimeDelta::FromMinutes(int minutes) {
+ return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+
+TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
+ return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
+}
+
+
+TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
+ return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
+}
+
+
+TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
+ return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
+}
+
+
+int TimeDelta::InDays() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+
+int TimeDelta::InHours() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+
+int TimeDelta::InMinutes() const {
+ return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+
+double TimeDelta::InSecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+
+int64_t TimeDelta::InSeconds() const {
+ return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+
+double TimeDelta::InMillisecondsF() const {
+ return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InMilliseconds() const {
+ return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InNanoseconds() const {
+ return delta_ * Time::kNanosecondsPerMicrosecond;
+}
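
The non-F conversions above truncate toward zero via integer division, while the F variants stay exact. A quick illustration, assuming the conversions behave as defined here:

    #include "checks.h"
    #include "platform/time.h"

    void ConversionSemantics() {
      TimeDelta d = TimeDelta::FromMicroseconds(1500);
      ASSERT(d.InMilliseconds() == 1);     // Truncated.
      ASSERT(d.InMillisecondsF() == 1.5);  // Exact as a double.
      // FromNanoseconds() truncates too: 999 ns collapse to a zero delta.
      ASSERT(TimeDelta::FromNanoseconds(999).InMicroseconds() == 0);
    }
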
+
+
+#if V8_OS_WIN
+
+// We implement time using high-resolution timers so that we can get
+// timeouts which are smaller than 10-15 ms. To avoid any drift, we
+// periodically resync the internal clock to the system clock.
+class Clock V8_FINAL {
+ public:
+ Clock() : initial_time_(CurrentWallclockTime()),
+ initial_ticks_(TimeTicks::Now()) {}
+
+ Time Now() {
+ // This must be executed under lock.
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Calculate the time elapsed since we started our timer.
+ TimeDelta elapsed = TimeTicks::Now() - initial_ticks_;
+
+ // Check if we don't need to synchronize with the wallclock yet.
+ if (elapsed.InMicroseconds() <= kMaxMicrosecondsToAvoidDrift) {
+ return initial_time_ + elapsed;
+ }
+
+ // Resynchronize with the wallclock.
+ initial_ticks_ = TimeTicks::Now();
+ initial_time_ = CurrentWallclockTime();
+ return initial_time_;
+ }
+
+ Time NowFromSystemTime() {
+ // This must be executed under lock.
+ LockGuard<Mutex> lock_guard(&mutex_);
+
+ // Resynchronize with the wallclock.
+ initial_ticks_ = TimeTicks::Now();
+ initial_time_ = CurrentWallclockTime();
+ return initial_time_;
+ }
+
+ private:
+  // Time between resamplings of the wallclock for this API (1 minute).
+ static const int64_t kMaxMicrosecondsToAvoidDrift =
+ Time::kMicrosecondsPerMinute;
+
+ static Time CurrentWallclockTime() {
+ FILETIME ft;
+ ::GetSystemTimeAsFileTime(&ft);
+ return Time::FromFiletime(ft);
+ }
+
+ TimeTicks initial_ticks_;
+ Time initial_time_;
+ Mutex mutex_;
+};
+
+
+static LazyDynamicInstance<Clock,
+ DefaultCreateTrait<Clock>,
+ ThreadSafeInitOnceTrait>::type clock = LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+Time Time::Now() {
+ return clock.Pointer()->Now();
+}
+
+
+Time Time::NowFromSystemTime() {
+ return clock.Pointer()->NowFromSystemTime();
+}
+
+
+// Time between the Windows epoch and the standard (Unix) epoch.
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
+
+
+Time Time::FromFiletime(FILETIME ft) {
+ if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
+ return Time();
+ }
+ if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
+ ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
+ return Max();
+ }
+ int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
+ (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
+ return Time(us - kTimeToEpochInMicroseconds);
+}
+
+
+FILETIME Time::ToFiletime() const {
+ ASSERT(us_ >= 0);
+ FILETIME ft;
+ if (IsNull()) {
+ ft.dwLowDateTime = 0;
+ ft.dwHighDateTime = 0;
+ return ft;
+ }
+ if (IsMax()) {
+ ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
+ ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
+ return ft;
+ }
+ uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
+ ft.dwLowDateTime = static_cast<DWORD>(us);
+ ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
+ return ft;
+}
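
FILETIME counts 100-nanosecond intervals since 1601-01-01, which is where the divide-by-10 and the 11,644,473,600-second epoch shift (kTimeToEpochInMicroseconds) come from. A Windows-only round-trip sketch, assuming the conversions as defined above:

    #include "checks.h"
    #include "platform/time.h"
    #include "win32-headers.h"  // For FILETIME.

    void FiletimeRoundTrip() {
      Time now = Time::NowFromSystemTime();
      FILETIME ft = now.ToFiletime();  // 100 ns units, Windows epoch.
      Time back = Time::FromFiletime(ft);
      // The *10 and /10 are exact inverses, so microsecond precision
      // survives the round trip.
      ASSERT(back.ToJsTime() == now.ToJsTime());
    }
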
+
+#elif V8_OS_POSIX
+
+Time Time::Now() {
+ struct timeval tv;
+ int result = gettimeofday(&tv, NULL);
+ ASSERT_EQ(0, result);
+ USE(result);
+ return FromTimeval(tv);
+}
+
+
+Time Time::NowFromSystemTime() {
+ return Now();
+}
+
+
+Time Time::FromTimeval(struct timeval tv) {
+ ASSERT(tv.tv_usec >= 0);
+ ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
+ if (tv.tv_usec == 0 && tv.tv_sec == 0) {
+ return Time();
+ }
+ if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
+ tv.tv_sec == std::numeric_limits<time_t>::max()) {
+ return Max();
+ }
+ return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+
+struct timeval Time::ToTimeval() const {
+ struct timeval tv;
+ if (IsNull()) {
+ tv.tv_sec = 0;
+ tv.tv_usec = 0;
+ return tv;
+ }
+ if (IsMax()) {
+ tv.tv_sec = std::numeric_limits<time_t>::max();
+ tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
+ return tv;
+ }
+ tv.tv_sec = us_ / kMicrosecondsPerSecond;
+ tv.tv_usec = us_ % kMicrosecondsPerSecond;
+ return tv;
+}
+
+#endif // V8_OS_WIN
+
+
+Time Time::FromJsTime(double ms_since_epoch) {
+ // The epoch is a valid time, so this constructor doesn't interpret
+ // 0 as the null time.
+ if (ms_since_epoch == std::numeric_limits<double>::max()) {
+ return Max();
+ }
+ return Time(
+ static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
+}
+
+
+double Time::ToJsTime() const {
+ if (IsNull()) {
+ // Preserve 0 so the invalid result doesn't depend on the platform.
+ return 0;
+ }
+ if (IsMax()) {
+ // Preserve max without offset to prevent overflow.
+ return std::numeric_limits<double>::max();
+ }
+ return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
+}
+
+
+#if V8_OS_WIN
+
+class TickClock {
+ public:
+ virtual ~TickClock() {}
+ virtual int64_t Now() = 0;
+};
+
+
+// Overview of time counters:
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent result on a multiprocessor computer, but it is unreliable in
+// reality due to bugs in BIOS or HAL on some, especially old computers.
+// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
+// it should be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically 10
+// to 55 milliseconds) time stamp but is comparatively less expensive to
+// retrieve and more reliable.
+class HighResolutionTickClock V8_FINAL : public TickClock {
+ public:
+ explicit HighResolutionTickClock(int64_t ticks_per_second)
+ : ticks_per_second_(ticks_per_second) {
+ ASSERT_LT(0, ticks_per_second);
+ }
+ virtual ~HighResolutionTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LARGE_INTEGER now;
+ BOOL result = QueryPerformanceCounter(&now);
+ ASSERT(result);
+ USE(result);
+
+    // Intentionally calculate microseconds in a roundabout manner to avoid
+ // overflow and precision issues. Think twice before simplifying!
+ int64_t whole_seconds = now.QuadPart / ticks_per_second_;
+ int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+ int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
+ ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
+
+ // Make sure we never return 0 here, so that TimeTicks::HighResNow()
+ // will never return 0.
+ return ticks + 1;
+ }
+
+ private:
+ int64_t ticks_per_second_;
+};
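
The split into whole seconds plus leftover ticks is what keeps the multiplication in range: a naive now.QuadPart * kMicrosecondsPerSecond would overflow int64 once QuadPart exceeds 2^63 / 10^6 (about 9.2e12 ticks), which is under an hour of uptime if QPC runs at a ~3 GHz invariant-TSC rate. A standalone restatement of the safe form:

    #include "platform/time.h"

    int64_t TicksToMicroseconds(int64_t ticks, int64_t ticks_per_second) {
      int64_t whole_seconds = ticks / ticks_per_second;
      int64_t leftover_ticks = ticks % ticks_per_second;
      // leftover_ticks < ticks_per_second, so the second product stays well
      // below 2^63 for any realistic QPC frequency.
      return whole_seconds * Time::kMicrosecondsPerSecond +
             leftover_ticks * Time::kMicrosecondsPerSecond / ticks_per_second;
    }
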
+
+
+// The GetTickCount64() API is what we actually want for the regular tick
+// clock, but this is only available starting with Windows Vista.
+class WindowsVistaTickClock V8_FINAL : public TickClock {
+ public:
+ explicit WindowsVistaTickClock(GETTICKCOUNT64PROC func) : func_(func) {
+ ASSERT(func_ != NULL);
+ }
+ virtual ~WindowsVistaTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ // Query the current ticks (in ms).
+ ULONGLONG tick_count_ms = (*func_)();
+
+ // Convert to microseconds (make sure to never return 0 here).
+ return (tick_count_ms * Time::kMicrosecondsPerMillisecond) + 1;
+ }
+
+ private:
+ GETTICKCOUNT64PROC func_;
+};
+
+
+class RolloverProtectedTickClock V8_FINAL : public TickClock {
+ public:
+ // We initialize rollover_ms_ to 1 to ensure that we will never
+ // return 0 from TimeTicks::HighResNow() and TimeTicks::Now() below.
+ RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+ virtual ~RolloverProtectedTickClock() {}
+
+ virtual int64_t Now() V8_OVERRIDE {
+ LockGuard<Mutex> lock_guard(&mutex_);
+ // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
+ // every ~49.7 days. We try to track rollover ourselves, which works if
+ // TimeTicks::Now() is called at least every 49 days.
+ // Note that we do not use GetTickCount() here, since timeGetTime() gives
+ // more predictable delta values, as described here:
+ // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+ DWORD now = timeGetTime();
+ if (now < last_seen_now_) {
+ rollover_ms_ += V8_INT64_C(0x100000000); // ~49.7 days.
+ }
+ last_seen_now_ = now;
+ return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+ }
+
+ private:
+ Mutex mutex_;
+ DWORD last_seen_now_;
+ int64_t rollover_ms_;
+};
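
timeGetTime() returns a DWORD of milliseconds that wraps at 2^32 (~49.7 days); the clock above detects a wrap whenever the reading moves backwards and widens it with an accumulated offset. A standalone model of that logic, with uint32_t standing in for DWORD so the sketch stays portable:

    int64_t MonotonicMs(uint32_t now, uint32_t* last_seen, int64_t* rollover_ms) {
      if (now < *last_seen) {
        *rollover_ms += V8_INT64_C(0x100000000);  // One full DWORD wrap.
      }
      *last_seen = now;
      return now + *rollover_ms;
    }

With rollover_ms starting at 1, a reading of 0xFFFFFFF0 followed by 0x10 yields 0xFFFFFFF1 and then 0x100000011, so the reported value keeps increasing across the wrap.
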
+
+
+struct CreateTickClockTrait {
+ static TickClock* Create() {
+ // Try to load GetTickCount64() from kernel32.dll (available since Vista).
+ HMODULE kernel32 = ::GetModuleHandleA("kernel32.dll");
+ ASSERT(kernel32 != NULL);
+ FARPROC proc = ::GetProcAddress(kernel32, "GetTickCount64");
+ if (proc != NULL) {
+ return new WindowsVistaTickClock(
+ reinterpret_cast<GETTICKCOUNT64PROC>(proc));
+ }
+
+    // Fall back to the rollover-protected tick clock.
+ return new RolloverProtectedTickClock;
+ }
+};
+
+
+static LazyDynamicInstance<TickClock,
+ CreateTickClockTrait,
+ ThreadSafeInitOnceTrait>::type tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+struct CreateHighResTickClockTrait {
+ static TickClock* Create() {
+ // Check if the installed hardware supports a high-resolution performance
+    // counter, and if not, fall back to the low-resolution tick clock.
+ LARGE_INTEGER ticks_per_second;
+ if (!QueryPerformanceFrequency(&ticks_per_second)) {
+ return tick_clock.Pointer();
+ }
+
+    // On Athlon X2 CPUs (e.g. model 15), QueryPerformanceCounter
+    // is unreliable, so fall back to the low-resolution tick clock.
+ CPU cpu;
+ if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+ return tick_clock.Pointer();
+ }
+
+ return new HighResolutionTickClock(ticks_per_second.QuadPart);
+ }
+};
+
+
+static LazyDynamicInstance<TickClock,
+ CreateHighResTickClockTrait,
+ ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+ LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+TimeTicks TimeTicks::Now() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+
+TimeTicks TimeTicks::HighResNow() {
+ // Make sure we never return 0 here.
+ TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
+ ASSERT(!ticks.IsNull());
+ return ticks;
+}
+
+#else // V8_OS_WIN
+
+TimeTicks TimeTicks::Now() {
+ return HighResNow();
+}
+
+
+TimeTicks TimeTicks::HighResNow() {
+ int64_t ticks;
+#if V8_OS_MACOSX
+ static struct mach_timebase_info info;
+ if (info.denom == 0) {
+ kern_return_t result = mach_timebase_info(&info);
+ ASSERT_EQ(KERN_SUCCESS, result);
+ USE(result);
+ }
+ ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+ info.numer / info.denom);
+#elif V8_OS_SOLARIS
+ ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
+#elif V8_OS_POSIX
+ struct timespec ts;
+ int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+ ASSERT_EQ(0, result);
+ USE(result);
+ ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
+ ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+#endif // V8_OS_MACOSX
+ // Make sure we never return 0 here.
+ return TimeTicks(ticks + 1);
+}
+
+#endif // V8_OS_WIN
+
+} } // namespace v8::internal
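
The file deliberately keeps two clocks apart: Time is wall-clock time (subject to NTP adjustments, meaningful across processes), while TimeTicks is monotonic and only meaningful as a difference within one run. A sketch of the intended split; DoSomething() is a hypothetical workload:

    #include "platform/time.h"

    void DoSomething();  // Hypothetical.

    void ClockSplit() {
      // Wall clock: for timestamps that leave the process.
      double js_now = Time::Now().ToJsTime();
      USE(js_now);

      // Monotonic clock: for measuring intervals.
      TimeTicks start = TimeTicks::HighResNow();
      DoSomething();
      TimeDelta elapsed = TimeTicks::HighResNow() - start;
      USE(elapsed);
    }
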
diff --git a/src/platform/time.h b/src/platform/time.h
new file mode 100644
index 0000000..57b894d
--- /dev/null
+++ b/src/platform/time.h
@@ -0,0 +1,381 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PLATFORM_TIME_H_
+#define V8_PLATFORM_TIME_H_
+
+#include <ctime>
+#include <limits>
+
+#include "allocation.h"
+
+// Forward declarations.
+extern "C" {
+struct _FILETIME;
+struct timeval;
+}
+
+namespace v8 {
+namespace internal {
+
+class Time;
+class TimeTicks;
+
+// -----------------------------------------------------------------------------
+// TimeDelta
+//
+// This class represents a duration of time, internally represented in
+// microseconds.
+
+class TimeDelta V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeDelta() : delta_(0) {}
+
+ // Converts units of time to TimeDeltas.
+ static TimeDelta FromDays(int days);
+ static TimeDelta FromHours(int hours);
+ static TimeDelta FromMinutes(int minutes);
+ static TimeDelta FromSeconds(int64_t seconds);
+ static TimeDelta FromMilliseconds(int64_t milliseconds);
+ static TimeDelta FromMicroseconds(int64_t microseconds) {
+ return TimeDelta(microseconds);
+ }
+ static TimeDelta FromNanoseconds(int64_t nanoseconds);
+
+ // Returns the time delta in some unit. The F versions return a floating
+ // point value; the "regular" versions return a rounded-down value.
+ //
+ // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+ // to the next full millisecond.
+ int InDays() const;
+ int InHours() const;
+ int InMinutes() const;
+ double InSecondsF() const;
+ int64_t InSeconds() const;
+ double InMillisecondsF() const;
+ int64_t InMilliseconds() const;
+ int64_t InMillisecondsRoundedUp() const;
+ int64_t InMicroseconds() const { return delta_; }
+ int64_t InNanoseconds() const;
+
+ TimeDelta& operator=(const TimeDelta& other) {
+ delta_ = other.delta_;
+ return *this;
+ }
+
+ // Computations with other deltas.
+ TimeDelta operator+(const TimeDelta& other) const {
+ return TimeDelta(delta_ + other.delta_);
+ }
+ TimeDelta operator-(const TimeDelta& other) const {
+ return TimeDelta(delta_ - other.delta_);
+ }
+
+ TimeDelta& operator+=(const TimeDelta& other) {
+ delta_ += other.delta_;
+ return *this;
+ }
+ TimeDelta& operator-=(const TimeDelta& other) {
+ delta_ -= other.delta_;
+ return *this;
+ }
+ TimeDelta operator-() const {
+ return TimeDelta(-delta_);
+ }
+
+ double TimesOf(const TimeDelta& other) const {
+ return static_cast<double>(delta_) / static_cast<double>(other.delta_);
+ }
+ double PercentOf(const TimeDelta& other) const {
+ return TimesOf(other) * 100.0;
+ }
+
+ // Computations with ints. Note that we only allow multiplicative
+ // operations with ints, and additive operations with other deltas.
+ TimeDelta operator*(int64_t a) const {
+ return TimeDelta(delta_ * a);
+ }
+ TimeDelta operator/(int64_t a) const {
+ return TimeDelta(delta_ / a);
+ }
+ TimeDelta& operator*=(int64_t a) {
+ delta_ *= a;
+ return *this;
+ }
+ TimeDelta& operator/=(int64_t a) {
+ delta_ /= a;
+ return *this;
+ }
+ int64_t operator/(const TimeDelta& other) const {
+ return delta_ / other.delta_;
+ }
+
+ // Comparison operators.
+ bool operator==(const TimeDelta& other) const {
+ return delta_ == other.delta_;
+ }
+ bool operator!=(const TimeDelta& other) const {
+ return delta_ != other.delta_;
+ }
+ bool operator<(const TimeDelta& other) const {
+ return delta_ < other.delta_;
+ }
+ bool operator<=(const TimeDelta& other) const {
+ return delta_ <= other.delta_;
+ }
+ bool operator>(const TimeDelta& other) const {
+ return delta_ > other.delta_;
+ }
+ bool operator>=(const TimeDelta& other) const {
+ return delta_ >= other.delta_;
+ }
+
+ private:
+ // Constructs a delta given the duration in microseconds. This is private
+ // to avoid confusion by callers with an integer constructor. Use
+ // FromSeconds, FromMilliseconds, etc. instead.
+ explicit TimeDelta(int64_t delta) : delta_(delta) {}
+
+ // Delta in microseconds.
+ int64_t delta_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (1/1,000,000 of a second) since 00:00:00 UTC, January 1, 1970.
+
+class Time V8_FINAL BASE_EMBEDDED {
+ public:
+ static const int64_t kMillisecondsPerSecond = 1000;
+ static const int64_t kMicrosecondsPerMillisecond = 1000;
+ static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+ kMillisecondsPerSecond;
+ static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+ static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+ static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+ static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+ static const int64_t kNanosecondsPerMicrosecond = 1000;
+ static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+ kMicrosecondsPerSecond;
+
+ // Contains the NULL time. Use Time::Now() to get the current time.
+ Time() : us_(0) {}
+
+ // Returns true if the time object has not been initialized.
+ bool IsNull() const { return us_ == 0; }
+
+ // Returns true if the time object is the maximum time.
+ bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+ // Returns the current time. Watch out: the system might adjust its clock,
+ // in which case time will actually go backwards. We don't guarantee that
+ // times are increasing, or that two calls to Now() won't be the same.
+ static Time Now();
+
+ // Returns the current time. Same as Now() except that this function always
+ // uses system time, so that there are no discrepancies between the returned
+ // time and system time even in virtual environments, including our test
+ // bot. Timing-sensitive unittests should use this function.
+ static Time NowFromSystemTime();
+
+ // Returns the epoch of Unix-like systems (Jan 1, 1970).
+ static Time UnixEpoch() { return Time(0); }
+
+ // Returns the maximum time, which should be greater than any reasonable time
+ // with which we might compare it.
+ static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
+
+ // Converts to/from POSIX time values.
+ static Time FromTimeval(struct timeval tv);
+ struct timeval ToTimeval() const;
+
+ // Converts to/from Windows file times.
+ static Time FromFiletime(struct _FILETIME ft);
+ struct _FILETIME ToFiletime() const;
+
+ // Converts to/from the JavaScript convention for times: a number of
+ // milliseconds since the epoch.
+ static Time FromJsTime(double ms_since_epoch);
+ double ToJsTime() const;
+
+ Time& operator=(const Time& other) {
+ us_ = other.us_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const Time& other) const {
+ return TimeDelta::FromMicroseconds(us_ - other.us_);
+ }
+
+ // Modify by some time delta.
+ Time& operator+=(const TimeDelta& delta) {
+ us_ += delta.InMicroseconds();
+ return *this;
+ }
+ Time& operator-=(const TimeDelta& delta) {
+ us_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new time modified by some delta.
+ Time operator+(const TimeDelta& delta) const {
+ return Time(us_ + delta.InMicroseconds());
+ }
+ Time operator-(const TimeDelta& delta) const {
+ return Time(us_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const Time& other) const {
+ return us_ == other.us_;
+ }
+ bool operator!=(const Time& other) const {
+ return us_ != other.us_;
+ }
+ bool operator<(const Time& other) const {
+ return us_ < other.us_;
+ }
+ bool operator<=(const Time& other) const {
+ return us_ <= other.us_;
+ }
+ bool operator>(const Time& other) const {
+ return us_ > other.us_;
+ }
+ bool operator>=(const Time& other) const {
+ return us_ >= other.us_;
+ }
+
+ private:
+ explicit Time(int64_t us) : us_(us) {}
+
+ // Time in microseconds in UTC.
+ int64_t us_;
+};
+
+inline Time operator+(const TimeDelta& delta, const Time& time) {
+ return time + delta;
+}
+
+
+// -----------------------------------------------------------------------------
+// TimeTicks
+//
+// This class represents an abstract time that is almost always incrementing,
+// for use in measuring time durations. It is internally represented in
+// microseconds. It cannot be converted to a human-readable time, but is
+// guaranteed not to decrease (whereas if the user changes the computer
+// clock, Time::Now() may actually decrease or jump). Note, however, that
+// TimeTicks may "stand still", for example if the computer is suspended.
+
+class TimeTicks V8_FINAL BASE_EMBEDDED {
+ public:
+ TimeTicks() : ticks_(0) {}
+
+ // Platform-dependent tick count representing "right now."
+ // The resolution of this clock is ~1-15ms. Resolution varies depending
+ // on hardware/operating system configuration.
+ // This method never returns a null TimeTicks.
+ static TimeTicks Now();
+
+ // Returns a platform-dependent high-resolution tick count. Implementation
+ // is hardware dependent and may or may not return sub-millisecond
+ // resolution. THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+ // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+ // This method never returns a null TimeTicks.
+ static TimeTicks HighResNow();
+
+ // Returns true if this object has not been initialized.
+ bool IsNull() const { return ticks_ == 0; }
+
+ TimeTicks& operator=(const TimeTicks other) {
+ ticks_ = other.ticks_;
+ return *this;
+ }
+
+ // Compute the difference between two times.
+ TimeDelta operator-(const TimeTicks other) const {
+ return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
+ }
+
+ // Modify by some time delta.
+ TimeTicks& operator+=(const TimeDelta& delta) {
+ ticks_ += delta.InMicroseconds();
+ return *this;
+ }
+ TimeTicks& operator-=(const TimeDelta& delta) {
+ ticks_ -= delta.InMicroseconds();
+ return *this;
+ }
+
+ // Return a new TimeTicks modified by some delta.
+ TimeTicks operator+(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ + delta.InMicroseconds());
+ }
+ TimeTicks operator-(const TimeDelta& delta) const {
+ return TimeTicks(ticks_ - delta.InMicroseconds());
+ }
+
+ // Comparison operators
+ bool operator==(const TimeTicks& other) const {
+ return ticks_ == other.ticks_;
+ }
+ bool operator!=(const TimeTicks& other) const {
+ return ticks_ != other.ticks_;
+ }
+ bool operator<(const TimeTicks& other) const {
+ return ticks_ < other.ticks_;
+ }
+ bool operator<=(const TimeTicks& other) const {
+ return ticks_ <= other.ticks_;
+ }
+ bool operator>(const TimeTicks& other) const {
+ return ticks_ > other.ticks_;
+ }
+ bool operator>=(const TimeTicks& other) const {
+ return ticks_ >= other.ticks_;
+ }
+
+ private:
+ // Please use Now() to create a new object. This is for internal use
+ // and testing. Ticks are in microseconds.
+ explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+
+ // Tick count in microseconds.
+ int64_t ticks_;
+};
+
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+ return ticks + delta;
+}
+
+} } // namespace v8::internal
+
+#endif // V8_PLATFORM_TIME_H_
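A usage sketch of the API declared above (illustrative only; the work being
timed is a placeholder):

    #include "platform/time.h"

    using v8::internal::Time;
    using v8::internal::TimeDelta;
    using v8::internal::TimeTicks;

    void Example() {
      // Wall-clock time: meaningful across processes, but may jump if the
      // system clock is adjusted.
      Time deadline = Time::Now() + TimeDelta::FromSeconds(5);
      if (Time::Now() < deadline) {
        // Still before the deadline.
      }

      // Monotonic time: the right tool for measuring durations.
      TimeTicks start = TimeTicks::HighResNow();
      // ... do some work ...
      TimeDelta elapsed = TimeTicks::HighResNow() - start;
      int64_t us = elapsed.InMicroseconds();
      (void)us;  // silence unused-variable warnings
    }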
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index b1dadc1..def0097 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -334,8 +334,8 @@
: title_(title),
uid_(uid),
record_samples_(record_samples),
- start_time_us_(OS::Ticks()),
- end_time_us_(0) {
+ start_time_(Time::NowFromSystemTime()) {
+ timer_.Start();
}
@@ -346,7 +346,7 @@
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
- end_time_us_ = OS::Ticks();
+ end_time_ = start_time_ + timer_.Elapsed();
}
diff --git a/src/profile-generator.h b/src/profile-generator.h
index a282af2..5edcac8 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -202,8 +202,8 @@
int samples_count() const { return samples_.length(); }
ProfileNode* sample(int index) const { return samples_.at(index); }
- int64_t start_time_us() const { return start_time_us_; }
- int64_t end_time_us() const { return end_time_us_; }
+ Time start_time() const { return start_time_; }
+ Time end_time() const { return end_time_; }
void UpdateTicksScale();
@@ -213,8 +213,9 @@
const char* title_;
unsigned uid_;
bool record_samples_;
- int64_t start_time_us_;
- int64_t end_time_us_;
+ Time start_time_;
+ Time end_time_;
+ ElapsedTimer timer_;
List<ProfileNode*> samples_;
ProfileTree top_down_;
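The pattern the profiler now follows: record a wall-clock start time once,
then derive the end time from a monotonic timer, so a system clock adjustment
mid-profile cannot corrupt the reported interval. A sketch (assuming
ElapsedTimer wraps TimeTicks::HighResNow(), per this patch series):

    Time start_time = Time::NowFromSystemTime();   // wall clock, read once
    ElapsedTimer timer;
    timer.Start();                                 // monotonic from here on
    // ... profiling runs ...
    Time end_time = start_time + timer.Elapsed();  // immune to clock jumps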
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index 3752b27..57c50dd 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -177,18 +177,7 @@
PrintF(" for on-stack replacement]\n");
}
- // Get the interrupt stub code object to match against. We aren't
- // prepared to generate it, but we don't expect to have to.
- Code* interrupt_code = NULL;
- InterruptStub interrupt_stub;
- bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code, isolate_);
- if (found_code) {
- Code* replacement_code =
- isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
- Code* unoptimized_code = shared->code();
- Deoptimizer::PatchInterruptCode(
- unoptimized_code, interrupt_code, replacement_code);
- }
+ Deoptimizer::PatchInterruptCode(isolate_, shared->code());
}
diff --git a/src/runtime.cc b/src/runtime.cc
index 71d792a..5760982 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8662,12 +8662,7 @@
function->PrintName();
PrintF("]\n");
}
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(*unoptimized,
- *interrupt_code,
- *replacement_code);
+ Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
// If the optimization attempt succeeded, return the AST id tagged as a
// smi. This tells the builtin that we need to translate the unoptimized
diff --git a/src/sampler.cc b/src/sampler.cc
index fa8c2e8..0aaa1e9 100644
--- a/src/sampler.cc
+++ b/src/sampler.cc
@@ -27,9 +27,7 @@
#include "sampler.h"
-#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
- || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__) \
- || defined(__native_client__) || defined(__MACH__)
+#if V8_OS_POSIX && !V8_OS_CYGWIN
#define USE_SIGNALS
@@ -39,24 +37,24 @@
#include <sys/time.h>
#include <sys/syscall.h>
-#if defined(__MACH__)
+#if V8_OS_MACOSX
#include <mach/mach.h>
// OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
// and is a typedef for struct sigcontext. There is no uc_mcontext.
-#elif(!defined(__ANDROID__) || defined(__BIONIC_HAVE_UCONTEXT_T)) \
- && !defined(__OpenBSD__)
+#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) \
+ && !V8_OS_OPENBSD
#include <ucontext.h>
#endif
#include <unistd.h>
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h>
#endif
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#elif V8_OS_WIN || V8_OS_CYGWIN
#include "win32-headers.h"
@@ -74,7 +72,7 @@
#include "vm-state-inl.h"
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
+#if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
// Not all versions of Android's C library provide ucontext_t.
// Detect this and provide custom but compatible definitions. Note that these
@@ -146,7 +144,7 @@
enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
#endif
-#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
+#endif // V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T)
namespace v8 {
@@ -179,7 +177,7 @@
pthread_t vm_tid_;
};
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#elif V8_OS_WIN || V8_OS_CYGWIN
// ----------------------------------------------------------------------------
// Win32 profiler support. On Cygwin we use the same sampler implementation as
@@ -250,8 +248,25 @@
class SignalHandler : public AllStatic {
public:
- static inline void EnsureInstalled() {
- if (signal_handler_installed_) return;
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; }
+
+ static void IncreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (++client_count_ == 1) Install();
+ }
+
+ static void DecreaseSamplerCount() {
+ LockGuard<Mutex> lock_guard(mutex_);
+ if (--client_count_ == 0) Restore();
+ }
+
+ static bool Installed() {
+ return signal_handler_installed_;
+ }
+
+ private:
+ static void Install() {
struct sigaction sa;
sa.sa_sigaction = &HandleProfilerSignal;
sigemptyset(&sa.sa_mask);
@@ -260,30 +275,31 @@
(sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
}
- static inline void Restore() {
+ static void Restore() {
if (signal_handler_installed_) {
sigaction(SIGPROF, &old_signal_handler_, 0);
signal_handler_installed_ = false;
}
}
- static inline bool Installed() {
- return signal_handler_installed_;
- }
-
- private:
static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+ // Protects the process-wide state below.
+ static Mutex* mutex_;
+ static int client_count_;
static bool signal_handler_installed_;
static struct sigaction old_signal_handler_;
};
+
+Mutex* SignalHandler::mutex_ = NULL;
+int SignalHandler::client_count_ = 0;
struct sigaction SignalHandler::old_signal_handler_;
bool SignalHandler::signal_handler_installed_ = false;
void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
void* context) {
-#if defined(__native_client__)
+#if V8_OS_NACL
// As Native Client does not support signal handling, profiling
// is disabled.
return;
@@ -301,7 +317,7 @@
}
Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
+ if (sampler == NULL) return;
RegisterState state;
@@ -312,10 +328,10 @@
#else
// Extracting the sample from the context is extremely machine dependent.
ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#if !defined(__OpenBSD__)
+#if !V8_OS_OPENBSD
mcontext_t& mcontext = ucontext->uc_mcontext;
#endif
-#if defined(__linux__) || defined(__ANDROID__)
+#if V8_OS_LINUX
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
@@ -343,7 +359,7 @@
state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
#endif // V8_HOST_ARCH_*
-#elif defined(__MACH__)
+#elif V8_OS_MACOSX
#if V8_HOST_ARCH_X64
#if __DARWIN_UNIX03
state.pc = reinterpret_cast<Address>(mcontext->__ss.__rip);
@@ -365,7 +381,7 @@
state.fp = reinterpret_cast<Address>(mcontext->ss.ebp);
#endif // __DARWIN_UNIX03
#endif // V8_HOST_ARCH_IA32
-#elif defined(__FreeBSD__)
+#elif V8_OS_FREEBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.mc_eip);
state.sp = reinterpret_cast<Address>(mcontext.mc_esp);
@@ -379,7 +395,7 @@
state.sp = reinterpret_cast<Address>(mcontext.mc_r13);
state.fp = reinterpret_cast<Address>(mcontext.mc_r11);
#endif // V8_HOST_ARCH_*
-#elif defined(__NetBSD__)
+#elif V8_OS_NETBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
@@ -389,7 +405,7 @@
state.sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
state.fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
#endif // V8_HOST_ARCH_*
-#elif defined(__OpenBSD__)
+#elif V8_OS_OPENBSD
#if V8_HOST_ARCH_IA32
state.pc = reinterpret_cast<Address>(ucontext->sc_eip);
state.sp = reinterpret_cast<Address>(ucontext->sc_esp);
@@ -399,14 +415,14 @@
state.sp = reinterpret_cast<Address>(ucontext->sc_rsp);
state.fp = reinterpret_cast<Address>(ucontext->sc_rbp);
#endif // V8_HOST_ARCH_*
-#elif defined(__sun)
+#elif V8_OS_SOLARIS
state.pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
state.sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
state.fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-#endif // __sun
+#endif // V8_OS_SOLARIS
#endif // USE_SIMULATOR
sampler->SampleStack(state);
-#endif // __native_client__
+#endif // V8_OS_NACL
}
#endif
@@ -420,12 +436,12 @@
: Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
interval_(interval) {}
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
+ static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+ static void TearDown() { delete mutex_; mutex_ = NULL; }
static void AddActiveSampler(Sampler* sampler) {
bool need_to_start = false;
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (instance_ == NULL) {
// Start a thread that will send SIGPROF signal to VM threads,
// when CPU profiling will be enabled.
@@ -438,16 +454,13 @@
ASSERT(instance_->interval_ == sampler->interval());
instance_->active_samplers_.Add(sampler);
-#if defined(USE_SIGNALS)
- SignalHandler::EnsureInstalled();
-#endif
if (need_to_start) instance_->StartSynchronously();
}
static void RemoveActiveSampler(Sampler* sampler) {
SamplerThread* instance_to_remove = NULL;
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
ASSERT(sampler->IsActive());
bool removed = instance_->active_samplers_.RemoveElement(sampler);
@@ -459,9 +472,6 @@
if (instance_->active_samplers_.is_empty()) {
instance_to_remove = instance_;
instance_ = NULL;
-#if defined(USE_SIGNALS)
- SignalHandler::Restore();
-#endif
}
}
@@ -474,7 +484,7 @@
virtual void Run() {
while (true) {
{
- ScopedLock lock(mutex_);
+ LockGuard<Mutex> lock_guard(mutex_);
if (active_samplers_.is_empty()) break;
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
@@ -550,12 +560,18 @@
void Sampler::SetUp() {
+#if defined(USE_SIGNALS)
+ SignalHandler::SetUp();
+#endif
SamplerThread::SetUp();
}
void Sampler::TearDown() {
SamplerThread::TearDown();
+#if defined(USE_SIGNALS)
+ SignalHandler::TearDown();
+#endif
}
@@ -591,6 +607,22 @@
}
+void Sampler::IncreaseProfilingDepth() {
+ NoBarrier_AtomicIncrement(&profiling_, 1);
+#if defined(USE_SIGNALS)
+ SignalHandler::IncreaseSamplerCount();
+#endif
+}
+
+
+void Sampler::DecreaseProfilingDepth() {
+#if defined(USE_SIGNALS)
+ SignalHandler::DecreaseSamplerCount();
+#endif
+ NoBarrier_AtomicIncrement(&profiling_, -1);
+}
+
+
void Sampler::SampleStack(const RegisterState& state) {
TickSample* sample = isolate_->cpu_profiler()->StartTickSample();
TickSample sample_obj;
@@ -608,15 +640,6 @@
}
-bool Sampler::CanSampleOnProfilerEventsProcessorThread() {
-#if defined(USE_SIGNALS)
- return true;
-#else
- return false;
-#endif
-}
-
-
#if defined(USE_SIGNALS)
void Sampler::DoSample() {
@@ -624,7 +647,7 @@
pthread_kill(platform_data()->vm_tid(), SIGPROF);
}
-#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+#elif V8_OS_WIN || V8_OS_CYGWIN
void Sampler::DoSample() {
HANDLE profiled_thread = platform_data()->profiled_thread();
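The SignalHandler change above replaces unconditional install/restore with
reference counting: the first sampler installs the SIGPROF handler, the last
one restores the previous handler. A self-contained sketch of the idiom, with
standard types standing in for V8's Mutex/LockGuard (names are illustrative):

    #include <mutex>

    class RefCountedSignalHandler {
     public:
      static void IncreaseSamplerCount() {
        std::lock_guard<std::mutex> guard(mutex_);
        if (++client_count_ == 1) Install();  // first client installs
      }
      static void DecreaseSamplerCount() {
        std::lock_guard<std::mutex> guard(mutex_);
        if (--client_count_ == 0) Restore();  // last client restores
      }

     private:
      static void Install() { /* sigaction(SIGPROF, ...) in the real code */ }
      static void Restore() { /* reinstate the saved old handler */ }
      static std::mutex mutex_;
      static int client_count_;
    };

    std::mutex RefCountedSignalHandler::mutex_;
    int RefCountedSignalHandler::client_count_ = 0;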
diff --git a/src/sampler.h b/src/sampler.h
index cd65b12..b17a2ed 100644
--- a/src/sampler.h
+++ b/src/sampler.h
@@ -99,16 +99,15 @@
return NoBarrier_Load(&profiling_) > 0 &&
!NoBarrier_Load(&has_processing_thread_);
}
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
+ void IncreaseProfilingDepth();
+ void DecreaseProfilingDepth();
// Whether the sampler is running (that is, consumes resources).
bool IsActive() const { return NoBarrier_Load(&active_); }
+ void DoSample();
// If true, the next sample must be initiated on the profiler event processor
// thread right after the latest sample is processed.
- static bool CanSampleOnProfilerEventsProcessorThread();
- void DoSample();
void SetHasProcessingThread(bool value) {
NoBarrier_Store(&has_processing_thread_, value);
}
diff --git a/src/spaces.cc b/src/spaces.cc
index 8a5aa03..e62d381 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2043,8 +2043,8 @@
// This is safe (not going to deadlock) since Concatenate operations
// are never performed on the same free lists at the same time in
// reverse order.
- ScopedLock lock_target(mutex_);
- ScopedLock lock_source(category->mutex());
+ LockGuard<Mutex> target_lock_guard(mutex());
+ LockGuard<Mutex> source_lock_guard(category->mutex());
free_bytes = category->available();
if (end_ == NULL) {
end_ = category->end();
diff --git a/src/spaces.h b/src/spaces.h
index aa864b6..7f8ab5e 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -32,6 +32,7 @@
#include "hashmap.h"
#include "list.h"
#include "log.h"
+#include "platform/mutex.h"
#include "v8utils.h"
namespace v8 {
@@ -1445,13 +1446,8 @@
FreeListCategory() :
top_(NULL),
end_(NULL),
- mutex_(OS::CreateMutex()),
available_(0) {}
- ~FreeListCategory() {
- delete mutex_;
- }
-
intptr_t Concatenate(FreeListCategory* category);
void Reset();
@@ -1477,7 +1473,7 @@
int available() const { return available_; }
void set_available(int available) { available_ = available; }
- Mutex* mutex() { return mutex_; }
+ Mutex* mutex() { return &mutex_; }
#ifdef DEBUG
intptr_t SumFreeList();
@@ -1487,7 +1483,7 @@
private:
FreeListNode* top_;
FreeListNode* end_;
- Mutex* mutex_;
+ Mutex mutex_;
// Total available bytes in all blocks of this free list category.
int available_;
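The same refactor recurs throughout this patch: a heap-allocated
OS::CreateMutex() pointer (with a matching delete in the destructor) becomes
a by-value Mutex member locked via the RAII LockGuard. A sketch with standard
types standing in for V8's Mutex/LockGuard (the class name is illustrative):

    #include <mutex>

    class FreeListCategoryLike {
     public:
      int available() {
        // Scoped lock: released automatically, even on early return.
        std::lock_guard<std::mutex> guard(mutex_);
        return available_;
      }

     private:
      std::mutex mutex_;   // embedded: constructed/destroyed with the object
      int available_ = 0;  // guarded by mutex_
    };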
diff --git a/src/v8.cc b/src/v8.cc
index 2933a94..7d2294e 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -135,7 +135,7 @@
state[i] = FLAG_random_seed;
} else if (entropy_source != NULL) {
uint32_t val;
- ScopedLock lock(entropy_mutex.Pointer());
+ LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
entropy_source(reinterpret_cast<unsigned char*>(&val), sizeof(uint32_t));
state[i] = val;
} else {
diff --git a/src/v8globals.h b/src/v8globals.h
index 8a1cc17..95187e6 100644
--- a/src/v8globals.h
+++ b/src/v8globals.h
@@ -163,6 +163,7 @@
class MessageLocation;
class VirtualMemory;
class Mutex;
+class RecursiveMutex;
typedef bool (*WeakSlotCallback)(Object** pointer);
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 2df187a..c1f20b1 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -214,7 +214,7 @@
void ThreadManager::Lock() {
- mutex_->Lock();
+ mutex_.Lock();
mutex_owner_ = ThreadId::Current();
ASSERT(IsLockedByCurrentThread());
}
@@ -222,7 +222,7 @@
void ThreadManager::Unlock() {
mutex_owner_ = ThreadId::Invalid();
- mutex_->Unlock();
+ mutex_.Unlock();
}
@@ -303,8 +303,7 @@
// be distinguished from not having a thread id at all (since NULL is
// defined as 0).
ThreadManager::ThreadManager()
- : mutex_(OS::CreateMutex()),
- mutex_owner_(ThreadId::Invalid()),
+ : mutex_owner_(ThreadId::Invalid()),
lazily_archived_thread_(ThreadId::Invalid()),
lazily_archived_thread_state_(NULL),
free_anchor_(NULL),
@@ -315,7 +314,6 @@
ThreadManager::~ThreadManager() {
- delete mutex_;
DeleteThreadStateList(free_anchor_);
DeleteThreadStateList(in_use_anchor_);
}
diff --git a/src/v8threads.h b/src/v8threads.h
index 8dce860..b8ed817 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -119,7 +119,7 @@
void EagerlyArchiveThread();
- Mutex* mutex_;
+ Mutex mutex_;
ThreadId mutex_owner_;
ThreadId lazily_archived_thread_;
ThreadState* lazily_archived_thread_state_;
diff --git a/src/version.cc b/src/version.cc
index d047a12..5644151 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 21
-#define BUILD_NUMBER 6
+#define BUILD_NUMBER 7
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/win32-headers.h b/src/win32-headers.h
index c83937c..98b0120 100644
--- a/src/win32-headers.h
+++ b/src/win32-headers.h
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#ifndef V8_WIN32_HEADERS_H_
+#define V8_WIN32_HEADERS_H_
+
#ifndef WIN32_LEAN_AND_MEAN
// WIN32_LEAN_AND_MEAN implies NOCRYPT and NOGDI.
#define WIN32_LEAN_AND_MEAN
@@ -55,7 +58,6 @@
#include <windows.h>
-#ifdef V8_WIN32_HEADERS_FULL
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
#include <mmsystem.h> // For timeGetTime().
@@ -81,7 +83,6 @@
#endif // __MINGW32__
#include <process.h> // For _beginthreadex().
#include <stdlib.h>
-#endif // V8_WIN32_HEADERS_FULL
#undef VOID
#undef DELETE
@@ -94,6 +95,7 @@
#undef ANY
#undef IGNORE
#undef GetObject
-#undef CreateMutex
#undef CreateSemaphore
#undef Yield
+
+#endif // V8_WIN32_HEADERS_H_
diff --git a/src/win32-math.cc b/src/win32-math.cc
index 9ffc4ea..88fa3a6 100644
--- a/src/win32-math.cc
+++ b/src/win32-math.cc
@@ -31,8 +31,6 @@
// (http://www.opengroup.org/onlinepubs/000095399/)
#ifdef _MSC_VER
-#undef V8_WIN32_LEAN_AND_MEAN
-#define V8_WIN32_HEADERS_FULL
#include "win32-headers.h"
#include <limits.h> // Required for INT_MAX etc.
#include <float.h> // Required for DBL_MAX and on Win32 for finite()
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 9eed917..83d01a6 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -3320,16 +3320,6 @@
}
-void StackCheckStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
-}
-
-
-void InterruptStub::Generate(MacroAssembler* masm) {
- __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
-}
-
-
static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index e9cf567..bb8acce 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -105,12 +105,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -126,12 +121,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -146,23 +136,28 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
- ASSERT_EQ(interrupt_code->entry(),
+ // Get the interrupt stub code object from the cache to match against.
+ Code* interrupt_builtin =
+ isolate->builtins()->builtin(Builtins::kInterruptCheck);
+ ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index 04005ac..54472c2 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -280,8 +280,7 @@
Label ok;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -341,8 +340,7 @@
}
EmitProfilingCounterDecrement(weight);
__ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
@@ -388,8 +386,8 @@
__ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ CallRuntime(Runtime::kOptimizeFunctionOnNextCall, 1);
} else {
- InterruptStub stub;
- __ CallStub(&stub);
+ __ call(isolate()->builtins()->InterruptCheck(),
+ RelocInfo::CODE_TARGET);
}
__ pop(rax);
EmitProfilingCounterReset();
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index ce66285..fd9bf18 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -5419,8 +5419,9 @@
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
- StackCheckStub stub;
- CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
+ CallCode(isolate()->builtins()->StackCheck(),
+ RelocInfo::CODE_TARGET,
+ instr);
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 0938007..0c605d8 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -958,7 +958,10 @@
}
-bool MacroAssembler::IsUnsafeInt(const int x) {
+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+bool MacroAssembler::IsUnsafeInt(const int32_t x) {
static const int kMaxBits = 17;
return !is_intn(x, kMaxBits);
}
@@ -989,9 +992,6 @@
}
-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
Register MacroAssembler::GetSmiConstant(Smi* source) {
int value = source->value();
if (value == 0) {
@@ -2196,6 +2196,17 @@
}
+void MacroAssembler::Push(Smi* source) {
+ intptr_t smi = reinterpret_cast<intptr_t>(source);
+ if (is_int32(smi)) {
+ push(Immediate(static_cast<int32_t>(smi)));
+ } else {
+ Register constant = GetSmiConstant(source);
+ push(constant);
+ }
+}
+
+
void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
movq(scratch, src);
// High bits.
@@ -2220,6 +2231,14 @@
}
+void MacroAssembler::Test(const Operand& src, Smi* source) {
+ testl(Operand(src, kIntSize), Immediate(source->value()));
+}
+
+
+// ----------------------------------------------------------------------------
+
+
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
@@ -2459,17 +2478,6 @@
}
-void MacroAssembler::Push(Smi* source) {
- intptr_t smi = reinterpret_cast<intptr_t>(source);
- if (is_int32(smi)) {
- push(Immediate(static_cast<int32_t>(smi)));
- } else {
- Register constant = GetSmiConstant(source);
- push(constant);
- }
-}
-
-
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
addq(rsp, Immediate(stack_elements * kPointerSize));
@@ -2477,11 +2485,6 @@
}
-void MacroAssembler::Test(const Operand& src, Smi* source) {
- testl(Operand(src, kIntSize), Immediate(source->value()));
-}
-
-
void MacroAssembler::TestBit(const Operand& src, int bits) {
int byte_offset = bits / kBitsPerByte;
int bit_in_byte = bits & (kBitsPerByte - 1);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index d9fb373..8e30981 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -375,6 +375,11 @@
// ---------------------------------------------------------------------------
// Smi tagging, untagging and operations on tagged smis.
+ // Support for constant splitting.
+ bool IsUnsafeInt(const int32_t x);
+ void SafeMove(Register dst, Smi* src);
+ void SafePush(Smi* src);
+
void InitializeSmiConstantRegister() {
movq(kSmiConstantRegister,
reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
@@ -782,11 +787,6 @@
// Move if the registers are not identical.
void Move(Register target, Register source);
- // Support for constant splitting.
- bool IsUnsafeInt(const int x);
- void SafeMove(Register dst, Smi* src);
- void SafePush(Smi* src);
-
// Bit-field support.
void TestBit(const Operand& dst, int bit_index);
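For context on the IsUnsafeInt/SafeMove/SafePush declarations moved above: a
constant is "unsafe" when it needs more than 17 significant bits, and such
constants are split across two instructions so the raw immediate never
appears verbatim in generated code. A sketch of the idea (the XOR-cookie
scheme and its JIT-spraying rationale are assumptions, not spelled out in
this patch):

    #include <stdint.h>

    // Fits-in-n-signed-bits check, as is_intn() does in V8.
    static bool IsIntN(int32_t x, int n) {
      return x >= -(1 << (n - 1)) && x < (1 << (n - 1));
    }

    // A constant wider than 17 bits is considered unsafe to embed whole.
    bool IsUnsafeInt(int32_t x) {
      const int kMaxBits = 17;
      return !IsIntN(x, kMaxBits);
    }

    // Splitting sketch: emit value ^ cookie, then XOR with the cookie, so
    // the attacker-chosen constant never appears as a single immediate.
    int32_t SafeMaterialize(int32_t value, int32_t cookie) {
      int32_t scrambled = value ^ cookie;  // first instruction's immediate
      return scrambled ^ cookie;           // second instruction recovers it
    }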
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index b7e0771..051d382 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -84,6 +84,7 @@
'test-lockers.cc',
'test-log.cc',
'test-mark-compact.cc',
+ 'test-mutex.cc',
'test-object-observe.cc',
'test-parsing.cc',
'test-platform.cc',
@@ -100,6 +101,7 @@
'test-strtod.cc',
'test-thread-termination.cc',
'test-threads.cc',
+ 'test-time.cc',
'test-types.cc',
'test-unbound-queue.cc',
'test-utils.cc',
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 6d3c2ee..6d66c5e 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -44,14 +44,15 @@
using i::ProfilerEventsProcessor;
using i::ScopedVector;
using i::SmartPointer;
+using i::TimeDelta;
using i::Vector;
TEST(StartStop) {
CpuProfilesCollection profiles;
ProfileGenerator generator(&profiles);
- SmartPointer<ProfilerEventsProcessor> processor(
- new ProfilerEventsProcessor(&generator, NULL, 100));
+ SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
+ &generator, NULL, TimeDelta::FromMicroseconds(100)));
processor->Start();
processor->StopSynchronously();
}
@@ -142,8 +143,8 @@
CpuProfilesCollection* profiles = new CpuProfilesCollection;
profiles->StartProfiling("", 1, false);
ProfileGenerator generator(profiles);
- SmartPointer<ProfilerEventsProcessor> processor(
- new ProfilerEventsProcessor(&generator, NULL, 100));
+ SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
+ &generator, NULL, TimeDelta::FromMicroseconds(100)));
processor->Start();
CpuProfiler profiler(isolate, profiles, &generator, *processor);
@@ -204,8 +205,8 @@
CpuProfilesCollection* profiles = new CpuProfilesCollection;
profiles->StartProfiling("", 1, false);
ProfileGenerator generator(profiles);
- SmartPointer<ProfilerEventsProcessor> processor(
- new ProfilerEventsProcessor(&generator, NULL, 100));
+ SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
+ &generator, NULL, TimeDelta::FromMicroseconds(100)));
processor->Start();
CpuProfiler profiler(isolate, profiles, &generator, *processor);
@@ -273,8 +274,8 @@
CpuProfilesCollection* profiles = new CpuProfilesCollection;
profiles->StartProfiling("", 1, false);
ProfileGenerator generator(profiles);
- SmartPointer<ProfilerEventsProcessor> processor(
- new ProfilerEventsProcessor(&generator, NULL, 100));
+ SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
+ &generator, NULL, TimeDelta::FromMicroseconds(100)));
processor->Start();
CpuProfiler profiler(isolate, profiles, &generator, *processor);
@@ -419,13 +420,10 @@
v8::HandleScope scope(env->GetIsolate());
v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
- int64_t time_before_profiling = i::OS::Ticks();
v8::Local<v8::String> profile_name = v8::String::New("test");
cpu_profiler->StartCpuProfiling(profile_name);
const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
- CHECK(time_before_profiling <= profile->GetStartTime());
CHECK(profile->GetStartTime() <= profile->GetEndTime());
- CHECK(profile->GetEndTime() <= i::OS::Ticks());
}
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 9281337..c12cb58 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -4678,14 +4678,13 @@
private:
int num_threads_;
int num_blocked_;
- v8::internal::Mutex* lock_;
+ v8::internal::Mutex lock_;
v8::internal::Semaphore* sem_;
bool invalid_;
};
ThreadBarrier::ThreadBarrier(int num_threads)
: num_threads_(num_threads), num_blocked_(0) {
- lock_ = OS::CreateMutex();
sem_ = OS::CreateSemaphore(0);
invalid_ = false; // A barrier may only be used once. Then it is invalid.
}
@@ -4694,14 +4693,12 @@
// Do not call, due to race condition with Wait().
// Could be resolved with Pthread condition variables.
ThreadBarrier::~ThreadBarrier() {
- lock_->Lock();
- delete lock_;
delete sem_;
}
void ThreadBarrier::Wait() {
- lock_->Lock();
+ lock_.Lock();
CHECK(!invalid_);
if (num_blocked_ == num_threads_ - 1) {
// Signal and unblock all waiting threads.
@@ -4711,10 +4708,10 @@
invalid_ = true;
printf("BARRIER\n\n");
fflush(stdout);
- lock_->Unlock();
+ lock_.Unlock();
} else { // Wait for the semaphore.
++num_blocked_;
- lock_->Unlock(); // Potential race condition with destructor because
+ lock_.Unlock(); // Potential race condition with destructor because
sem_->Wait(); // these two lines are not atomic.
}
}
diff --git a/test/cctest/test-lock.cc b/test/cctest/test-lock.cc
index d4387d0..0603e44 100644
--- a/test/cctest/test-lock.cc
+++ b/test/cctest/test-lock.cc
@@ -38,33 +38,6 @@
using namespace ::v8::internal;
-// Simple test of locking logic
-TEST(Simple) {
- Mutex* mutex = OS::CreateMutex();
- CHECK_EQ(0, mutex->Lock()); // acquire the lock with the right token
- CHECK_EQ(0, mutex->Unlock()); // can unlock with the right token
- delete mutex;
-}
-
-
-TEST(MultiLock) {
- Mutex* mutex = OS::CreateMutex();
- CHECK_EQ(0, mutex->Lock());
- CHECK_EQ(0, mutex->Unlock());
- delete mutex;
-}
-
-
-TEST(ShallowLock) {
- Mutex* mutex = OS::CreateMutex();
- CHECK_EQ(0, mutex->Lock());
- CHECK_EQ(0, mutex->Unlock());
- CHECK_EQ(0, mutex->Lock());
- CHECK_EQ(0, mutex->Unlock());
- delete mutex;
-}
-
-
TEST(SemaphoreTimeout) {
bool ok;
Semaphore* sem = OS::CreateSemaphore(0);
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 8bcb5f7..6bf56f0 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -27,7 +27,6 @@
//
// Tests of logging functions from log.h
-#define V8_DISABLE_DEPRECATIONS 1
#ifdef __linux__
#include <pthread.h>
#include <signal.h>
@@ -44,7 +43,6 @@
#include "v8utils.h"
#include "cctest.h"
#include "vm-state-inl.h"
-#undef V8_DISABLE_DEPRECATIONS
using v8::internal::Address;
using v8::internal::EmbeddedVector;
@@ -56,13 +54,12 @@
class ScopedLoggerInitializer {
public:
- explicit ScopedLoggerInitializer(bool prof_lazy)
+ ScopedLoggerInitializer()
: saved_log_(i::FLAG_log),
- saved_prof_lazy_(i::FLAG_prof_lazy),
saved_prof_(i::FLAG_prof),
temp_file_(NULL),
// Need to run this prior to creating the scope.
- trick_to_run_init_flags_(init_flags_(prof_lazy)),
+ trick_to_run_init_flags_(init_flags_()),
scope_(v8::Isolate::GetCurrent()),
env_(v8::Context::New(v8::Isolate::GetCurrent())),
logger_(i::Isolate::Current()->logger()) {
@@ -73,7 +70,6 @@
env_->Exit();
logger_->TearDown();
if (temp_file_ != NULL) fclose(temp_file_);
- i::FLAG_prof_lazy = saved_prof_lazy_;
i::FLAG_prof = saved_prof_;
i::FLAG_log = saved_log_;
}
@@ -91,16 +87,14 @@
}
private:
- static bool init_flags_(bool prof_lazy) {
+ static bool init_flags_() {
i::FLAG_log = true;
i::FLAG_prof = true;
- i::FLAG_prof_lazy = prof_lazy;
i::FLAG_logfile = i::Log::kLogToTemporaryFile;
- return prof_lazy;
+ return false;
}
const bool saved_log_;
- const bool saved_prof_lazy_;
const bool saved_prof_;
FILE* temp_file_;
const bool trick_to_run_init_flags_;
@@ -124,70 +118,6 @@
}
-TEST(ProfLazyMode) {
- ScopedLoggerInitializer initialize_logger(true);
- Logger* logger = initialize_logger.logger();
-
- if (!i::V8::UseCrankshaft()) return;
-
- logger->StringEvent("test-start", "");
- CompileRun("var a = (function(x) { return x + 1; })(10);");
- logger->StringEvent("test-profiler-start", "");
- v8::V8::ResumeProfiler();
- CompileRun(
- "var b = (function(x) { return x + 2; })(10);\n"
- "var c = (function(x) { return x + 3; })(10);\n"
- "var d = (function(x) { return x + 4; })(10);\n"
- "var e = (function(x) { return x + 5; })(10);");
- v8::V8::PauseProfiler();
- logger->StringEvent("test-profiler-stop", "");
- CompileRun("var f = (function(x) { return x + 6; })(10);");
- // Check that profiling can be resumed again.
- logger->StringEvent("test-profiler-start-2", "");
- v8::V8::ResumeProfiler();
- CompileRun(
- "var g = (function(x) { return x + 7; })(10);\n"
- "var h = (function(x) { return x + 8; })(10);\n"
- "var i = (function(x) { return x + 9; })(10);\n"
- "var j = (function(x) { return x + 10; })(10);");
- v8::V8::PauseProfiler();
- logger->StringEvent("test-profiler-stop-2", "");
- logger->StringEvent("test-stop", "");
-
- bool exists = false;
- i::Vector<const char> log(
- i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
- CHECK(exists);
-
- const char* test_start_position =
- StrNStr(log.start(), "test-start,", log.length());
- CHECK_NE(NULL, test_start_position);
- const char* test_profiler_start_position =
- StrNStr(log.start(), "test-profiler-start,", log.length());
- CHECK_NE(NULL, test_profiler_start_position);
- CHECK_GT(test_profiler_start_position, test_start_position);
- const char* test_profiler_stop_position =
- StrNStr(log.start(), "test-profiler-stop,", log.length());
- CHECK_NE(NULL, test_profiler_stop_position);
- CHECK_GT(test_profiler_stop_position, test_profiler_start_position);
- const char* test_profiler_start_2_position =
- StrNStr(log.start(), "test-profiler-start-2,", log.length());
- CHECK_NE(NULL, test_profiler_start_2_position);
- CHECK_GT(test_profiler_start_2_position, test_profiler_stop_position);
-
- // Nothing must be logged until profiling is resumed.
- CHECK_EQ(NULL, StrNStr(test_start_position,
- "code-creation,",
- static_cast<int>(test_profiler_start_position -
- test_start_position)));
- // Nothing must be logged while profiling is suspended.
- CHECK_EQ(NULL, StrNStr(test_profiler_stop_position,
- "code-creation,",
- static_cast<int>(test_profiler_start_2_position -
- test_profiler_stop_position)));
-}
-
-
// BUG(913). Need to implement support for profiling multiple VM threads.
#if 0
@@ -396,7 +326,7 @@
TEST(LogCallbacks) {
- ScopedLoggerInitializer initialize_logger(false);
+ ScopedLoggerInitializer initialize_logger;
Logger* logger = initialize_logger.logger();
v8::Local<v8::FunctionTemplate> obj =
@@ -445,7 +375,7 @@
TEST(LogAccessorCallbacks) {
- ScopedLoggerInitializer initialize_logger(false);
+ ScopedLoggerInitializer initialize_logger;
Logger* logger = initialize_logger.logger();
v8::Local<v8::FunctionTemplate> obj =
@@ -486,18 +416,6 @@
}
-TEST(IsLoggingPreserved) {
- ScopedLoggerInitializer initialize_logger(false);
- Logger* logger = initialize_logger.logger();
-
- CHECK(logger->is_logging());
- logger->ResumeProfiler();
- CHECK(logger->is_logging());
- logger->PauseProfiler();
- CHECK(logger->is_logging());
-}
-
-
typedef i::NativesCollection<i::TEST> TestSources;
@@ -514,7 +432,7 @@
CHECK(!i::V8::IsRunning());
// Start with profiling to capture all code events from the beginning.
- ScopedLoggerInitializer initialize_logger(false);
+ ScopedLoggerInitializer initialize_logger;
Logger* logger = initialize_logger.logger();
// Compile and run a function that creates other functions.
@@ -523,7 +441,7 @@
" obj.test =\n"
" (function a(j) { return function b() { return j; } })(100);\n"
"})(this);");
- v8::V8::PauseProfiler();
+ logger->StopProfiler();
HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
logger->StringEvent("test-logging-done", "");
diff --git a/test/cctest/test-mutex.cc b/test/cctest/test-mutex.cc
new file mode 100644
index 0000000..cdc829f
--- /dev/null
+++ b/test/cctest/test-mutex.cc
@@ -0,0 +1,118 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdlib>
+
+#include "v8.h"
+
+#include "cctest.h"
+#include "platform/mutex.h"
+
+using namespace ::v8::internal;
+
+
+TEST(LockGuardMutex) {
+ Mutex mutex;
+ { LockGuard<Mutex> lock_guard(&mutex);
+ }
+ { LockGuard<Mutex> lock_guard(&mutex);
+ }
+}
+
+
+TEST(LockGuardRecursiveMutex) {
+ RecursiveMutex recursive_mutex;
+ { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex);
+ }
+ { LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
+ LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
+ }
+}
+
+
+TEST(LockGuardLazyMutex) {
+ LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
+ { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
+ }
+ { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
+ }
+}
+
+
+TEST(LockGuardLazyRecursiveMutex) {
+ LazyRecursiveMutex lazy_recursive_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+ { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer());
+ }
+ { LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
+ LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
+ }
+}
+
+
+TEST(MultipleMutexes) {
+ Mutex mutex1;
+ Mutex mutex2;
+ Mutex mutex3;
+ // Order 1
+ mutex1.Lock();
+ mutex2.Lock();
+ mutex3.Lock();
+ mutex1.Unlock();
+ mutex2.Unlock();
+ mutex3.Unlock();
+ // Order 2
+ mutex1.Lock();
+ mutex2.Lock();
+ mutex3.Lock();
+ mutex3.Unlock();
+ mutex2.Unlock();
+ mutex1.Unlock();
+}
+
+
+TEST(MultipleRecursiveMutexes) {
+ RecursiveMutex recursive_mutex1;
+ RecursiveMutex recursive_mutex2;
+ // Order 1
+ recursive_mutex1.Lock();
+ recursive_mutex2.Lock();
+ CHECK(recursive_mutex1.TryLock());
+ CHECK(recursive_mutex2.TryLock());
+ recursive_mutex1.Unlock();
+ recursive_mutex1.Unlock();
+ recursive_mutex2.Unlock();
+ recursive_mutex2.Unlock();
+ // Order 2
+ recursive_mutex1.Lock();
+ CHECK(recursive_mutex1.TryLock());
+ recursive_mutex2.Lock();
+ CHECK(recursive_mutex2.TryLock());
+ recursive_mutex2.Unlock();
+ recursive_mutex1.Unlock();
+ recursive_mutex2.Unlock();
+ recursive_mutex1.Unlock();
+}
diff --git a/test/cctest/test-platform-linux.cc b/test/cctest/test-platform-linux.cc
index 6bb2902..7347aac 100644
--- a/test/cctest/test-platform-linux.cc
+++ b/test/cctest/test-platform-linux.cc
@@ -52,18 +52,16 @@
int count = 0;
int last_count = -1;
do {
- CHECK_EQ(0, mutex->Lock());
+ LockGuard<Mutex> lock_guard(mutex);
count = busy_lock_counter;
- CHECK_EQ(0, mutex->Unlock());
yield();
} while (count % 2 == rem && count < kLockCounterLimit);
if (count >= kLockCounterLimit) break;
- CHECK_EQ(0, mutex->Lock());
+ LockGuard<Mutex> lock_guard(mutex);
CHECK_EQ(count, busy_lock_counter);
CHECK(last_count == -1 || count == last_count + 1);
busy_lock_counter++;
last_count = count;
- CHECK_EQ(0, mutex->Unlock());
yield();
}
}
@@ -79,15 +77,14 @@
// increment a variable.
TEST(BusyLock) {
pthread_t other;
- Mutex* mutex = OS::CreateMutex();
+ Mutex mutex;
int thread_created = pthread_create(&other,
NULL,
&RunTestBusyLock,
- mutex);
+ &mutex);
CHECK_EQ(0, thread_created);
- LoopIncrement(mutex, 1);
+ LoopIncrement(&mutex, 1);
pthread_join(other, NULL);
- delete mutex;
}
diff --git a/test/cctest/test-time.cc b/test/cctest/test-time.cc
new file mode 100644
index 0000000..b53ee73
--- /dev/null
+++ b/test/cctest/test-time.cc
@@ -0,0 +1,116 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <cstdlib>
+
+#include "v8.h"
+
+#include "cctest.h"
+#if V8_OS_WIN
+#include "win32-headers.h"
+#endif
+
+using namespace v8::internal;
+
+
+TEST(TimeDeltaFromAndIn) {
+ CHECK(TimeDelta::FromDays(2) == TimeDelta::FromHours(48));
+ CHECK(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180));
+ CHECK(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120));
+ CHECK(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000));
+ CHECK(TimeDelta::FromMilliseconds(2) == TimeDelta::FromMicroseconds(2000));
+ CHECK_EQ(static_cast<int>(13), TimeDelta::FromDays(13).InDays());
+ CHECK_EQ(static_cast<int>(13), TimeDelta::FromHours(13).InHours());
+ CHECK_EQ(static_cast<int>(13), TimeDelta::FromMinutes(13).InMinutes());
+ CHECK_EQ(static_cast<int64_t>(13), TimeDelta::FromSeconds(13).InSeconds());
+ CHECK_EQ(13.0, TimeDelta::FromSeconds(13).InSecondsF());
+ CHECK_EQ(static_cast<int64_t>(13),
+ TimeDelta::FromMilliseconds(13).InMilliseconds());
+ CHECK_EQ(13.0, TimeDelta::FromMilliseconds(13).InMillisecondsF());
+ CHECK_EQ(static_cast<int64_t>(13),
+ TimeDelta::FromMicroseconds(13).InMicroseconds());
+}
+
+
+TEST(TimeJsTime) {
+ Time t = Time::FromJsTime(700000.3);
+ CHECK_EQ(700000.3, t.ToJsTime());
+}
+
+
+#if V8_OS_POSIX
+TEST(TimeFromTimeVal) {
+ Time null;
+ CHECK(null.IsNull());
+ CHECK(null == Time::FromTimeval(null.ToTimeval()));
+ Time now = Time::Now();
+ CHECK(now == Time::FromTimeval(now.ToTimeval()));
+ Time now_sys = Time::NowFromSystemTime();
+ CHECK(now_sys == Time::FromTimeval(now_sys.ToTimeval()));
+ Time unix_epoch = Time::UnixEpoch();
+ CHECK(unix_epoch == Time::FromTimeval(unix_epoch.ToTimeval()));
+ Time max = Time::Max();
+ CHECK(max.IsMax());
+ CHECK(max == Time::FromTimeval(max.ToTimeval()));
+}
+#endif
+
+
+#if V8_OS_WIN
+TEST(TimeFromFiletime) {
+ Time null;
+ CHECK(null.IsNull());
+ CHECK(null == Time::FromFiletime(null.ToFiletime()));
+ Time now = Time::Now();
+ CHECK(now == Time::FromFiletime(now.ToFiletime()));
+ Time now_sys = Time::NowFromSystemTime();
+ CHECK(now_sys == Time::FromFiletime(now_sys.ToFiletime()));
+ Time unix_epoch = Time::UnixEpoch();
+ CHECK(unix_epoch == Time::FromFiletime(unix_epoch.ToFiletime()));
+ Time max = Time::Max();
+ CHECK(max.IsMax());
+ CHECK(max == Time::FromFiletime(max.ToFiletime()));
+}
+#endif
+
+
+TEST(TimeTicksIsMonotonic) {
+ TimeTicks previous_normal_ticks;
+ TimeTicks previous_highres_ticks;
+ ElapsedTimer timer;
+ timer.Start();
+ while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
+ TimeTicks normal_ticks = TimeTicks::Now();
+ TimeTicks highres_ticks = TimeTicks::HighResNow();
+ CHECK_GE(normal_ticks, previous_normal_ticks);
+ CHECK_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
+ CHECK_GE(highres_ticks, previous_highres_ticks);
+ CHECK_GE((highres_ticks - previous_highres_ticks).InMicroseconds(), 0);
+ previous_normal_ticks = normal_ticks;
+ previous_highres_ticks = highres_ticks;
+ }
+}
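
The checks above double as documentation for the platform time API that this
commit wires into the build (see the platform/time.* and elapsed-timer.h
entries added to v8.gyp below). A minimal usage sketch follows, assuming only
the v8::internal interfaces exercised by these tests; the function names are
illustrative and not part of the commit:

    #include "v8.h"  // pulls in the platform layer used by the tests above

    using namespace v8::internal;

    // Bound a polling loop to roughly 100ms of wall-clock time.
    void PollWithDeadline() {
      ElapsedTimer timer;
      timer.Start();
      while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
        // ... one unit of work per iteration ...
      }
    }

    // Time a section with the monotonic high-resolution tick clock.
    int64_t MeasureMicros() {
      TimeTicks start = TimeTicks::HighResNow();
      // ... code under measurement ...
      return (TimeTicks::HighResNow() - start).InMicroseconds();
    }

TimeTicks::HighResNow() is the better choice for measurement because, as
TimeTicksIsMonotonic verifies, tick deltas never go negative, whereas
wall-clock Time can be adjusted underneath the process.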
diff --git a/test/mjsunit/compiler/type-feedback-after-throw.js b/test/mjsunit/compiler/type-feedback-after-throw.js
new file mode 100644
index 0000000..891e315
--- /dev/null
+++ b/test/mjsunit/compiler/type-feedback-after-throw.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ throw "Error";
+ return 1 > 5;
+}
+
+try { foo() } catch(e) {}
+try { foo() } catch(e) {}
+%OptimizeFunctionOnNextCall(foo);
+try { foo() } catch(e) {}
diff --git a/test/mjsunit/fast-literal.js b/test/mjsunit/fast-literal.js
new file mode 100644
index 0000000..822d906
--- /dev/null
+++ b/test/mjsunit/fast-literal.js
@@ -0,0 +1,42 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --no-inline-new --nouse-allocation-folding
+
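+// Debug-only runtime helper; assumed here to force a GC roughly every tenth
+// allocation, so the optimized literals below allocate across GC cycles.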
+%SetAllocationTimeout(10, 0);
+function f() {
+ return [[1, 2, 3], [1.1, 1.2, 1.3], [[], [], []]];
+}
+
+f(); f(); f();
+%OptimizeFunctionOnNextCall(f);
+for (var i = 0; i < 1000; i++) {
+ f();
+}
diff --git a/test/mjsunit/regress/regress-crbug-280333.js b/test/mjsunit/regress/regress-crbug-280333.js
new file mode 100644
index 0000000..ca3fdc7
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-280333.js
@@ -0,0 +1,47 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function funky() { return false; }
+var global;
+
+function foo(x, fun) {
+ var a = x + 1;
+ var b = x + 2; // Need another Simulate to fold the first one into.
+ global = true; // Need a side effect to deopt to.
+ if (fun()) {
+ return a;
+ }
+ return 0;
+}
+
+assertEquals(0, foo(1, funky));
+assertEquals(0, foo(1, funky));
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(0, foo(1, funky));
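+// A closure returning true takes the branch that was never hit while
+// collecting feedback; a must still be available there, so foo yields
+// x + 1 == 2.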
+assertEquals(2, foo(1, function() { return true; }));
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index eefd142..3f99b13 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -436,8 +436,13 @@
'../../src/optimizing-compiler-thread.cc',
'../../src/parser.cc',
'../../src/parser.h',
+ '../../src/platform/elapsed-timer.h',
+ '../../src/platform/time.cc',
+ '../../src/platform/time.h',
'../../src/platform-posix.h',
'../../src/platform.h',
+ '../../src/platform/mutex.cc',
+ '../../src/platform/mutex.h',
'../../src/preparse-data-format.h',
'../../src/preparse-data.cc',
'../../src/preparse-data.h',
@@ -688,6 +693,9 @@
]
}],
],
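+      # Assumed rationale: the new platform/time.cc calls clock_gettime(),
+      # which glibc versions before 2.17 provide via librt.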
+ 'libraries': [
+ '-lrt'
+ ]
},
'sources': [ ### gcmole(os:linux) ###
'../../src/platform-linux.cc',
@@ -700,7 +708,7 @@
'CAN_USE_VFP_INSTRUCTIONS',
],
'sources': [
- '../../src/platform-posix.cc',
+ '../../src/platform-posix.cc'
],
'conditions': [
['host_os=="mac"', {
@@ -716,6 +724,15 @@
}],
],
}, {
+ 'link_settings': {
+ 'target_conditions': [
+ ['_toolset=="host"', {
+ 'libraries': [
+ '-lrt'
+ ]
+ }]
+ ]
+ },
'sources': [
'../../src/platform-linux.cc'
]
@@ -763,7 +780,7 @@
]},
'sources': [
'../../src/platform-solaris.cc',
- '../../src/platform-posix.cc',
+ '../../src/platform-posix.cc'
],
}
],
@@ -786,13 +803,13 @@
['build_env=="Cygwin"', {
'sources': [
'../../src/platform-cygwin.cc',
- '../../src/platform-posix.cc',
+ '../../src/platform-posix.cc'
],
}, {
'sources': [
'../../src/platform-win32.cc',
- '../../src/win32-math.h',
'../../src/win32-math.cc',
+ '../../src/win32-math.h'
],
}],
],
@@ -802,8 +819,8 @@
}, {
'sources': [
'../../src/platform-win32.cc',
- '../../src/win32-math.h',
'../../src/win32-math.cc',
+ '../../src/win32-math.h'
],
'msvs_disabled_warnings': [4351, 4355, 4800],
'link_settings': {