blob: ed202242bad28715258a6291ce68ddeaf5b5b862 [file] [log] [blame]
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001// Copyright 2012 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/optimizing-compile-dispatcher.h"
6
7#include "src/base/atomicops.h"
8#include "src/full-codegen/full-codegen.h"
9#include "src/isolate.h"
Ben Murdoch097c5b22016-05-18 11:27:45 +010010#include "src/tracing/trace-event.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000011#include "src/v8.h"
12
13namespace v8 {
14namespace internal {
15
16namespace {
17
18void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
19 bool restore_function_code) {
20 // The recompile job is allocated in the CompilationInfo's zone.
21 CompilationInfo* info = job->info();
22 if (restore_function_code) {
Ben Murdochda12d292016-06-02 14:46:10 +010023 Handle<JSFunction> function = info->closure();
24 function->ReplaceCode(function->shared()->code());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000025 }
26 delete info;
27}
28
29} // namespace
30
31
32class OptimizingCompileDispatcher::CompileTask : public v8::Task {
33 public:
34 explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
35 OptimizingCompileDispatcher* dispatcher =
36 isolate_->optimizing_compile_dispatcher();
37 base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
38 ++dispatcher->ref_count_;
39 }
40
41 virtual ~CompileTask() {}
42
43 private:
44 // v8::Task overrides.
45 void Run() override {
46 DisallowHeapAllocation no_allocation;
47 DisallowHandleAllocation no_handles;
48 DisallowHandleDereference no_deref;
49
50 OptimizingCompileDispatcher* dispatcher =
51 isolate_->optimizing_compile_dispatcher();
52 {
53 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
Ben Murdoch097c5b22016-05-18 11:27:45 +010054 TRACE_EVENT0("v8", "V8.RecompileConcurrent");
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000055
56 if (dispatcher->recompilation_delay_ != 0) {
57 base::OS::Sleep(base::TimeDelta::FromMilliseconds(
58 dispatcher->recompilation_delay_));
59 }
60
61 dispatcher->CompileNext(dispatcher->NextInput(true));
62 }
63 {
64 base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
65 if (--dispatcher->ref_count_ == 0) {
66 dispatcher->ref_count_zero_.NotifyOne();
67 }
68 }
69 }
70
71 Isolate* isolate_;
72
73 DISALLOW_COPY_AND_ASSIGN(CompileTask);
74};
75
76
// Tears down the dispatcher.  By this point all background CompileTasks must
// have completed (ref_count_ == 0) and the input queue must be empty.
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    // The lock guard itself is only needed for the debug read of ref_count_.
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  DCHECK_EQ(0, input_queue_length_);
  // NOTE(review): input_queue_ is presumably NewArray-allocated by the
  // constructor (not visible here) — DeleteArray pairs with that.
  DeleteArray(input_queue_);
}
87
88
89OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
90 bool check_if_flushing) {
91 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
92 if (input_queue_length_ == 0) return NULL;
93 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
94 DCHECK_NOT_NULL(job);
95 input_queue_shift_ = InputQueueIndex(1);
96 input_queue_length_--;
97 if (check_if_flushing) {
98 if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
99 if (!job->info()->is_osr()) {
100 AllowHandleDereference allow_handle_dereference;
101 DisposeOptimizedCompileJob(job, true);
102 }
103 return NULL;
104 }
105 }
106 return job;
107}
108
109
110void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
111 if (!job) return;
112
113 // The function may have already been optimized by OSR. Simply continue.
114 OptimizedCompileJob::Status status = job->OptimizeGraph();
115 USE(status); // Prevent an unused-variable error in release mode.
116 DCHECK(status != OptimizedCompileJob::FAILED);
117
118 // The function may have already been optimized by OSR. Simply continue.
119 // Use a mutex to make sure that functions marked for install
120 // are always also queued.
121 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
122 output_queue_.push(job);
123 isolate_->stack_guard()->RequestInstallCode();
124}
125
126
127void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
128 for (;;) {
129 OptimizedCompileJob* job = NULL;
130 {
131 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
132 if (output_queue_.empty()) return;
133 job = output_queue_.front();
134 output_queue_.pop();
135 }
136
137 // OSR jobs are dealt with separately.
138 if (!job->info()->is_osr()) {
139 DisposeOptimizedCompileJob(job, restore_function_code);
140 }
141 }
142}
143
144
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000145void OptimizingCompileDispatcher::Flush() {
146 base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
147 if (FLAG_block_concurrent_recompilation) Unblock();
148 {
149 base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
150 while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
151 base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
152 }
153 FlushOutputQueue(true);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000154 if (FLAG_trace_concurrent_recompilation) {
155 PrintF(" ** Flushed concurrent recompilation queues.\n");
156 }
157}
158
159
// Shuts down concurrent recompilation: waits for all background tasks,
// then either finishes remaining work synchronously (test mode with an
// artificial delay) or discards it.
void OptimizingCompileDispatcher::Stop() {
  // Same FLUSH/wait protocol as Flush(): in-flight tasks dispose the jobs
  // they dequeue, and we block until the last task has dropped its reference.
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    // Discard finished jobs without restoring function code.
    FlushOutputQueue(false);
  }
}
178
179
// Main-thread half of concurrent recompilation: pops every finished job off
// the output queue and either finalizes it (installing the optimized code)
// or discards it when the function was optimized in the meantime.
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    // Re-handlify the closure in this function's HandleScope.
    // NOTE(review): presumably because the info's own handle lives in the
    // compilation's handle area — confirm against CompilationInfo.
    Handle<JSFunction> function(*info->closure());
    if (function->IsOptimized()) {
      if (FLAG_trace_concurrent_recompilation) {
        PrintF(" ** Aborting compilation for ");
        function->ShortPrint();
        PrintF(" as it has already been optimized.\n");
      }
      DisposeOptimizedCompileJob(job, false);
    } else {
      Compiler::FinalizeOptimizedCompileJob(job);
    }
  }
}
205
206
207void OptimizingCompileDispatcher::QueueForOptimization(
208 OptimizedCompileJob* job) {
209 DCHECK(IsQueueAvailable());
Ben Murdochda12d292016-06-02 14:46:10 +0100210 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000211 // Add job to the back of the input queue.
212 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
213 DCHECK_LT(input_queue_length_, input_queue_capacity_);
214 input_queue_[InputQueueIndex(input_queue_length_)] = job;
215 input_queue_length_++;
216 }
217 if (FLAG_block_concurrent_recompilation) {
218 blocked_jobs_++;
219 } else {
220 V8::GetCurrentPlatform()->CallOnBackgroundThread(
221 new CompileTask(isolate_), v8::Platform::kShortRunningTask);
222 }
223}
224
225
226void OptimizingCompileDispatcher::Unblock() {
227 while (blocked_jobs_ > 0) {
228 V8::GetCurrentPlatform()->CallOnBackgroundThread(
229 new CompileTask(isolate_), v8::Platform::kShortRunningTask);
230 blocked_jobs_--;
231 }
232}
233
234
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000235} // namespace internal
236} // namespace v8