blob: 4836b9bebb3251e344fad77271a89293a0ca0967 [file] [log] [blame]
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/optimizing-compile-dispatcher.h"

#include "src/base/atomicops.h"
#include "src/full-codegen/full-codegen.h"
#include "src/isolate.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

namespace {

18void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
19 bool restore_function_code) {
20 // The recompile job is allocated in the CompilationInfo's zone.
21 CompilationInfo* info = job->info();
22 if (restore_function_code) {
23 if (info->is_osr()) {
24 if (!job->IsWaitingForInstall()) {
25 // Remove stack check that guards OSR entry on original code.
26 Handle<Code> code = info->unoptimized_code();
27 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
28 BackEdgeTable::RemoveStackCheck(code, offset);
29 }
30 } else {
31 Handle<JSFunction> function = info->closure();
32 function->ReplaceCode(function->shared()->code());
33 }
34 }
35 delete info;
36}

}  // namespace


// Background task that pops one job from the dispatcher's input queue,
// runs its backend compilation phase, and pushes it to the output queue.
// Each live task is accounted for in the dispatcher's ref_count_ so that
// Flush()/Stop() can wait for all in-flight tasks to retire.
class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 public:
  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
    // Register this task with the dispatcher under the ref-count mutex.
    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
    ++dispatcher->ref_count_;
  }

  virtual ~CompileTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    // Background compilation must not allocate on the heap or touch
    // handles; these scopes enforce that in debug builds.
    DisallowHeapAllocation no_allocation;
    DisallowHandleAllocation no_handles;
    DisallowHandleDereference no_deref;

    OptimizingCompileDispatcher* dispatcher =
        isolate_->optimizing_compile_dispatcher();
    {
      TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
      TRACE_EVENT0("v8", "V8.RecompileConcurrent");

      // Optional artificial delay before compiling — presumably used to
      // widen race windows for testing; TODO confirm against flag docs.
      if (dispatcher->recompilation_delay_ != 0) {
        base::OS::Sleep(base::TimeDelta::FromMilliseconds(
            dispatcher->recompilation_delay_));
      }

      // NextInput(true) honors FLUSH mode (may return NULL); CompileNext
      // tolerates NULL.
      dispatcher->CompileNext(dispatcher->NextInput(true));
    }
    {
      // Retire this task: wake a waiter when the last task finishes.
      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
      if (--dispatcher->ref_count_ == 0) {
        dispatcher->ref_count_zero_.NotifyOne();
      }
    }
  }

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CompileTask);
};
84
85
OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
  {
    // No CompileTask may still be in flight when the dispatcher dies.
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    DCHECK_EQ(0, ref_count_);
  }
#endif
  // The input queue must have been drained (by Stop()/Flush()) already.
  DCHECK_EQ(0, input_queue_length_);
  DeleteArray(input_queue_);
  if (FLAG_concurrent_osr) {
#ifdef DEBUG
    // Every OSR buffer slot must have been cleared (see FlushOsrBuffer).
    for (int i = 0; i < osr_buffer_capacity_; i++) {
      CHECK_NULL(osr_buffer_[i]);
    }
#endif
    DeleteArray(osr_buffer_);
  }
}
104
105
106OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
107 bool check_if_flushing) {
108 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
109 if (input_queue_length_ == 0) return NULL;
110 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
111 DCHECK_NOT_NULL(job);
112 input_queue_shift_ = InputQueueIndex(1);
113 input_queue_length_--;
114 if (check_if_flushing) {
115 if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
116 if (!job->info()->is_osr()) {
117 AllowHandleDereference allow_handle_dereference;
118 DisposeOptimizedCompileJob(job, true);
119 }
120 return NULL;
121 }
122 }
123 return job;
124}
125
126
127void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
128 if (!job) return;
129
130 // The function may have already been optimized by OSR. Simply continue.
131 OptimizedCompileJob::Status status = job->OptimizeGraph();
132 USE(status); // Prevent an unused-variable error in release mode.
133 DCHECK(status != OptimizedCompileJob::FAILED);
134
135 // The function may have already been optimized by OSR. Simply continue.
136 // Use a mutex to make sure that functions marked for install
137 // are always also queued.
138 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
139 output_queue_.push(job);
140 isolate_->stack_guard()->RequestInstallCode();
141}
142
143
144void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
145 for (;;) {
146 OptimizedCompileJob* job = NULL;
147 {
148 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
149 if (output_queue_.empty()) return;
150 job = output_queue_.front();
151 output_queue_.pop();
152 }
153
154 // OSR jobs are dealt with separately.
155 if (!job->info()->is_osr()) {
156 DisposeOptimizedCompileJob(job, restore_function_code);
157 }
158 }
159}
160
161
162void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
163 for (int i = 0; i < osr_buffer_capacity_; i++) {
164 if (osr_buffer_[i] != NULL) {
165 DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
166 osr_buffer_[i] = NULL;
167 }
168 }
169}
170
171
void OptimizingCompileDispatcher::Flush() {
  // Enter FLUSH mode: background tasks popping via NextInput(true) will
  // dispose queued jobs instead of compiling them.
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    // Wait until every in-flight CompileTask has retired, then switch back
    // to normal COMPILE mode.
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }
  // Dispose finished jobs, restoring the functions' unoptimized code.
  FlushOutputQueue(true);
  if (FLAG_concurrent_osr) FlushOsrBuffer(true);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF(" ** Flushed concurrent recompilation queues.\n");
  }
}
186
187
void OptimizingCompileDispatcher::Stop() {
  // Same drain protocol as Flush(): enter FLUSH mode, release any blocked
  // jobs, and wait until all background tasks have retired.
  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
  if (FLAG_block_concurrent_recompilation) Unblock();
  {
    base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
    while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
  }

  if (recompilation_delay_ != 0) {
    // At this point the optimizing compiler thread's event loop has stopped.
    // There is no need for a mutex when reading input_queue_length_.
    // With an artificial delay active, finish the remaining jobs here
    // instead of discarding them.
    while (input_queue_length_ > 0) CompileNext(NextInput());
    InstallOptimizedFunctions();
  } else {
    // Discard pending results; functions keep their unoptimized code.
    FlushOutputQueue(false);
  }

  if (FLAG_concurrent_osr) FlushOsrBuffer(false);

  // Report how many OSR lookups found a ready candidate (see osr_hits_).
  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
      FLAG_concurrent_osr) {
    PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
  }
}
213
214
void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
  HandleScope handle_scope(isolate_);

  for (;;) {
    OptimizedCompileJob* job = NULL;
    {
      // Hold the lock only while popping; installation runs without it.
      base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
      if (output_queue_.empty()) return;
      job = output_queue_.front();
      output_queue_.pop();
    }
    CompilationInfo* info = job->info();
    Handle<JSFunction> function(*info->closure());
    if (info->is_osr()) {
      if (FLAG_trace_osr) {
        PrintF("[COSR - ");
        function->ShortPrint();
        PrintF(" is ready for install and entry at AST id %d]\n",
               info->osr_ast_id().ToInt());
      }
      // The job stays in the OSR buffer until FindReadyOSRCandidate picks
      // it up (only jobs that are IsWaitingForInstall() qualify there).
      job->WaitForInstall();
      // Remove stack check that guards OSR entry on original code.
      Handle<Code> code = info->unoptimized_code();
      uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
      BackEdgeTable::RemoveStackCheck(code, offset);
    } else {
      if (function->IsOptimized()) {
        // Someone else (e.g. OSR) already optimized this function; drop
        // this job's result.
        if (FLAG_trace_concurrent_recompilation) {
          PrintF(" ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
        }
        DisposeOptimizedCompileJob(job, false);
      } else {
        // Finalize the job; fall back to the shared unoptimized code if no
        // optimized code was produced.
        MaybeHandle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
        function->ReplaceCode(code.is_null() ? function->shared()->code()
                                             : *code.ToHandleChecked());
      }
    }
  }
}
256
257
258void OptimizingCompileDispatcher::QueueForOptimization(
259 OptimizedCompileJob* job) {
260 DCHECK(IsQueueAvailable());
261 CompilationInfo* info = job->info();
262 if (info->is_osr()) {
263 osr_attempts_++;
264 AddToOsrBuffer(job);
265 // Add job to the front of the input queue.
266 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
267 DCHECK_LT(input_queue_length_, input_queue_capacity_);
268 // Move shift_ back by one.
269 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
270 input_queue_[InputQueueIndex(0)] = job;
271 input_queue_length_++;
272 } else {
273 // Add job to the back of the input queue.
274 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
275 DCHECK_LT(input_queue_length_, input_queue_capacity_);
276 input_queue_[InputQueueIndex(input_queue_length_)] = job;
277 input_queue_length_++;
278 }
279 if (FLAG_block_concurrent_recompilation) {
280 blocked_jobs_++;
281 } else {
282 V8::GetCurrentPlatform()->CallOnBackgroundThread(
283 new CompileTask(isolate_), v8::Platform::kShortRunningTask);
284 }
285}
286
287
288void OptimizingCompileDispatcher::Unblock() {
289 while (blocked_jobs_ > 0) {
290 V8::GetCurrentPlatform()->CallOnBackgroundThread(
291 new CompileTask(isolate_), v8::Platform::kShortRunningTask);
292 blocked_jobs_--;
293 }
294}
295
296
297OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
298 Handle<JSFunction> function, BailoutId osr_ast_id) {
299 for (int i = 0; i < osr_buffer_capacity_; i++) {
300 OptimizedCompileJob* current = osr_buffer_[i];
301 if (current != NULL && current->IsWaitingForInstall() &&
302 current->info()->HasSameOsrEntry(function, osr_ast_id)) {
303 osr_hits_++;
304 osr_buffer_[i] = NULL;
305 return current;
306 }
307 }
308 return NULL;
309}
310
311
312bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
313 BailoutId osr_ast_id) {
314 for (int i = 0; i < osr_buffer_capacity_; i++) {
315 OptimizedCompileJob* current = osr_buffer_[i];
316 if (current != NULL &&
317 current->info()->HasSameOsrEntry(function, osr_ast_id)) {
318 return !current->IsWaitingForInstall();
319 }
320 }
321 return false;
322}
323
324
325bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
326 for (int i = 0; i < osr_buffer_capacity_; i++) {
327 OptimizedCompileJob* current = osr_buffer_[i];
328 if (current != NULL && *current->info()->closure() == function) {
329 return !current->IsWaitingForInstall();
330 }
331 }
332 return false;
333}
334
335
336void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
337 // Find the next slot that is empty or has a stale job.
338 OptimizedCompileJob* stale = NULL;
339 while (true) {
340 stale = osr_buffer_[osr_buffer_cursor_];
341 if (stale == NULL || stale->IsWaitingForInstall()) break;
342 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
343 }
344
345 // Add to found slot and dispose the evicted job.
346 if (stale != NULL) {
347 DCHECK(stale->IsWaitingForInstall());
348 CompilationInfo* info = stale->info();
349 if (FLAG_trace_osr) {
350 PrintF("[COSR - Discarded ");
351 info->closure()->PrintName();
352 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
353 }
354 DisposeOptimizedCompileJob(stale, false);
355 }
356 osr_buffer_[osr_buffer_cursor_] = job;
357 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
358}
}  // namespace internal
}  // namespace v8