blob: a3d2002178fc625045307f41dd653bcdde02a60f [file] [log] [blame]
Ben Murdochb0fe1622011-05-05 13:52:32 +01001// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "codegen.h"
31#include "deoptimizer.h"
32#include "disasm.h"
33#include "full-codegen.h"
34#include "global-handles.h"
35#include "macro-assembler.h"
36#include "prettyprinter.h"
37
38
39namespace v8 {
40namespace internal {
41
// Lazily created chunks of executable memory holding the generated
// deoptimization entry tables, one per bailout type (see
// GetDeoptimizationEntry / CreateCode).
LargeObjectChunk* Deoptimizer::eager_deoptimization_entry_code_ = NULL;
LargeObjectChunk* Deoptimizer::lazy_deoptimization_entry_code_ = NULL;
// The single active deoptimizer; installed by New() and detached by Grab().
Deoptimizer* Deoptimizer::current_ = NULL;
// Head of the singly linked list of code objects being deoptimized.
DeoptimizingCodeListNode* Deoptimizer::deoptimizing_code_list_ = NULL;
46
47
48Deoptimizer* Deoptimizer::New(JSFunction* function,
49 BailoutType type,
50 unsigned bailout_id,
51 Address from,
52 int fp_to_sp_delta) {
53 Deoptimizer* deoptimizer =
54 new Deoptimizer(function, type, bailout_id, from, fp_to_sp_delta);
55 ASSERT(current_ == NULL);
56 current_ = deoptimizer;
57 return deoptimizer;
58}
59
60
61Deoptimizer* Deoptimizer::Grab() {
62 Deoptimizer* result = current_;
63 ASSERT(result != NULL);
64 result->DeleteFrameDescriptions();
65 current_ = NULL;
66 return result;
67}
68
69
// Emits the table of |count| deoptimization entry points for the given
// bailout type into |masm| (used by CreateCode).
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                int count,
                                                BailoutType type) {
  TableEntryGenerator generator(masm, type, count);
  generator.Generate();
}
76
77
78class DeoptimizingVisitor : public OptimizedFunctionVisitor {
79 public:
80 virtual void EnterContext(Context* context) {
81 if (FLAG_trace_deopt) {
82 PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
83 reinterpret_cast<intptr_t>(context));
84 }
85 }
86
87 virtual void VisitFunction(JSFunction* function) {
88 Deoptimizer::DeoptimizeFunction(function);
89 }
90
91 virtual void LeaveContext(Context* context) {
92 context->ClearOptimizedFunctions();
93 }
94};
95
96
// Deoptimizes the optimized functions of every global context.
void Deoptimizer::DeoptimizeAll() {
  // No heap allocation may happen while walking the context lists.
  AssertNoAllocation no_allocation;

  if (FLAG_trace_deopt) {
    PrintF("[deoptimize all contexts]\n");
  }

  DeoptimizingVisitor visitor;
  VisitAllOptimizedFunctions(&visitor);
}
107
108
// Deoptimizes all optimized functions belonging to the global context
// of |object| (which may be a global object or its proxy).
void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
  // No heap allocation may happen while walking the function lists.
  AssertNoAllocation no_allocation;

  DeoptimizingVisitor visitor;
  VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
}
115
116
117void Deoptimizer::VisitAllOptimizedFunctionsForContext(
118 Context* context, OptimizedFunctionVisitor* visitor) {
119 AssertNoAllocation no_allocation;
120
121 ASSERT(context->IsGlobalContext());
122
123 visitor->EnterContext(context);
124 // Run through the list of optimized functions and deoptimize them.
125 Object* element = context->OptimizedFunctionsListHead();
126 while (!element->IsUndefined()) {
127 JSFunction* element_function = JSFunction::cast(element);
128 // Get the next link before deoptimizing as deoptimizing will clear the
129 // next link.
130 element = element_function->next_function_link();
131 visitor->VisitFunction(element_function);
132 }
133 visitor->LeaveContext(context);
134}
135
136
137void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
138 JSObject* object, OptimizedFunctionVisitor* visitor) {
139 AssertNoAllocation no_allocation;
140
141 if (object->IsJSGlobalProxy()) {
142 Object* proto = object->GetPrototype();
143 ASSERT(proto->IsJSGlobalObject());
144 VisitAllOptimizedFunctionsForContext(
145 GlobalObject::cast(proto)->global_context(), visitor);
146 } else if (object->IsGlobalObject()) {
147 VisitAllOptimizedFunctionsForContext(
148 GlobalObject::cast(object)->global_context(), visitor);
149 }
150}
151
152
153void Deoptimizer::VisitAllOptimizedFunctions(
154 OptimizedFunctionVisitor* visitor) {
155 AssertNoAllocation no_allocation;
156
157 // Run through the list of all global contexts and deoptimize.
158 Object* global = Heap::global_contexts_list();
159 while (!global->IsUndefined()) {
160 VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
161 visitor);
162 global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
163 }
164}
165
166
167void Deoptimizer::HandleWeakDeoptimizedCode(
168 v8::Persistent<v8::Value> obj, void* data) {
169 DeoptimizingCodeListNode* node =
170 reinterpret_cast<DeoptimizingCodeListNode*>(data);
171 RemoveDeoptimizingCode(*node->code());
172#ifdef DEBUG
173 node = Deoptimizer::deoptimizing_code_list_;
174 while (node != NULL) {
175 ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
176 node = node->next();
177 }
178#endif
179}
180
181
// Static trampoline used by the generated deoptimization entry code to
// run the frame translation on a given deoptimizer.
void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
  deoptimizer->DoComputeOutputFrames();
}
185
186
// Captures everything needed to deoptimize: locates the optimized code
// for the requested bailout type and allocates the input frame
// description.  Heap allocation is disallowed from here until
// DeleteFrameDescriptions re-enables it.
Deoptimizer::Deoptimizer(JSFunction* function,
                         BailoutType type,
                         unsigned bailout_id,
                         Address from,
                         int fp_to_sp_delta)
    : function_(function),
      bailout_id_(bailout_id),
      bailout_type_(type),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      output_count_(0),
      output_(NULL),
      integer32_values_(NULL),
      double_values_(NULL) {
  // Trace the bailout under the flag matching its kind.
  if (FLAG_trace_deopt && type != OSR) {
    PrintF("**** DEOPT: ");
    function->PrintName();
    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  } else if (FLAG_trace_osr && type == OSR) {
    PrintF("**** OSR: ");
    function->PrintName();
    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  }
  // Find the optimized code.
  if (type == EAGER) {
    // Eager deoptimization happens from within the optimized code
    // itself, so no return address is recorded.
    ASSERT(from == NULL);
    optimized_code_ = function_->code();
  } else if (type == LAZY) {
    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
    ASSERT(optimized_code_ != NULL);
  } else if (type == OSR) {
    // The function has already been optimized and we're transitioning
    // from the unoptimized shared version to the optimized one in the
    // function. The return address (from) points to unoptimized code.
    optimized_code_ = function_->code();
    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
    ASSERT(!optimized_code_->contains(from));
  }
  // Disable allocation until the frame descriptions are deleted again.
  ASSERT(Heap::allow_allocation(false));
  unsigned size = ComputeInputFrameSize();
  input_ = new(size) FrameDescription(size, function);
}
235
236
Deoptimizer::~Deoptimizer() {
  // DeleteFrameDescriptions must have run before destruction.
  ASSERT(input_ == NULL && output_ == NULL);
  delete[] integer32_values_;
  delete[] double_values_;
}
242
243
// Frees the input and output frame descriptions and re-enables heap
// allocation (disabled since the constructor ran).
void Deoptimizer::DeleteFrameDescriptions() {
  delete input_;
  for (int i = 0; i < output_count_; ++i) {
    // An output frame may alias the input frame; guard against
    // deleting the same description twice.
    if (output_[i] != input_) delete output_[i];
  }
  delete[] output_;
  input_ = NULL;
  output_ = NULL;
  ASSERT(!Heap::allow_allocation(true));
}
254
255
256Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
257 ASSERT(id >= 0);
258 if (id >= kNumberOfEntries) return NULL;
259 LargeObjectChunk* base = NULL;
260 if (type == EAGER) {
261 if (eager_deoptimization_entry_code_ == NULL) {
262 eager_deoptimization_entry_code_ = CreateCode(type);
263 }
264 base = eager_deoptimization_entry_code_;
265 } else {
266 if (lazy_deoptimization_entry_code_ == NULL) {
267 lazy_deoptimization_entry_code_ = CreateCode(type);
268 }
269 base = lazy_deoptimization_entry_code_;
270 }
271 return
272 static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
273}
274
275
276int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
277 LargeObjectChunk* base = NULL;
278 if (type == EAGER) {
279 base = eager_deoptimization_entry_code_;
280 } else {
281 base = lazy_deoptimization_entry_code_;
282 }
283 if (base == NULL ||
284 addr < base->GetStartAddress() ||
285 addr >= base->GetStartAddress() +
286 (kNumberOfEntries * table_entry_size_)) {
287 return kNotDeoptimizationEntry;
288 }
289 ASSERT_EQ(0,
290 static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
291 return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
292}
293
294
// One-time initialization hook for the deoptimizer subsystem;
// intentionally empty for now (entry tables are created lazily).
void Deoptimizer::Setup() {
  // Do nothing yet.
}
298
299
300void Deoptimizer::TearDown() {
301 if (eager_deoptimization_entry_code_ != NULL) {
302 eager_deoptimization_entry_code_->Free(EXECUTABLE);
303 eager_deoptimization_entry_code_ = NULL;
304 }
305 if (lazy_deoptimization_entry_code_ != NULL) {
306 lazy_deoptimization_entry_code_->Free(EXECUTABLE);
307 lazy_deoptimization_entry_code_ = NULL;
308 }
309}
310
311
Steve Block9fac8402011-05-12 15:51:54 +0100312int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
313 unsigned id,
314 SharedFunctionInfo* shared) {
Ben Murdochb0fe1622011-05-05 13:52:32 +0100315 // TODO(kasperl): For now, we do a simple linear search for the PC
316 // offset associated with the given node id. This should probably be
317 // changed to a binary search.
318 int length = data->DeoptPoints();
319 Smi* smi_id = Smi::FromInt(id);
320 for (int i = 0; i < length; i++) {
321 if (data->AstId(i) == smi_id) {
322 return data->PcAndState(i)->value();
323 }
324 }
325 PrintF("[couldn't find pc offset for node=%u]\n", id);
326 PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
327 // Print the source code if available.
328 HeapStringAllocator string_allocator;
329 StringStream stream(&string_allocator);
330 shared->SourceCodePrint(&stream, -1);
331 PrintF("[source:\n%s\n]", *stream.ToCString());
332
333 UNREACHABLE();
334 return -1;
335}
336
337
338int Deoptimizer::GetDeoptimizedCodeCount() {
339 int length = 0;
340 DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
341 while (node != NULL) {
342 length++;
343 node = node->next();
344 }
345 return length;
346}
347
348
// Builds the unoptimized ("output") frame descriptions from the
// optimized ("input") frame by interpreting the translation recorded
// for bailout_id_ in the code's deoptimization data.  OSR bailouts are
// handled by a dedicated routine.
void Deoptimizer::DoComputeOutputFrames() {
  if (bailout_type_ == OSR) {
    DoComputeOsrOutputFrame();
    return;
  }

  // Print some helpful diagnostic information.
  int64_t start = OS::Ticks();
  if (FLAG_trace_deopt) {
    PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
           (bailout_type_ == LAZY ? " (lazy)" : ""),
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" @%d]\n", bailout_id_);
  }

  // Determine basic deoptimization information.  The optimized frame is
  // described by the input data.
  DeoptimizationInputData* input_data =
      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
  unsigned node_id = input_data->AstId(bailout_id_)->value();
  ByteArray* translations = input_data->TranslationByteArray();
  unsigned translation_index =
      input_data->TranslationIndex(bailout_id_)->value();

  // Do the input frame to output frame(s) translation.
  TranslationIterator iterator(translations, translation_index);
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
  ASSERT(Translation::BEGIN == opcode);
  USE(opcode);
  // Read the number of output frames and allocate an array for their
  // descriptions.
  int count = iterator.Next();
  ASSERT(output_ == NULL);
  output_ = new FrameDescription*[count];
  // Per-frame lists of untagged and unboxed int32 and double values.
  integer32_values_ = new List<ValueDescriptionInteger32>[count];
  double_values_ = new List<ValueDescriptionDouble>[count];
  for (int i = 0; i < count; ++i) {
    output_[i] = NULL;
    integer32_values_[i].Initialize(0);
    double_values_[i].Initialize(0);
  }
  output_count_ = count;

  // Translate each output frame.
  for (int i = 0; i < count; ++i) {
    DoComputeFrame(&iterator, i);
  }

  // Print some helpful diagnostic information.
  if (FLAG_trace_deopt) {
    double ms = static_cast<double>(OS::Ticks() - start) / 1000;
    int index = output_count_ - 1;  // Index of the topmost frame.
    JSFunction* function = output_[index]->GetFunction();
    PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
           reinterpret_cast<intptr_t>(function));
    function->PrintName();
    PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
           node_id,
           output_[index]->GetPc(),
           FullCodeGenerator::State2String(
               static_cast<FullCodeGenerator::State>(
                   output_[index]->GetState()->value())),
           ms);
  }
}
417
418
419void Deoptimizer::InsertHeapNumberValues(int index, JavaScriptFrame* frame) {
420 // We need to adjust the stack index by one for the top-most frame.
421 int extra_slot_count = (index == output_count() - 1) ? 1 : 0;
422 List<ValueDescriptionInteger32>* ints = &integer32_values_[index];
423 for (int i = 0; i < ints->length(); i++) {
424 ValueDescriptionInteger32 value = ints->at(i);
425 double val = static_cast<double>(value.int32_value());
426 InsertHeapNumberValue(frame, value.stack_index(), val, extra_slot_count);
427 }
428
429 // Iterate over double values and convert them to a heap number.
430 List<ValueDescriptionDouble>* doubles = &double_values_[index];
431 for (int i = 0; i < doubles->length(); ++i) {
432 ValueDescriptionDouble value = doubles->at(i);
433 InsertHeapNumberValue(frame, value.stack_index(), value.double_value(),
434 extra_slot_count);
435 }
436}
437
438
// Boxes |val| as a heap number and stores it into the expression stack
// slot of |frame| addressed by |stack_index| (counted from the top).
void Deoptimizer::InsertHeapNumberValue(JavaScriptFrame* frame,
                                        int stack_index,
                                        double val,
                                        int extra_slot_count) {
  // Add one to the TOS index to take the 'state' pushed before jumping
  // to the stub that calls Runtime::NotifyDeoptimized into account.
  int tos_index = stack_index + extra_slot_count;
  // Convert the top-relative index into a bottom-relative expression
  // slot index as expected by SetExpression.
  int index = (frame->ComputeExpressionsCount() - 1) - tos_index;
  if (FLAG_trace_deopt) PrintF("Allocating a new heap number: %e\n", val);
  Handle<Object> num = Factory::NewNumber(val);
  frame->SetExpression(index, *num);
}
451
452
// Interprets one command from the translation stream, producing the
// value for a single pointer-sized slot (|output_offset|) of output
// frame |frame_index|.  Untagged int32/double values are recorded on
// the side and a GC-safe placeholder is stored in the frame; the real
// heap numbers are materialized later by InsertHeapNumberValues.
void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                                     int frame_index,
                                     unsigned output_offset) {
  disasm::NameConverter converter;
  // A GC-safe temporary placeholder that we can put in the output frame.
  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));

  // Ignore commands marked as duplicate and act on the first non-duplicate.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  while (opcode == Translation::DUPLICATE) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
    opcode = static_cast<Translation::Opcode>(iterator->Next());
  }

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
    case Translation::DUPLICATE:
      // Structural opcodes are consumed elsewhere; seeing one here
      // means the translation stream is malformed.
      UNREACHABLE();
      return;

    case Translation::REGISTER: {
      // Tagged value held in a register of the optimized frame.
      int input_reg = iterator->Next();
      intptr_t input_value = input_->GetRegister(input_reg);
      if (FLAG_trace_deopt) {
        PrintF(
            " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
            output_[frame_index]->GetTop() + output_offset,
            output_offset,
            input_value,
            converter.NameOfCPURegister(input_reg));
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }

    case Translation::INT32_REGISTER: {
      // Untagged int32 held in a register: store as a smi when it
      // fits, otherwise record it for heap-number materialization.
      int input_reg = iterator->Next();
      intptr_t value = input_->GetRegister(input_reg);
      bool is_smi = Smi::IsValid(value);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF(
            " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
            output_[frame_index]->GetTop() + output_offset,
            output_offset,
            value,
            converter.NameOfCPURegister(input_reg),
            is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddInteger32Value(frame_index,
                          output_index,
                          static_cast<int32_t>(value));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }

    case Translation::DOUBLE_REGISTER: {
      // Unboxed double held in a floating-point register.
      int input_reg = iterator->Next();
      double value = input_->GetDoubleRegister(input_reg);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               DoubleRegister::AllocationIndexToString(input_reg));
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(frame_index, output_index, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }

    case Translation::STACK_SLOT: {
      // Tagged value held in a stack slot of the optimized frame.
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      intptr_t input_value = input_->GetFrameSlot(input_offset);
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
               output_offset,
               input_value,
               input_offset);
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }

    case Translation::INT32_STACK_SLOT: {
      // Untagged int32 held in a stack slot; same smi-or-placeholder
      // handling as INT32_REGISTER.
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      intptr_t value = input_->GetFrameSlot(input_offset);
      bool is_smi = Smi::IsValid(value);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
        PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
               output_offset,
               value,
               input_offset,
               is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddInteger32Value(frame_index,
                          output_index,
                          static_cast<int32_t>(value));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }

    case Translation::DOUBLE_STACK_SLOT: {
      // Unboxed double held in a stack slot.
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      double value = input_->GetDoubleFrameSlot(input_offset);
      unsigned output_index = output_offset / kPointerSize;
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               input_offset);
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(frame_index, output_index, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }

    case Translation::LITERAL: {
      // Constant taken from the deoptimization literal array.
      Object* literal = ComputeLiteral(iterator->Next());
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        literal->ShortPrint();
        PrintF(" ; literal\n");
      }
      intptr_t value = reinterpret_cast<intptr_t>(literal);
      output_[frame_index]->SetFrameSlot(output_offset, value);
      return;
    }

    case Translation::ARGUMENTS_OBJECT: {
      // Use the arguments marker value as a sentinel and fill in the arguments
      // object after the deoptimized frame is built.
      ASSERT(frame_index == 0);  // Only supported for first frame.
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        Heap::arguments_marker()->ShortPrint();
        PrintF(" ; arguments object\n");
      }
      intptr_t value = reinterpret_cast<intptr_t>(Heap::arguments_marker());
      output_[frame_index]->SetFrameSlot(output_offset, value);
      return;
    }
  }
}
637
638
// Interprets one translation command while materializing the optimized
// frame for on-stack replacement.  The input values come from the
// unoptimized frame (so they are all tagged).  Returns false to abort
// the OSR attempt when a value cannot be represented as required.
bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
                                        int* input_offset) {
  disasm::NameConverter converter;
  FrameDescription* output = output_[0];

  // The input values are all part of the unoptimized frame so they
  // are all tagged pointers.
  uintptr_t input_value = input_->GetFrameSlot(*input_offset);
  Object* input_object = reinterpret_cast<Object*>(input_value);

  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  bool duplicate = (opcode == Translation::DUPLICATE);
  if (duplicate) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
  }

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
    case Translation::DUPLICATE:
      UNREACHABLE();  // Malformed input.
      return false;

    case Translation::REGISTER: {
      // The optimized code expects the tagged value in a register.
      int output_reg = iterator->Next();
      if (FLAG_trace_osr) {
        PrintF(" %s <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               input_value,
               *input_offset);
      }
      output->SetRegister(output_reg, input_value);
      break;
    }

    case Translation::INT32_REGISTER: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_reg = iterator->Next();
      // NOTE(review): this case rounds with FastD2I while
      // INT32_STACK_SLOT below uses DoubleToInt32 — confirm the
      // difference is intentional.
      int int32_value = input_object->IsSmi()
          ? Smi::cast(input_object)->value()
          : FastD2I(input_object->Number());
      // Abort the translation if the conversion lost information.
      if (!input_object->IsSmi() &&
          FastI2D(int32_value) != input_object->Number()) {
        if (FLAG_trace_osr) {
          PrintF("**** %g could not be converted to int32 ****\n",
                 input_object->Number());
        }
        return false;
      }
      if (FLAG_trace_osr) {
        PrintF(" %s <- %d (int32) ; [esp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               int32_value,
               *input_offset);
      }
      output->SetRegister(output_reg, int32_value);
      break;
    }

    case Translation::DOUBLE_REGISTER: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_reg = iterator->Next();
      double double_value = input_object->Number();
      if (FLAG_trace_osr) {
        PrintF(" %s <- %g (double) ; [esp + %d]\n",
               DoubleRegister::AllocationIndexToString(output_reg),
               double_value,
               *input_offset);
      }
      output->SetDoubleRegister(output_reg, double_value);
      break;
    }

    case Translation::STACK_SLOT: {
      // Tagged value copied verbatim into the optimized frame slot.
      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      if (FLAG_trace_osr) {
        PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
               output_offset,
               input_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, input_value);
      break;
    }

    case Translation::INT32_STACK_SLOT: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      int int32_value = input_object->IsSmi()
          ? Smi::cast(input_object)->value()
          : DoubleToInt32(input_object->Number());
      // Abort the translation if the conversion lost information.
      if (!input_object->IsSmi() &&
          FastI2D(int32_value) != input_object->Number()) {
        if (FLAG_trace_osr) {
          PrintF("**** %g could not be converted to int32 ****\n",
                 input_object->Number());
        }
        return false;
      }
      if (FLAG_trace_osr) {
        PrintF(" [esp + %d] <- %d (int32) ; [esp + %d]\n",
               output_offset,
               int32_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, int32_value);
      break;
    }

    case Translation::DOUBLE_STACK_SLOT: {
      static const int kLowerOffset = 0 * kPointerSize;
      static const int kUpperOffset = 1 * kPointerSize;

      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      double double_value = input_object->Number();
      // Split the raw IEEE bits across two pointer-sized frame slots.
      uint64_t int_value = BitCast<uint64_t, double>(double_value);
      int32_t lower = static_cast<int32_t>(int_value);
      int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
      if (FLAG_trace_osr) {
        PrintF(" [esp + %d] <- 0x%08x (upper bits of %g) ; [esp + %d]\n",
               output_offset + kUpperOffset,
               upper,
               double_value,
               *input_offset);
        PrintF(" [esp + %d] <- 0x%08x (lower bits of %g) ; [esp + %d]\n",
               output_offset + kLowerOffset,
               lower,
               double_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset + kLowerOffset, lower);
      output->SetFrameSlot(output_offset + kUpperOffset, upper);
      break;
    }

    case Translation::LITERAL: {
      // Just ignore non-materialized literals.
      iterator->Next();
      break;
    }

    case Translation::ARGUMENTS_OBJECT: {
      // Optimized code assumes that the argument object has not been
      // materialized and so bypasses it when doing arguments access.
      // We should have bailed out before starting the frame
      // translation.
      UNREACHABLE();
      return false;
    }
  }

  // Duplicates re-consume the same input slot; only advance otherwise.
  if (!duplicate) *input_offset -= kPointerSize;
  return true;
}
811
812
// Size in bytes of the optimized (input) frame, derived from the
// fp-to-sp delta recorded at the deoptimization site plus the fixed
// frame part.
unsigned Deoptimizer::ComputeInputFrameSize() const {
  unsigned fixed_size = ComputeFixedSize(function_);
  // The fp-to-sp delta already takes the context and the function
  // into account so we have to avoid double counting them (-2).
  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
  if (bailout_type_ == OSR) {
    // TODO(kasperl): It would be nice if we could verify that the
    // size matches with the stack height we can compute based on the
    // environment at the OSR entry. The code for that is built into
    // the DoComputeOsrOutputFrame function for now.
  } else {
    // Cross-check against the spill-slot and outgoing-argument counts
    // recorded in the optimized code.
    unsigned stack_slots = optimized_code_->stack_slots();
    unsigned outgoing_size = ComputeOutgoingArgumentSize();
    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
  }
#endif
  return result;
}
832
833
// Size in bytes of the fixed part of a frame for |function|: the
// incoming arguments plus four slots.
unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
  // The fixed part of the frame consists of the return address, frame
  // pointer, function, context, and all the incoming arguments.
  static const unsigned kFixedSlotSize = 4 * kPointerSize;
  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
}
840
841
842unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
843 // The incoming arguments is the values for formal parameters and
844 // the receiver. Every slot contains a pointer.
845 unsigned arguments = function->shared()->formal_parameter_count() + 1;
846 return arguments * kPointerSize;
847}
848
849
850unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
851 DeoptimizationInputData* data = DeoptimizationInputData::cast(
852 optimized_code_->deoptimization_data());
853 unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
854 return height * kPointerSize;
855}
856
857
858Object* Deoptimizer::ComputeLiteral(int index) const {
859 DeoptimizationInputData* data = DeoptimizationInputData::cast(
860 optimized_code_->deoptimization_data());
861 FixedArray* literals = data->LiteralArray();
862 return literals->get(index);
863}
864
865
// Records an untagged int32 value for slot |slot_index| of output frame
// |frame_index|; it is boxed later by InsertHeapNumberValues.
void Deoptimizer::AddInteger32Value(int frame_index,
                                    int slot_index,
                                    int32_t value) {
  ValueDescriptionInteger32 value_desc(slot_index, value);
  integer32_values_[frame_index].Add(value_desc);
}
872
873
// Records an unboxed double value for slot |slot_index| of output frame
// |frame_index|; it is boxed later by InsertHeapNumberValues.
void Deoptimizer::AddDoubleValue(int frame_index,
                                 int slot_index,
                                 double value) {
  ValueDescriptionDouble value_desc(slot_index, value);
  double_values_[frame_index].Add(value_desc);
}
880
881
// Generates the table of deoptimization entry points for the given
// bailout type into a freshly allocated chunk of executable memory.
LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
  // We cannot run this if the serializer is enabled because this will
  // cause us to emit relocation information for the external
  // references. This is fine because the deoptimizer's code section
  // isn't meant to be serialized at all.
  ASSERT(!Serializer::enabled());
  // Temporarily disable debug code so no debug checks are generated
  // into the entry table; restored below.
  bool old_debug_code = FLAG_debug_code;
  FLAG_debug_code = false;

  MacroAssembler masm(NULL, 16 * KB);
  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  // NOTE(review): a failed (NULL) LargeObjectChunk::New is not handled
  // before the memcpy below — confirm allocation cannot fail here.
  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
  FLAG_debug_code = old_debug_code;
  return chunk;
}
903
904
905Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
906 DeoptimizingCodeListNode* node = Deoptimizer::deoptimizing_code_list_;
907 while (node != NULL) {
908 if (node->code()->contains(addr)) return *node->code();
909 node = node->next();
910 }
911 return NULL;
912}
913
914
915void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
916 ASSERT(deoptimizing_code_list_ != NULL);
917 // Run through the code objects to find this one and remove it.
918 DeoptimizingCodeListNode* prev = NULL;
919 DeoptimizingCodeListNode* current = deoptimizing_code_list_;
920 while (current != NULL) {
921 if (*current->code() == code) {
922 // Unlink from list. If prev is NULL we are looking at the first element.
923 if (prev == NULL) {
924 deoptimizing_code_list_ = current->next();
925 } else {
926 prev->set_next(current->next());
927 }
928 delete current;
929 return;
930 }
931 // Move to next in list.
932 prev = current;
933 current = current->next();
934 }
935 // Deoptimizing code is removed through weak callback. Each object is expected
936 // to be removed once and only once.
937 UNREACHABLE();
938}
939
940
941FrameDescription::FrameDescription(uint32_t frame_size,
942 JSFunction* function)
943 : frame_size_(frame_size),
944 function_(function),
945 top_(kZapUint32),
946 pc_(kZapUint32),
947 fp_(kZapUint32) {
948 // Zap all the registers.
949 for (int r = 0; r < Register::kNumRegisters; r++) {
950 SetRegister(r, kZapUint32);
951 }
952
953 // Zap all the slots.
954 for (unsigned o = 0; o < frame_size; o += kPointerSize) {
955 SetFrameSlot(o, kZapUint32);
956 }
957}
958
959
960unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
961 int slot_index) {
962 if (slot_index >= 0) {
963 // Local or spill slots. Skip the fixed part of the frame
964 // including all arguments.
965 unsigned base = static_cast<unsigned>(
966 GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
967 return base - ((slot_index + 1) * kPointerSize);
968 } else {
969 // Incoming parameter.
970 unsigned base = static_cast<unsigned>(GetFrameSize() -
971 deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
972 return base - ((slot_index + 1) * kPointerSize);
973 }
974}
975
976
977void TranslationBuffer::Add(int32_t value) {
978 // Encode the sign bit in the least significant bit.
979 bool is_negative = (value < 0);
980 uint32_t bits = ((is_negative ? -value : value) << 1) |
981 static_cast<int32_t>(is_negative);
982 // Encode the individual bytes using the least significant bit of
983 // each byte to indicate whether or not more bytes follow.
984 do {
985 uint32_t next = bits >> 7;
986 contents_.Add(((bits << 1) & 0xFF) | (next != 0));
987 bits = next;
988 } while (bits != 0);
989}
990
991
992int32_t TranslationIterator::Next() {
993 ASSERT(HasNext());
994 // Run through the bytes until we reach one with a least significant
995 // bit of zero (marks the end).
996 uint32_t bits = 0;
997 for (int i = 0; true; i += 7) {
998 uint8_t next = buffer_->get(index_++);
999 bits |= (next >> 1) << i;
1000 if ((next & 1) == 0) break;
1001 }
1002 // The bits encode the sign in the least significant bit.
1003 bool is_negative = (bits & 1) == 1;
1004 int32_t result = bits >> 1;
1005 return is_negative ? -result : result;
1006}
1007
1008
// Copies the accumulated translation bytes into a newly allocated,
// tenured ByteArray and returns a handle to it.
Handle<ByteArray> TranslationBuffer::CreateByteArray() {
  int length = contents_.length();
  Handle<ByteArray> result = Factory::NewByteArray(length, TENURED);
  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
  return result;
}
1015
1016
// Starts a frame record: appends the FRAME opcode followed by its three
// operands (node id, literal id, frame height) — matching the operand
// count reported by NumberOfOperandsFor(FRAME).
void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
  buffer_->Add(FRAME);
  buffer_->Add(node_id);
  buffer_->Add(literal_id);
  buffer_->Add(height);
}
1023
1024
// Records that the current value lives in the given (tagged) register:
// appends the REGISTER opcode and the register's code.
void Translation::StoreRegister(Register reg) {
  buffer_->Add(REGISTER);
  buffer_->Add(reg.code());
}
1029
1030
// Records that the current value is an untagged int32 in the given
// register: appends the INT32_REGISTER opcode and the register's code.
void Translation::StoreInt32Register(Register reg) {
  buffer_->Add(INT32_REGISTER);
  buffer_->Add(reg.code());
}
1035
1036
// Records that the current value lives in the given double register:
// appends the DOUBLE_REGISTER opcode and the register's allocation
// index (not its raw code, unlike the other register opcodes).
void Translation::StoreDoubleRegister(DoubleRegister reg) {
  buffer_->Add(DOUBLE_REGISTER);
  buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
}
1041
1042
// Records that the current (tagged) value lives in the stack slot with
// the given index: appends the STACK_SLOT opcode and the index.
void Translation::StoreStackSlot(int index) {
  buffer_->Add(STACK_SLOT);
  buffer_->Add(index);
}
1047
1048
// Records that the current value is an untagged int32 in the stack slot
// with the given index: appends the INT32_STACK_SLOT opcode and the
// index.
void Translation::StoreInt32StackSlot(int index) {
  buffer_->Add(INT32_STACK_SLOT);
  buffer_->Add(index);
}
1053
1054
// Records that the current value is a double in the stack slot with the
// given index: appends the DOUBLE_STACK_SLOT opcode and the index.
void Translation::StoreDoubleStackSlot(int index) {
  buffer_->Add(DOUBLE_STACK_SLOT);
  buffer_->Add(index);
}
1059
1060
// Records that the current value is the literal with the given id:
// appends the LITERAL opcode and the literal id.
void Translation::StoreLiteral(int literal_id) {
  buffer_->Add(LITERAL);
  buffer_->Add(literal_id);
}
1065
1066
// Records that the current value is the arguments object: appends the
// operand-less ARGUMENTS_OBJECT opcode.
void Translation::StoreArgumentsObject() {
  buffer_->Add(ARGUMENTS_OBJECT);
}
1070
1071
// Appends the operand-less DUPLICATE opcode, marking the current value
// as a duplicate of a previously recorded one.
void Translation::MarkDuplicate() {
  buffer_->Add(DUPLICATE);
}
1075
1076
// Returns how many integer operands follow the given opcode in the
// encoded translation stream (see BeginFrame and the Store* methods),
// or -1 for an unknown opcode (unreachable — the switch is exhaustive).
int Translation::NumberOfOperandsFor(Opcode opcode) {
  switch (opcode) {
    case ARGUMENTS_OBJECT:
    case DUPLICATE:
      return 0;  // Opcode only.
    case BEGIN:
    case REGISTER:
    case INT32_REGISTER:
    case DOUBLE_REGISTER:
    case STACK_SLOT:
    case INT32_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
    case LITERAL:
      return 1;  // One operand (register code, slot index, or id).
    case FRAME:
      return 3;  // Node id, literal id, and frame height.
  }
  UNREACHABLE();
  return -1;
}
1097
1098
1099#ifdef OBJECT_PRINT
1100
// Returns a printable name for the given translation opcode. Only
// compiled when OBJECT_PRINT is defined (see the surrounding #ifdef).
// Returns "" for an unknown opcode (unreachable — the switch is
// exhaustive).
const char* Translation::StringFor(Opcode opcode) {
  switch (opcode) {
    case BEGIN:
      return "BEGIN";
    case FRAME:
      return "FRAME";
    case REGISTER:
      return "REGISTER";
    case INT32_REGISTER:
      return "INT32_REGISTER";
    case DOUBLE_REGISTER:
      return "DOUBLE_REGISTER";
    case STACK_SLOT:
      return "STACK_SLOT";
    case INT32_STACK_SLOT:
      return "INT32_STACK_SLOT";
    case DOUBLE_STACK_SLOT:
      return "DOUBLE_STACK_SLOT";
    case LITERAL:
      return "LITERAL";
    case ARGUMENTS_OBJECT:
      return "ARGUMENTS_OBJECT";
    case DUPLICATE:
      return "DUPLICATE";
  }
  UNREACHABLE();
  return "";
}
1129
1130#endif
1131
1132
// Creates a list node that keeps the given code object reachable via a
// weak global handle. When the GC decides the code object is otherwise
// dead, Deoptimizer::HandleWeakDeoptimizedCode is invoked with this
// node as the callback parameter.
DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
  // Globalize the code object and make it weak.
  code_ = Handle<Code>::cast((GlobalHandles::Create(code)));
  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(code_.location()),
                          this,
                          Deoptimizer::HandleWeakDeoptimizedCode);
}
1140
1141
// Releases the weak global handle created in the constructor.
DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
  GlobalHandles::Destroy(reinterpret_cast<Object**>(code_.location()));
}
1145
1146
1147} } // namespace v8::internal