// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


class SafepointGenerator : public PostCallGenerator {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  virtual void Generate() {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(SSE2);
  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(StackSlotCount());
  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = StackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
    }
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


LInstruction* LCodeGen::GetNextInstruction() {
  if (current_instruction_ < instructions_->length() - 1) {
    return instructions_->at(current_instruction_ + 1);
  } else {
    return NULL;
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), StackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
         value->Number());
  return static_cast<int32_t>(value->Number());
}


Immediate LCodeGen::ToImmediate(LOperand* op) {
  LConstantOperand* const_op = LConstantOperand::cast(op);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    return Immediate(static_cast<int32_t>(literal->Number()));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  }
  ASSERT(r.IsTagged());
  return Immediate(literal);
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}
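
// A worked example of the slot arithmetic above (kPointerSize == 4 on
// ia32): spill slot 0 maps to Operand(ebp, -12), just below the saved
// ebp, context, and function pushed by the prologue; slot 1 maps to
// Operand(ebp, -16), and so on. Incoming parameters have negative
// indices: index -1 maps to Operand(ebp, +8), the slot right above the
// return address.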


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = StackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  if (instr != NULL) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    __ call(code, mode);
    RegisterLazyDeoptimization(instr);
  } else {
    LPointerMap no_pointers(0);
    RecordPosition(no_pointers.position());
    __ call(code, mode);
    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
  }
}


void LCodeGen::CallRuntime(Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  // Runtime calls to Throw are not supposed to ever return at the
  // call site, so don't register lazy deoptimization for these. We do
  // however have to record a safepoint since throwing exceptions can
  // cause garbage collections.
  // BUG(3243555): register a lazy deoptimization point at throw. We need
  // it to be able to inline functions containing a throw statement.
  if (!instr->IsThrow()) {
    RegisterLazyDeoptimization(instr);
  } else {
    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
  }
}


void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
  // Create the environment to bail out to. If the call has side effects,
  // execution has to continue after the call; otherwise execution could
  // resume from a previous bailout point and repeat the call.
  LEnvironment* deoptimization_environment;
  if (instr->HasDeoptimizationEnvironment()) {
    deoptimization_environment = instr->deoptimization_environment();
  } else {
    deoptimization_environment = instr->environment();
  }

  RegisterEnvironmentForDeoptimization(deoptimization_environment);
  RecordSafepoint(instr->pointer_map(),
                  deoptimization_environment->deoptimization_index());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size  ------------<|

    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    environment->WriteTranslation(this, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (FLAG_deopt_every_n_times != 0) {
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ push(ebx);
    __ mov(ebx, shared);
    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
    __ j(not_zero, &no_deopt);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      NearLabel done;
      __ j(NegateCondition(cc), &done);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
    }
  }
}
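
// For example, with --deopt-every-n-times=100 the counter in the shared
// function info is decremented at every conditional deoptimization check;
// when it hits zero the code deoptimizes unconditionally and the counter
// is reset to 100, so roughly every 100th check actually bails out.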


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      Factory::NewDeoptimizationInputData(length, TENURED);

  data->SetTranslationByteArray(*translations_.CreateByteArray());
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
                                                    deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    }
  }
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint =
      safepoints_.DefineSafepointWithRegisters(
          masm(), arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister()) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  // Register esi always contains a pointer to the context.
  safepoint.DefinePointerRegister(esi);
}


void LCodeGen::RecordPosition(int position) {
  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  LCodeGen::DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  // xmm0 must always be a scratch register.
  XMMRegister xmm_scratch = xmm0;
  LUnallocated marker_operand(LUnallocated::NONE);

  Register cpu_scratch = esi;
  bool destroys_cpu_scratch = false;

  LGapResolver resolver(move->move_operands(), &marker_operand);
  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
  for (int i = moves->length() - 1; i >= 0; --i) {
    LMoveOperands move = moves->at(i);
    LOperand* from = move.from();
    LOperand* to = move.to();
    ASSERT(!from->IsDoubleRegister() ||
           !ToDoubleRegister(from).is(xmm_scratch));
    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
    if (from->IsConstantOperand()) {
      __ mov(ToOperand(to), ToImmediate(from));
    } else if (from == &marker_operand) {
      if (to->IsRegister() || to->IsStackSlot()) {
        __ mov(ToOperand(to), cpu_scratch);
        ASSERT(destroys_cpu_scratch);
      } else {
        ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
        __ movdbl(ToOperand(to), xmm_scratch);
      }
    } else if (to == &marker_operand) {
      if (from->IsRegister() || from->IsStackSlot()) {
        __ mov(cpu_scratch, ToOperand(from));
        destroys_cpu_scratch = true;
      } else {
        ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
        __ movdbl(xmm_scratch, ToOperand(from));
      }
    } else if (from->IsRegister()) {
      __ mov(ToOperand(to), ToRegister(from));
    } else if (to->IsRegister()) {
      __ mov(ToRegister(to), ToOperand(from));
    } else if (from->IsStackSlot()) {
      ASSERT(to->IsStackSlot());
      __ push(eax);
      __ mov(eax, ToOperand(from));
      __ mov(ToOperand(to), eax);
      __ pop(eax);
    } else if (from->IsDoubleRegister()) {
      __ movdbl(ToOperand(to), ToDoubleRegister(from));
    } else if (to->IsDoubleRegister()) {
      __ movdbl(ToDoubleRegister(to), ToOperand(from));
    } else {
      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
      __ movdbl(xmm_scratch, ToOperand(from));
      __ movdbl(ToOperand(to), xmm_scratch);
    }
  }

  if (destroys_cpu_scratch) {
    __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
  }
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCharAt: {
      StringCharAtStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::MathPow: {
      MathPowStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(edx));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register right_reg = ToRegister(right);

  // Check for x % 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Sign extend to edx.
  __ cdq();

  // Check for a negative left operand: x % y produces negative zero
  // when x < 0 and the remainder is 0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel positive_left;
    NearLabel done;
    __ test(eax, Operand(eax));
    __ j(not_sign, &positive_left);
    __ idiv(right_reg);

    // Test the remainder for 0, because then the result would be -0.
    __ test(edx, Operand(edx));
    __ j(not_zero, &done);

    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&positive_left);
    __ idiv(right_reg);
    __ bind(&done);
  } else {
    __ idiv(right_reg);
  }
}
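
// The minus-zero case above matters because edx cannot represent -0: in
// JavaScript -4 % 2 is -0, but idiv leaves edx == 0 with the sign lost.
// Hence the deopt whenever the left operand is negative and the remainder
// comes out zero; for a non-negative left operand a plain idiv suffices.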


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    NearLabel left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if remainder is not 0.
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}
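
// This instruction only handles divisions that yield an integer again:
// 6 / 2 passes, while 7 / 2 leaves a remainder in edx and deoptimizes to
// the generic code. kMinInt / -1 is rejected up front because the
// quotient 2^31 overflows idiv and would raise a hardware exception.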


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
  } else {
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    NearLabel done;
    __ test(left, Operand(left));
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), right_operand);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shl(ToRegister(left), shift_count);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
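
// Note the SHR-by-zero case: a logical shift by zero leaves the value
// unchanged, so 0x80000000 >>> 0 would be 2147483648, which no longer
// fits in a signed 32-bit result. With can_deopt() set, a zero shift
// count therefore just tests the sign bit and deoptimizes if it is set.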


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left), ToImmediate(right));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ mov(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
  if (BitCast<uint64_t, double>(v) == 0) {
    __ xorpd(res, res);
  } else {
    int32_t v_int32 = static_cast<int32_t>(v);
    if (static_cast<double>(v_int32) == v) {
      __ push_imm32(v_int32);
      __ cvtsi2sd(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kPointerSize));
    } else {
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
      __ push_imm32(upper);
      __ push_imm32(lower);
      __ movdbl(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(2 * kPointerSize));
    }
  }
}
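
// Example of the non-integral path above: for v == 1.5 the IEEE-754 bit
// pattern is 0x3FF8000000000000, so upper == 0x3FF80000 and lower == 0
// are pushed and the double is reloaded from the stack with movdbl.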


void LCodeGen::DoConstantT(LConstantT* instr) {
  ASSERT(instr->result()->IsRegister());
  __ mov(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoArrayLength(LArrayLength* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->value()->IsLoadElements()) {
    // We load the length directly from the elements array.
    Register elements = ToRegister(instr->input());
    __ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
  } else {
    // Check that the receiver really is an array.
    Register array = ToRegister(instr->input());
    Register temporary = ToRegister(instr->temporary());
    __ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
    DeoptimizeIf(not_equal, instr->environment());

    // Load length directly from the array.
    __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
  }
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temporary());
  ASSERT(input.is(result));
  NearLabel done;
  // If the object is a smi return the object.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, &done);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->input();
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->input()));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ add(ToOperand(left), ToImmediate(right));
  } else {
    __ add(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  // Modulo uses a fixed result register.
  ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::SUB:
      __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MUL:
      __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::DIV:
      __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
      __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);

      // Return value is in st(0) on ia32.
      // Store it into the (fixed) result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->left()).is(edx));
  ASSERT(ToRegister(instr->right()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}
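
// Example: when the left target is the very next emitted block, a single
// jump on the negated condition to the right target suffices and the true
// case falls through; only when neither target is the next block do we
// need both a conditional jump and an unconditional one.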


void LCodeGen::DoBranch(LBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Representation r = instr->hydrogen()->representation();
  if (r.IsInteger32()) {
    Register reg = ToRegister(instr->input());
    __ test(reg, Operand(reg));
    EmitBranch(true_block, false_block, not_zero);
  } else if (r.IsDouble()) {
    XMMRegister reg = ToDoubleRegister(instr->input());
    __ xorpd(xmm0, xmm0);
    __ ucomisd(reg, xmm0);
    EmitBranch(true_block, false_block, not_equal);
  } else {
    ASSERT(r.IsTagged());
    Register reg = ToRegister(instr->input());
    if (instr->hydrogen()->type().IsBoolean()) {
      __ cmp(reg, Factory::true_value());
      EmitBranch(true_block, false_block, equal);
    } else {
      Label* true_label = chunk_->GetAssemblyLabel(true_block);
      Label* false_label = chunk_->GetAssemblyLabel(false_block);

      __ cmp(reg, Factory::undefined_value());
      __ j(equal, false_label);
      __ cmp(reg, Factory::true_value());
      __ j(equal, true_label);
      __ cmp(reg, Factory::false_value());
      __ j(equal, false_label);
      __ test(reg, Operand(reg));
      __ j(equal, false_label);
      __ test(reg, Immediate(kSmiTagMask));
      __ j(zero, true_label);

      // Test for double values. Zero is false.
      NearLabel call_stub;
      __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
             Factory::heap_number_map());
      __ j(not_equal, &call_stub);
      __ fldz();
      __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
      __ FCmp();
      __ j(zero, false_label);
      __ jmp(true_label);

      // The conversion stub doesn't cause garbage collections so it's
      // safe to not record a safepoint after the call.
      __ bind(&call_stub);
      ToBooleanStub stub;
      __ pushad();
      __ push(reg);
      __ CallStub(&stub);
      __ test(eax, Operand(eax));
      __ popad();
      EmitBranch(true_block, false_block, not_zero);
    }
  }
}


void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
  block = chunk_->LookupDestination(block);
  int next_block = GetNextEmittedBlock(current_block_);
  if (block != next_block) {
    // Perform stack overflow check if this goto needs it before jumping.
    if (deferred_stack_check != NULL) {
      ExternalReference stack_limit =
          ExternalReference::address_of_stack_limit();
      __ cmp(esp, Operand::StaticVariable(stack_limit));
      __ j(above_equal, chunk_->GetAssemblyLabel(block));
      __ jmp(deferred_stack_check->entry());
      deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
    } else {
      __ jmp(chunk_->GetAssemblyLabel(block));
    }
  }
}


void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
  __ pushad();
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
  __ popad();
}


void LCodeGen::DoGoto(LGoto* instr) {
  class DeferredStackCheck: public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   private:
    LGoto* instr_;
  };

  DeferredStackCheck* deferred = NULL;
  if (instr->include_stack_check()) {
    deferred = new DeferredStackCheck(this, instr);
  }
  EmitGoto(instr->block_id(), deferred);
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = no_condition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = equal;
      break;
    case Token::LT:
      cond = is_unsigned ? below : less;
      break;
    case Token::GT:
      cond = is_unsigned ? above : greater;
      break;
    case Token::LTE:
      cond = is_unsigned ? below_equal : less_equal;
      break;
    case Token::GTE:
      cond = is_unsigned ? above_equal : greater_equal;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
  if (right->IsConstantOperand()) {
    __ cmp(ToOperand(left), ToImmediate(right));
  } else {
    __ cmp(ToRegister(left), ToOperand(right));
  }
}


void LCodeGen::DoCmpID(LCmpID* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  NearLabel unordered;
  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the unordered case, which produces a false value.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
    __ j(parity_even, &unordered, not_taken);
  } else {
    EmitCmpI(left, right);
  }

  NearLabel done;
  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
  __ j(cc, &done);

  __ bind(&unordered);
  __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  if (instr->is_double()) {
    // Don't base result on EFLAGS when a NaN is involved. Instead
    // jump to the false block.
    __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
    __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
  } else {
    EmitCmpI(left, right);
  }

  Condition cc = TokenToCondition(instr->op(), instr->is_double());
  EmitBranch(true_block, false_block, cc);
}


void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  Register result = ToRegister(instr->result());

  __ cmp(left, Operand(right));
  __ mov(result, Handle<Object>(Heap::true_value()));
  NearLabel done;
  __ j(equal, &done);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  int true_block = chunk_->LookupDestination(instr->true_block_id());

  __ cmp(left, Operand(right));
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoIsNull(LIsNull* instr) {
  Register reg = ToRegister(instr->input());
  Register result = ToRegister(instr->result());

  // TODO(fsc): If the expression is known to be a smi, then it's
  // definitely not null. Materialize false.

  __ cmp(reg, Factory::null_value());
  if (instr->is_strict()) {
    __ mov(result, Handle<Object>(Heap::true_value()));
    NearLabel done;
    __ j(equal, &done);
    __ mov(result, Handle<Object>(Heap::false_value()));
    __ bind(&done);
  } else {
    NearLabel true_value, false_value, done;
    __ j(equal, &true_value);
    __ cmp(reg, Factory::undefined_value());
    __ j(equal, &true_value);
    __ test(reg, Immediate(kSmiTagMask));
    __ j(zero, &false_value);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = result;
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    __ j(not_zero, &true_value);
    __ bind(&false_value);
    __ mov(result, Handle<Object>(Heap::false_value()));
    __ jmp(&done);
    __ bind(&true_value);
    __ mov(result, Handle<Object>(Heap::true_value()));
    __ bind(&done);
  }
}


void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
  Register reg = ToRegister(instr->input());

  // TODO(fsc): If the expression is known to be a smi, then it's
  // definitely not null. Jump to the false block.

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ cmp(reg, Factory::null_value());
  if (instr->is_strict()) {
    EmitBranch(true_block, false_block, equal);
  } else {
    Label* true_label = chunk_->GetAssemblyLabel(true_block);
    Label* false_label = chunk_->GetAssemblyLabel(false_block);
    __ j(equal, true_label);
    __ cmp(reg, Factory::undefined_value());
    __ j(equal, true_label);
    __ test(reg, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    // Check for undetectable objects by looking in the bit field in
    // the map. The object has already been smi checked.
    Register scratch = ToRegister(instr->temp());
    __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
    __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
    __ test(scratch, Immediate(1 << Map::kIsUndetectable));
    EmitBranch(true_block, false_block, not_zero);
  }
}


void LCodeGen::DoIsSmi(LIsSmi* instr) {
  Operand input = ToOperand(instr->input());
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ test(input, Immediate(kSmiTagMask));
  __ mov(result, Handle<Object>(Heap::true_value()));
  NearLabel done;
  __ j(zero, &done);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->input());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(true_block, false_block, zero);
}


InstanceType LHasInstanceType::TestType() {
  InstanceType from = hydrogen()->from();
  InstanceType to = hydrogen()->to();
  if (from == FIRST_TYPE) return to;
  ASSERT(from == to || to == LAST_TYPE);
  return from;
}


Condition LHasInstanceType::BranchCondition() {
  InstanceType from = hydrogen()->from();
  InstanceType to = hydrogen()->to();
  if (from == to) return equal;
  if (to == LAST_TYPE) return above_equal;
  if (from == FIRST_TYPE) return below_equal;
  UNREACHABLE();
  return equal;
}


void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ test(input, Immediate(kSmiTagMask));
  NearLabel done, is_false;
  __ j(zero, &is_false);
  __ CmpObjectType(input, instr->TestType(), result);
  __ j(NegateCondition(instr->BranchCondition()), &is_false);
  __ mov(result, Handle<Object>(Heap::true_value()));
  __ jmp(&done);
  __ bind(&is_false);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->input());
  Register temp = ToRegister(instr->temp());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, false_label);

  __ CmpObjectType(input, instr->TestType(), temp);
  EmitBranch(true_block, false_block, instr->BranchCondition());
}


void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());

  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
  __ mov(result, Handle<Object>(Heap::true_value()));
  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  NearLabel done;
  __ j(not_zero, &done);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->input());

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(true_block, false_block, not_equal);
}


// Branches to a label or falls through with the answer in the z flag. Trashes
// the temp registers, but not the input. Only input and temp2 may alias.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String> class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  ASSERT(!input.is(temp));
  ASSERT(!temp.is(temp2));  // But input and temp2 may be the same register.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, is_false);
  __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
  __ j(below, is_false);

  // Map is now in temp.
  // Functions have class 'Function'.
  __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
  if (class_name->IsEqualTo(CStrVector("Function"))) {
    __ j(equal, is_true);
  } else {
    __ j(equal, is_false);
  }

  // Check if the constructor in the map is a function.
  __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));

  // As long as JS_FUNCTION_TYPE is the last instance type and it is
  // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
  // LAST_JS_OBJECT_TYPE.
  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);

  // Objects with a non-function constructor have class 'Object'.
  __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
  if (class_name->IsEqualTo(CStrVector("Object"))) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is a symbol because it's a literal.
  // The name in the constructor is a symbol because of the way the context is
  // booted. This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax. Since both sides are symbols it is sufficient to use an identity
  // comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}


void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());
  ASSERT(input.is(result));
  Register temp = ToRegister(instr->temporary());
  Handle<String> class_name = instr->hydrogen()->class_name();
  NearLabel done;
  Label is_true, is_false;

  EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);

  __ j(not_equal, &is_false);

  __ bind(&is_true);
  __ mov(result, Handle<Object>(Heap::true_value()));
  __ jmp(&done);

  __ bind(&is_false);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ bind(&done);
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->input());
  Register temp = ToRegister(instr->temporary());
  Register temp2 = ToRegister(instr->temporary2());
  if (input.is(temp)) {
    // Swap.
    Register swapper = temp;
    temp = temp2;
    temp2 = swapper;
  }
  Handle<String> class_name = instr->hydrogen()->class_name();

  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);

  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->input());
  int true_block = instr->true_block_id();
  int false_block = instr->false_block_id();

  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(true_block, false_block, equal);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  InstanceofStub stub;
  __ push(ToOperand(instr->left()));
  __ push(ToOperand(instr->right()));
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);

  NearLabel true_value, done;
  __ test(eax, Operand(eax));
  __ j(zero, &true_value);
  __ mov(ToRegister(instr->result()), Factory::false_value());
  __ jmp(&done);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), Factory::true_value());
  __ bind(&done);
}


void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  InstanceofStub stub;
  __ push(ToOperand(instr->left()));
  __ push(ToOperand(instr->right()));
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ test(eax, Operand(eax));
  EmitBranch(true_block, false_block, zero);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return equal;
    case Token::LT:
      return less;
    case Token::GT:
      return greater;
    case Token::LTE:
      return less_equal;
    case Token::GTE:
      return greater_equal;
    default:
      UNREACHABLE();
      return no_condition;
  }
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  NearLabel true_value, done;
  __ test(eax, Operand(eax));
  __ j(condition, &true_value);
  __ mov(ToRegister(instr->result()), Factory::false_value());
  __ jmp(&done);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), Factory::true_value());
  __ bind(&done);
}


void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
  Token::Value op = instr->op();
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  Handle<Code> ic = CompareIC::GetUninitialized(op);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  // The compare stub expects compare condition and the input operands
  // reversed for GT and LTE.
  Condition condition = ComputeCompareCondition(op);
  if (op == Token::GT || op == Token::LTE) {
    condition = ReverseCondition(condition);
  }
  __ test(eax, Operand(eax));
  EmitBranch(true_block, false_block, condition);
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace) {
    // Preserve the return value on the stack and rely on the runtime
    // call to return the value in the same register.
    __ push(eax);
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  __ mov(esp, ebp);
  __ pop(ebp);
  __ ret((ParameterCount() + 1) * kPointerSize);
}
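
// The ret count removes the receiver as well as the parameters: a
// function with two parameters returns with ret 12 on ia32, popping the
// two arguments plus the receiver off the caller's stack.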
1730
1731
1732void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
1733 Register result = ToRegister(instr->result());
1734 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
  if (instr->hydrogen()->check_hole_value()) {
    __ cmp(result, Factory::the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
  Register value = ToRegister(instr->input());
  __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
}


void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  Register object = ToRegister(instr->input());
  Register result = ToRegister(instr->result());
  if (instr->hydrogen()->is_in_object()) {
    __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
  } else {
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
  }
}


void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoLoadElements(LLoadElements* instr) {
  ASSERT(instr->result()->Equals(instr->input()));
  Register reg = ToRegister(instr->input());
  __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
  if (FLAG_debug_code) {
    NearLabel done;
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Immediate(Factory::fixed_array_map()));
    __ j(equal, &done);
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Immediate(Factory::fixed_cow_array_map()));
    __ Check(equal, "Check for fast elements failed.");
    __ bind(&done);
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register length = ToRegister(instr->length());
  Operand index = ToOperand(instr->index());
  Register result = ToRegister(instr->result());

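  // The unsigned subtraction deoptimizes when index >= length; a negative
  // index looks like a huge unsigned value and is caught by the same check.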
  __ sub(length, index);
  DeoptimizeIf(below_equal, instr->environment());

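  // There are two words between the frame pointer and the last argument:
  // the saved frame pointer and the return address. length - index is at
  // least 1 and covers one of them; the kPointerSize displacement covers
  // the other.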
  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
}


void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
  Register elements = ToRegister(instr->elements());
  Register key = ToRegister(instr->key());
  Register result;
  if (instr->load_result() != NULL) {
    result = ToRegister(instr->load_result());
  } else {
    result = ToRegister(instr->result());
    ASSERT(result.is(elements));
  }

  // Load the result.
  __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));

  Representation r = instr->hydrogen()->representation();
  if (r.IsInteger32()) {
    // Untag and check for smi.
    __ SmiUntag(result);
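    // SmiUntag shifts arithmetically right by one and leaves the tag bit in
    // the carry flag, so carry set here means the value was not a smi.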
    DeoptimizeIf(carry, instr->environment());
  } else if (r.IsDouble()) {
    EmitNumberUntagD(result,
                     ToDoubleRegister(instr->result()),
                     instr->environment());
  } else {
    // Check for the hole value.
    ASSERT(r.IsTagged());
    __ cmp(result, Factory::the_hole_value());
    DeoptimizeIf(equal, instr->environment());
  }
}


void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(eax));

  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  // Check for an arguments adaptor frame.
  Label done, adapted;
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
  __ cmp(Operand(result),
         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ j(equal, &adapted);

  // No arguments adaptor frame.
  __ mov(result, Operand(ebp));
  __ jmp(&done);

  // Arguments adaptor frame present.
  __ bind(&adapted);
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

  // Done. Pointer to the topmost argument is in result.
  __ bind(&done);
}


void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->input());
  Register result = ToRegister(instr->result());

  Label done;

  // No arguments adaptor frame. Number of arguments is fixed.
  __ cmp(ebp, elem);
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Done. Argument length is in result register.
  __ bind(&done);
}


void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  ASSERT(ToRegister(instr->function()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  // If the receiver is null or undefined, we have to pass the
  // global object as a receiver.
  NearLabel global_receiver, receiver_ok;
  __ cmp(receiver, Factory::null_value());
  __ j(equal, &global_receiver);
  __ cmp(receiver, Factory::undefined_value());
  __ j(not_equal, &receiver_ok);
  __ bind(&global_receiver);
  __ mov(receiver, GlobalObjectOperand());
  __ bind(&receiver_ok);

  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());

  Label invoke;

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
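  // More than kArgumentsLimit (1 * KB = 1024) arguments deoptimize to the
  // generic code rather than pushing an unbounded number of values here.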
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr->environment());

  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke);
  __ bind(&loop);
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  ASSERT(receiver.is(eax));
  v8::internal::ParameterCount actual(eax);
  SafepointGenerator safepoint_generator(this,
                                         instr->pointer_map(),
                                         Safepoint::kNoDeoptimizationIndex);
  __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoPushArgument(LPushArgument* instr) {
  LOperand* argument = instr->input();
  if (argument->IsConstantOperand()) {
    __ push(ToImmediate(argument));
  } else {
    __ push(ToOperand(argument));
  }
}


void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
}


void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
  Register result = ToRegister(instr->result());
  __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
}


void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int arity,
                                 LInstruction* instr) {
  // Change context if needed.
  bool change_context =
      (graph()->info()->closure()->context() != function->context()) ||
      scope()->contains_with() ||
      (scope()->num_heap_slots() > 0);
  if (change_context) {
    __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
  }

  // Set eax to the arguments count if adaption is not needed. Assumes that
  // eax is available to write to at this point.
  if (!function->NeedsArgumentsAdaption()) {
    __ mov(eax, arity);
  }

  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  // Invoke the function.
  if (*function == *graph()->info()->closure()) {
    __ CallSelf();
  } else {
    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
  }

  // Set up deoptimization.
  RegisterLazyDeoptimization(instr);

  // Restore context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  __ mov(edi, instr->function());
  CallKnownFunction(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  Register input_reg = ToRegister(instr->input());
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         Factory::heap_number_map());
  DeoptimizeIf(not_equal, instr->environment());

  Label done;
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
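  // tmp and tmp2 are chosen to be distinct from input_reg and from each
  // other.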

  // Preserve the value of all registers.
  __ PushSafepointRegisters();

  Label negative;
  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive,
  // just return it.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(not_zero, &negative);
  __ mov(tmp, input_reg);
  __ jmp(&done);

  __ bind(&negative);

  Label allocated, slow;
  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);

  // Restore input_reg after call to runtime.
  __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));

  __ bind(&allocated);
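  // Copy the double value into the new heap number with the sign bit
  // cleared, which is the absolute value.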
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);

  __ bind(&done);
  __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);

  __ PopSafepointRegisters();
}


void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
   private:
    LUnaryMathOperation* instr_;
  };

  ASSERT(instr->input()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    XMMRegister scratch = xmm0;
    XMMRegister input_reg = ToDoubleRegister(instr->input());
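    // Bitwise abs: for any finite nonzero x, 0.0 - x differs from x only in
    // the sign bit, so x & -x clears the sign and keeps the other bits.
    // Zero and NaN inputs also come out right.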
    __ pxor(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ pand(input_reg, scratch);
  } else if (r.IsInteger32()) {
    Register input_reg = ToRegister(instr->input());
    __ test(input_reg, Operand(input_reg));
    Label is_positive;
    __ j(not_sign, &is_positive);
    __ neg(input_reg);
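    // If the input was kMinInt (0x80000000), neg overflows and the result
    // is still negative; deoptimize because the absolute value does not fit
    // in an int32.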
    __ test(input_reg, Operand(input_reg));
    DeoptimizeIf(negative, instr->environment());
    __ bind(&is_positive);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Label not_smi;
    Register input_reg = ToRegister(instr->input());
    // Smi check.
    __ test(input_reg, Immediate(kSmiTagMask));
    __ j(not_zero, deferred->entry());
    __ test(input_reg, Operand(input_reg));
    Label is_positive;
    __ j(not_sign, &is_positive);
    __ neg(input_reg);

    __ test(input_reg, Operand(input_reg));
    DeoptimizeIf(negative, instr->environment());

    __ bind(&is_positive);
    __ bind(deferred->exit());
  }
}


void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->input());
  __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
  __ ucomisd(input_reg, xmm_scratch);

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(below_equal, instr->environment());
  } else {
    DeoptimizeIf(below, instr->environment());
  }

  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, Operand(input_reg));

  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x80000000u);
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  XMMRegister xmm_scratch = xmm0;
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->input());

  // xmm_scratch = 0.5
  ExternalReference one_half = ExternalReference::address_of_one_half();
  __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));

  // input = input + 0.5
  __ addsd(input_reg, xmm_scratch);

  // We need to return -0 for inputs in the range [-0.5, 0); otherwise
  // we compute Math.floor(value + 0.5).
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below_equal, instr->environment());
  } else {
    // If we don't need to bail out on -0, we only have to bail out on
    // negative inputs.
    __ xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(below, instr->environment());
  }

  // Compute Math.floor(value + 0.5).
  // Use truncating instruction (OK because input is positive).
  __ cvttsd2si(output_reg, Operand(input_reg));

  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x80000000u);
  DeoptimizeIf(equal, instr->environment());
}


void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
  XMMRegister input_reg = ToDoubleRegister(instr->input());
  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
  __ sqrtsd(input_reg, input_reg);
}


void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
  switch (instr->op()) {
    case kMathAbs:
      DoMathAbs(instr);
      break;
    case kMathFloor:
      DoMathFloor(instr);
      break;
    case kMathRound:
      DoMathRound(instr);
      break;
    case kMathSqrt:
      DoMathSqrt(instr);
      break;
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallNamed(LCallNamed* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
  __ mov(ecx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ Drop(1);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
  __ mov(ecx, instr->name());
  CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  __ mov(edi, instr->target());
  CallKnownFunction(instr->target(), instr->arity(), instr);
}


void LCodeGen::DoCallNew(LCallNew* instr) {
  ASSERT(ToRegister(instr->input()).is(edi));
  ASSERT(ToRegister(instr->result()).is(eax));

  Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
  __ Set(eax, Immediate(instr->arity()));
  CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
}


void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  CallRuntime(instr->function(), instr->arity(), instr);
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Register object = ToRegister(instr->object());
  Register value = ToRegister(instr->value());
  int offset = instr->offset();

  if (!instr->transition().is_null()) {
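    // Maps are allocated in map space and never in new space, so this map
    // store does not need a write barrier.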
    __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
  }

  // Do the store.
  if (instr->is_in_object()) {
    __ mov(FieldOperand(object, offset), value);
    if (instr->needs_write_barrier()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the object for in-object properties.
      __ RecordWrite(object, offset, value, temp);
    }
  } else {
    Register temp = ToRegister(instr->temp());
    __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
    __ mov(FieldOperand(temp, offset), value);
    if (instr->needs_write_barrier()) {
      // Update the write barrier for the properties array.
      // object is used as a scratch register.
      __ RecordWrite(temp, offset, value, object);
    }
  }
}


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->value()).is(eax));

  __ mov(ecx, instr->name());
  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
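  // An unsigned comparison also catches negative indices: they wrap to
  // large unsigned values and trigger the above_equal deoptimization.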
  __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  DeoptimizeIf(above_equal, instr->environment());
}


void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
  Register value = ToRegister(instr->value());
  Register elements = ToRegister(instr->object());
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  // Do the store.
  if (instr->key()->IsConstantOperand()) {
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    int offset =
        ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
    __ mov(FieldOperand(elements, offset), value);
  } else {
    __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
           value);
  }

  // Update the write barrier unless we're certain that we're storing a smi.
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Compute address of modified element and store it into key register.
    __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
    __ RecordWrite(elements, key, value);
  }
}


void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->object()).is(edx));
  ASSERT(ToRegister(instr->key()).is(ecx));
  ASSERT(ToRegister(instr->value()).is(eax));

  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
}


void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  class DeferredNumberTagI: public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->input();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}


void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
  Label slow;
  Register reg = ToRegister(instr->input());
  Register tmp = reg.is(eax) ? ecx : eax;

  // Preserve the value of all registers.
  __ PushSafepointRegisters();

  // There was overflow, so bits 30 and 31 of the original integer
  // disagree. Try to allocate a heap number in new space and store
  // the value in there. If that fails, call the runtime system.
  NearLabel done;
  __ SmiUntag(reg);
  __ xor_(reg, 0x80000000);
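  // The arithmetic shift right recovers every bit of the original value
  // except bit 31, which the overflowing tag left inverted; the xor flips
  // it back.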
  __ cvtsi2sd(xmm0, Operand(reg));
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  // TODO(3095996): Put a valid pointer value in the stack slot where the
  // result register is stored, as this register is in the pointer map, but
  // contains an integer value.
  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));

  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
  if (!reg.is(eax)) __ mov(reg, eax);

  // Done. Put the value in xmm0 into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
  __ PopSafepointRegisters();
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD: public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   private:
    LNumberTagD* instr_;
  };

  XMMRegister input_reg = ToDoubleRegister(instr->input());
  Register reg = ToRegister(instr->result());
  Register tmp = ToRegister(instr->temp());

  DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Set(reg, Immediate(0));

  __ PushSafepointRegisters();
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
  __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
  __ PopSafepointRegisters();
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
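  // On ia32 a smi is the 31-bit integer value shifted left by one, with the
  // tag bit (0) in the low bit, so tagging is a single left shift.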
  __ SmiTag(ToRegister(input));
}


void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister() && input->Equals(instr->result()));
  if (instr->needs_check()) {
    __ test(ToRegister(input), Immediate(kSmiTagMask));
    DeoptimizeIf(not_zero, instr->environment());
  }
  __ SmiUntag(ToRegister(input));
}

void LCodeGen::EmitNumberUntagD(Register input_reg,
                                XMMRegister result_reg,
                                LEnvironment* env) {
  NearLabel load_smi, heap_number, done;

  // Smi check.
  __ test(input_reg, Immediate(kSmiTagMask));
  __ j(zero, &load_smi, not_taken);

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         Factory::heap_number_map());
  __ j(equal, &heap_number);

  __ cmp(input_reg, Factory::undefined_value());
  DeoptimizeIf(not_equal, env);

  // Convert undefined to NaN.
  __ push(input_reg);
  __ mov(input_reg, Factory::nan_value());
  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ pop(input_reg);
  __ jmp(&done);

  // Heap number to XMM conversion.
  __ bind(&heap_number);
  __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ jmp(&done);

  // Smi to XMM conversion.
  __ bind(&load_smi);
  __ SmiUntag(input_reg);  // Untag smi before converting to float.
  __ cvtsi2sd(result_reg, Operand(input_reg));
  __ SmiTag(input_reg);  // Retag smi.
  __ bind(&done);
}


class DeferredTaggedToI: public LDeferredCode {
 public:
  DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
      : LDeferredCode(codegen), instr_(instr) { }
  virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
 private:
  LTaggedToI* instr_;
};


void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
  NearLabel done, heap_number;
  Register input_reg = ToRegister(instr->input());

  // Heap number map check.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         Factory::heap_number_map());

  if (instr->truncating()) {
    __ j(equal, &heap_number);
    // Check for undefined. Undefined is converted to zero for truncating
    // conversions.
    __ cmp(input_reg, Factory::undefined_value());
    DeoptimizeIf(not_equal, instr->environment());
    __ mov(input_reg, 0);
    __ jmp(&done);

    __ bind(&heap_number);
    if (CpuFeatures::IsSupported(SSE3)) {
      CpuFeatures::Scope scope(SSE3);
      NearLabel convert;
      // Use the more powerful SSE3 conversion when available.
      // Load the x87 register with the heap number.
      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
      // Get the exponent alone and check for a too-big exponent.
      __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
      __ and_(input_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
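      // An exponent of bias + 63 or more means an absolute value of at
      // least 2^63, which no longer fits in the signed 64-bit integer that
      // fisttp_d produces.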
      __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
      __ j(less, &convert);
      // Pop the FPU stack before deoptimizing.
      __ ffree(0);
      __ fincstp();
      DeoptimizeIf(no_condition, instr->environment());

      __ bind(&convert);
      // Reserve space for the 64-bit answer.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      // Do the conversion, which cannot fail because we checked the exponent.
      __ fisttp_d(Operand(esp, 0));
      __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
    } else {
      NearLabel deopt;
      XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
      __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
      __ cvttsd2si(input_reg, Operand(xmm0));
      __ cmp(input_reg, 0x80000000u);
      __ j(not_equal, &done);
      // Check whether the input was actually 0x80000000 (kMinInt). If not,
      // the conversion signalled an overflow and we deoptimize.
      ExternalReference min_int = ExternalReference::address_of_min_int();
      __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
      __ ucomisd(xmm_temp, xmm0);
      DeoptimizeIf(not_equal, instr->environment());
      DeoptimizeIf(parity_even, instr->environment());  // NaN.
    }
  } else {
    // Deoptimize if we don't have a heap number.
    DeoptimizeIf(not_equal, instr->environment());

    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
    __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
    __ cvtsi2sd(xmm_temp, Operand(input_reg));
    __ ucomisd(xmm0, xmm_temp);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ test(input_reg, Operand(input_reg));
      __ j(not_zero, &done);
      __ movmskpd(input_reg, xmm0);
      __ and_(input_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
  }
  __ bind(&done);
}

void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister());
  ASSERT(input->Equals(instr->result()));

  Register input_reg = ToRegister(input);

  DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);

  // Smi check.
  __ test(input_reg, Immediate(kSmiTagMask));
  __ j(not_zero, deferred->entry());

  // Smi to int32 conversion.
  __ SmiUntag(input_reg);  // Untag smi.

  __ bind(deferred->exit());
}


void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  XMMRegister result_reg = ToDoubleRegister(result);

  EmitNumberUntagD(input_reg, result_reg, instr->environment());
}


void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  XMMRegister input_reg = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cmp(result_reg, 0x80000000u);
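    // cvttsd2si returns 0x80000000 (the x86 "integer indefinite" value)
    // when the result does not fit in an int32 or the input is NaN.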
    if (CpuFeatures::IsSupported(SSE3)) {
      // This will deoptimize if the exponent of the input is out of range.
      CpuFeatures::Scope scope(SSE3);
      NearLabel convert, done;
      __ j(not_equal, &done);
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ movdbl(Operand(esp, 0), input_reg);
      // Get the exponent alone and check for a too-big exponent.
      __ mov(result_reg, Operand(esp, sizeof(int32_t)));
      __ and_(result_reg, HeapNumber::kExponentMask);
      const uint32_t kTooBigExponent =
          (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
      __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
      __ j(less, &convert);
      __ add(Operand(esp), Immediate(kDoubleSize));
      DeoptimizeIf(no_condition, instr->environment());
      __ bind(&convert);
      // Do the conversion, which cannot fail because we checked the exponent.
      __ fld_d(Operand(esp, 0));
      __ fisttp_d(Operand(esp, 0));
      __ mov(result_reg, Operand(esp, 0));  // Low word of answer is the result.
      __ add(Operand(esp), Immediate(kDoubleSize));
      __ bind(&done);
    } else {
      // This will bail out if the input was not in the int32 range (or,
      // unfortunately, if the input was 0x80000000).
      DeoptimizeIf(equal, instr->environment());
    }
  } else {
    NearLabel done;
    __ cvttsd2si(result_reg, Operand(input_reg));
    __ cvtsi2sd(xmm0, Operand(result_reg));
    __ ucomisd(xmm0, input_reg);
    DeoptimizeIf(not_equal, instr->environment());
    DeoptimizeIf(parity_even, instr->environment());  // NaN.
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // The integer converted back equals the original, so we only have to
      // test whether the input was -0.
      __ test(result_reg, Operand(result_reg));
      __ j(not_zero, &done);
      __ movmskpd(result_reg, input_reg);
      // Bit 0 contains the sign of the double in input_reg. If the input
      // was positive, we are OK and return 0; otherwise we deoptimize.
      __ and_(result_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister());
  __ test(ToRegister(input), Immediate(kSmiTagMask));
  DeoptimizeIf(instr->condition(), instr->environment());
}


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->input());
  Register temp = ToRegister(instr->temp());
  InstanceType first = instr->hydrogen()->first();
  InstanceType last = instr->hydrogen()->last();

  __ test(input, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr->environment());

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
          static_cast<int8_t>(first));

  // If there is only one type in the interval, check for equality.
  if (first == last) {
    DeoptimizeIf(not_equal, instr->environment());
  } else {
    DeoptimizeIf(below, instr->environment());
    // The upper-bound check can be omitted when the interval extends to the
    // last type.
    if (last != LAST_TYPE) {
      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
              static_cast<int8_t>(last));
      DeoptimizeIf(above, instr->environment());
    }
  }
}


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->input()->IsRegister());
  Register reg = ToRegister(instr->input());
  __ cmp(reg, instr->hydrogen()->target());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         instr->hydrogen()->map());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
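  // Objects in new space can move during GC, so a prototype there is
  // referenced indirectly through a global property cell instead of being
  // embedded directly in the code.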
  if (Heap::InNewSpace(*prototype)) {
    Handle<JSGlobalPropertyCell> cell =
        Factory::NewJSGlobalPropertyCell(prototype);
    __ mov(result, Operand::Cell(cell));
  } else {
    __ mov(result, prototype);
  }
}


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->temp());

  Handle<JSObject> holder = instr->holder();
  Handle<Map> receiver_map = instr->receiver_map();
  Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));

  // Load prototype object.
  LoadPrototype(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Handle<Map>(current_prototype->map()));
    DeoptimizeIf(not_equal, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadPrototype(reg, current_prototype);
  }

  // Check the holder map.
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Handle<Map>(current_prototype->map()));
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_elements()));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_properties()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));

  // Pick the right runtime function or stub to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  }
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  NearLabel materialized;
  // Registers will be used as follows:
  // edi = JS function.
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, Factory::undefined_value());
  __ j(not_equal, &materialized);

  // Create the regexp literal using the runtime function.
  // The result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll the copy loop once for better throughput.)
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (shared_info->num_literals() == 0 && !pretenure) {
    FastNewClosureStub stub;
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? Factory::true_value()
                      : Factory::false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->input();
  if (input->IsConstantOperand()) {
    __ push(ToImmediate(input));
  } else {
    __ push(ToOperand(input));
  }
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());
  Label true_label;
  Label false_label;
  NearLabel done;

  Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                  &false_label,
                                                  input,
                                                  instr->type_literal());
  __ j(final_branch_condition, &true_label);
  __ bind(&false_label);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ jmp(&done);

  __ bind(&true_label);
  __ mov(result, Handle<Object>(Heap::true_value()));

  __ bind(&done);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->input());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(Heap::number_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           Factory::heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::string_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
    final_branch_condition = below;

  } else if (type_name->Equals(Heap::boolean_symbol())) {
    __ cmp(input, Handle<Object>(Heap::true_value()));
    __ j(equal, true_label);
    __ cmp(input, Handle<Object>(Heap::false_value()));
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::undefined_symbol())) {
    __ cmp(input, Factory::undefined_value());
    __ j(equal, true_label);
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(Heap::function_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    // Regular expressions => 'function' (they are callable).
    __ CmpInstanceType(input, JS_REGEXP_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::object_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ cmp(input, Factory::null_value());
    __ j(equal, true_label);
    // Regular expressions => 'function', not 'object'.
    __ CmpObjectType(input, JS_REGEXP_TYPE, input);
    __ j(equal, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    // Check for JS objects => true.
    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
    final_branch_condition = below_equal;

  } else {
    final_branch_condition = not_equal;
    __ jmp(false_label);
    // A dead branch instruction will be generated after this point.
  }

  return final_branch_condition;
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code for lazy bailout instruction. Used to capture environment after a
  // call for populating the safepoint data with deoptimization data.
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  if (key->IsConstantOperand()) {
    __ push(ToImmediate(key));
  } else {
    __ push(ToOperand(key));
  }
  RecordPosition(instr->pointer_map()->position());
  SafepointGenerator safepoint_generator(this,
                                         instr->pointer_map(),
                                         Safepoint::kNoDeoptimizationIndex);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Perform stack overflow check.
  NearLabel done;
  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
  __ cmp(esp, Operand::StaticVariable(stack_limit));
  __ j(above_equal, &done);

  StackCheckStub stub;
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


#undef __

} }  // namespace v8::internal