1// Copyright 2010 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "ia32/lithium-codegen-ia32.h"
29#include "code-stubs.h"
30#include "stub-cache.h"
31
32namespace v8 {
33namespace internal {
34
35
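// Invoked as a PostCallGenerator callback: right after a call has been
// emitted, Generate() records the safepoint for that call site using the
// given pointer map and deoptimization index.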
36class SafepointGenerator : public PostCallGenerator {
37 public:
38 SafepointGenerator(LCodeGen* codegen,
39 LPointerMap* pointers,
40 int deoptimization_index)
41 : codegen_(codegen),
42 pointers_(pointers),
43 deoptimization_index_(deoptimization_index) { }
44 virtual ~SafepointGenerator() { }
45
46 virtual void Generate() {
47 codegen_->RecordSafepoint(pointers_, deoptimization_index_);
48 }
49
50 private:
51 LCodeGen* codegen_;
52 LPointerMap* pointers_;
53 int deoptimization_index_;
54};
55
56
57#define __ masm()->
58
59bool LCodeGen::GenerateCode() {
60 HPhase phase("Code generation", chunk());
61 ASSERT(is_unused());
62 status_ = GENERATING;
63 CpuFeatures::Scope scope(SSE2);
64 return GeneratePrologue() &&
65 GenerateBody() &&
66 GenerateDeferredCode() &&
67 GenerateSafepointTable();
68}
69
70
71void LCodeGen::FinishCode(Handle<Code> code) {
72 ASSERT(is_done());
73 code->set_stack_slots(StackSlotCount());
74 code->set_safepoint_table_start(safepoints_.GetCodeOffset());
75 PopulateDeoptimizationData(code);
76}
77
78
79void LCodeGen::Abort(const char* format, ...) {
80 if (FLAG_trace_bailout) {
81 SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
82 PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
83 va_list arguments;
84 va_start(arguments, format);
85 OS::VPrint(format, arguments);
86 va_end(arguments);
87 PrintF("\n");
88 }
89 status_ = ABORTED;
90}
91
92
93void LCodeGen::Comment(const char* format, ...) {
94 if (!FLAG_code_comments) return;
95 char buffer[4 * KB];
96 StringBuilder builder(buffer, ARRAY_SIZE(buffer));
97 va_list arguments;
98 va_start(arguments, format);
99 builder.AddFormattedList(format, arguments);
100 va_end(arguments);
101
102 // Copy the string before recording it in the assembler to avoid
103 // issues when the stack-allocated buffer goes out of scope.
104 size_t length = builder.position();
105 Vector<char> copy = Vector<char>::New(length + 1);
106 memcpy(copy.start(), builder.Finalize(), copy.length());
107 masm()->RecordComment(copy.start());
108}
109
110
111bool LCodeGen::GeneratePrologue() {
112 ASSERT(is_generating());
113
114#ifdef DEBUG
115 if (strlen(FLAG_stop_at) > 0 &&
116 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
117 __ int3();
118 }
119#endif
120
121 __ push(ebp); // Caller's frame pointer.
122 __ mov(ebp, esp);
123 __ push(esi); // Callee's context.
124 __ push(edi); // Callee's JS function.
125
126 // Reserve space for the stack slots needed by the code.
127 int slots = StackSlotCount();
128 if (slots > 0) {
129 if (FLAG_debug_code) {
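      // Fill the reserved slots with a zap value in debug builds so that
      // reads from uninitialized spill slots are easy to recognize; release
      // builds simply bump esp below.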
130 __ mov(Operand(eax), Immediate(slots));
131 Label loop;
132 __ bind(&loop);
133 __ push(Immediate(kSlotsZapValue));
134 __ dec(eax);
135 __ j(not_zero, &loop);
136 } else {
137 __ sub(Operand(esp), Immediate(slots * kPointerSize));
138 }
139 }
140
141 // Trace the call.
142 if (FLAG_trace) {
143 __ CallRuntime(Runtime::kTraceEnter, 0);
144 }
145 return !is_aborted();
146}
147
148
149bool LCodeGen::GenerateBody() {
150 ASSERT(is_generating());
151 bool emit_instructions = true;
152 for (current_instruction_ = 0;
153 !is_aborted() && current_instruction_ < instructions_->length();
154 current_instruction_++) {
155 LInstruction* instr = instructions_->at(current_instruction_);
156 if (instr->IsLabel()) {
157 LLabel* label = LLabel::cast(instr);
158 emit_instructions = !label->HasReplacement();
159 }
160
161 if (emit_instructions) {
162 Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
163 instr->CompileToNative(this);
164 }
165 }
166 return !is_aborted();
167}
168
169
170LInstruction* LCodeGen::GetNextInstruction() {
171 if (current_instruction_ < instructions_->length() - 1) {
172 return instructions_->at(current_instruction_ + 1);
173 } else {
174 return NULL;
175 }
176}
177
178
179bool LCodeGen::GenerateDeferredCode() {
180 ASSERT(is_generating());
181 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
182 LDeferredCode* code = deferred_[i];
183 __ bind(code->entry());
184 code->Generate();
185 __ jmp(code->exit());
186 }
187
188 // Deferred code is the last part of the instruction sequence. Mark
189 // the generated code as done unless we bailed out.
190 if (!is_aborted()) status_ = DONE;
191 return !is_aborted();
192}
193
194
195bool LCodeGen::GenerateSafepointTable() {
196 ASSERT(is_done());
197 safepoints_.Emit(masm(), StackSlotCount());
198 return !is_aborted();
199}
200
201
202Register LCodeGen::ToRegister(int index) const {
203 return Register::FromAllocationIndex(index);
204}
205
206
207XMMRegister LCodeGen::ToDoubleRegister(int index) const {
208 return XMMRegister::FromAllocationIndex(index);
209}
210
211
212Register LCodeGen::ToRegister(LOperand* op) const {
213 ASSERT(op->IsRegister());
214 return ToRegister(op->index());
215}
216
217
218XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
219 ASSERT(op->IsDoubleRegister());
220 return ToDoubleRegister(op->index());
221}
222
223
224int LCodeGen::ToInteger32(LConstantOperand* op) const {
225 Handle<Object> value = chunk_->LookupLiteral(op);
226 ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
227 ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
228 value->Number());
229 return static_cast<int32_t>(value->Number());
230}
231
232
233Immediate LCodeGen::ToImmediate(LOperand* op) {
234 LConstantOperand* const_op = LConstantOperand::cast(op);
235 Handle<Object> literal = chunk_->LookupLiteral(const_op);
236 Representation r = chunk_->LookupLiteralRepresentation(const_op);
237 if (r.IsInteger32()) {
238 ASSERT(literal->IsNumber());
239 return Immediate(static_cast<int32_t>(literal->Number()));
240 } else if (r.IsDouble()) {
241 Abort("unsupported double immediate");
242 }
243 ASSERT(r.IsTagged());
244 return Immediate(literal);
245}
246
247
248Operand LCodeGen::ToOperand(LOperand* op) const {
249 if (op->IsRegister()) return Operand(ToRegister(op));
250 if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
251 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
252 int index = op->index();
253 if (index >= 0) {
254 // Local or spill slot. Skip the frame pointer, function, and
255 // context in the fixed part of the frame.
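    // With kPointerSize == 4 this places spill slot 0 at ebp - 12, directly
    // below the saved context and function pushed in the prologue.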
256 return Operand(ebp, -(index + 3) * kPointerSize);
257 } else {
258 // Incoming parameter. Skip the return address.
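    // For example, index -1 yields ebp + 8, just above the return address
    // at ebp + 4 (again assuming kPointerSize == 4).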
259 return Operand(ebp, -(index - 1) * kPointerSize);
260 }
261}
262
263
264void LCodeGen::AddToTranslation(Translation* translation,
265 LOperand* op,
266 bool is_tagged) {
267 if (op == NULL) {
268 // TODO(twuerthinger): Introduce marker operands to indicate that this value
269 // is not present and must be reconstructed by the deoptimizer. Currently
270 // this is only used for the arguments object.
271 translation->StoreArgumentsObject();
272 } else if (op->IsStackSlot()) {
273 if (is_tagged) {
274 translation->StoreStackSlot(op->index());
275 } else {
276 translation->StoreInt32StackSlot(op->index());
277 }
278 } else if (op->IsDoubleStackSlot()) {
279 translation->StoreDoubleStackSlot(op->index());
280 } else if (op->IsArgument()) {
281 ASSERT(is_tagged);
282 int src_index = StackSlotCount() + op->index();
283 translation->StoreStackSlot(src_index);
284 } else if (op->IsRegister()) {
285 Register reg = ToRegister(op);
286 if (is_tagged) {
287 translation->StoreRegister(reg);
288 } else {
289 translation->StoreInt32Register(reg);
290 }
291 } else if (op->IsDoubleRegister()) {
292 XMMRegister reg = ToDoubleRegister(op);
293 translation->StoreDoubleRegister(reg);
294 } else if (op->IsConstantOperand()) {
295 Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
296 int src_index = DefineDeoptimizationLiteral(literal);
297 translation->StoreLiteral(src_index);
298 } else {
299 UNREACHABLE();
300 }
301}
302
303
304void LCodeGen::CallCode(Handle<Code> code,
305 RelocInfo::Mode mode,
306 LInstruction* instr) {
307 if (instr != NULL) {
308 LPointerMap* pointers = instr->pointer_map();
309 RecordPosition(pointers->position());
310 __ call(code, mode);
311 RegisterLazyDeoptimization(instr);
312 } else {
313 LPointerMap no_pointers(0);
314 RecordPosition(no_pointers.position());
315 __ call(code, mode);
316 RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
317 }
318
319 // Signal that we don't inline smi code before these stubs in the
320 // optimizing code generator.
321 if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
322 code->kind() == Code::COMPARE_IC) {
323 __ nop();
324 }
325}
326
327
328void LCodeGen::CallRuntime(Runtime::Function* function,
329 int num_arguments,
330 LInstruction* instr) {
331 ASSERT(instr != NULL);
332 LPointerMap* pointers = instr->pointer_map();
333 ASSERT(pointers != NULL);
334 RecordPosition(pointers->position());
335
336 __ CallRuntime(function, num_arguments);
337 // Runtime calls to Throw are not supposed to ever return at the
338 // call site, so don't register lazy deoptimization for these. We do
339 // however have to record a safepoint since throwing exceptions can
340 // cause garbage collections.
341 // BUG(3243555): register a lazy deoptimization point at throw. We need
342 // it to be able to inline functions containing a throw statement.
343 if (!instr->IsThrow()) {
344 RegisterLazyDeoptimization(instr);
345 } else {
346 RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
347 }
348}
349
350
351void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
352 // Create the environment to bail out to. If the call has side effects,
353 // execution has to continue after the call; otherwise execution can
354 // continue from a previous bailout point, repeating the call.
355 LEnvironment* deoptimization_environment;
356 if (instr->HasDeoptimizationEnvironment()) {
357 deoptimization_environment = instr->deoptimization_environment();
358 } else {
359 deoptimization_environment = instr->environment();
360 }
361
362 RegisterEnvironmentForDeoptimization(deoptimization_environment);
363 RecordSafepoint(instr->pointer_map(),
364 deoptimization_environment->deoptimization_index());
365}
366
367
368void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
369 if (!environment->HasBeenRegistered()) {
370 // Physical stack frame layout:
371 // -x ............. -4 0 ..................................... y
372 // [incoming arguments] [spill slots] [pushed outgoing arguments]
373
374 // Layout of the environment:
375 // 0 ..................................................... size-1
376 // [parameters] [locals] [expression stack including arguments]
377
378 // Layout of the translation:
379 // 0 ........................................................ size - 1 + 4
380 // [expression stack including arguments] [locals] [4 words] [parameters]
381 // |>------------ translation_size ------------<|
382
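    // Count this environment plus all outer (inlined) environments so the
    // translation describes every frame the deoptimizer has to rebuild.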
383 int frame_count = 0;
384 for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
385 ++frame_count;
386 }
387 Translation translation(&translations_, frame_count);
388 environment->WriteTranslation(this, &translation);
389 int deoptimization_index = deoptimizations_.length();
390 environment->Register(deoptimization_index, translation.index());
391 deoptimizations_.Add(environment);
392 }
393}
394
395
396void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
397 RegisterEnvironmentForDeoptimization(environment);
398 ASSERT(environment->HasBeenRegistered());
399 int id = environment->deoptimization_index();
400 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
401 ASSERT(entry != NULL);
402 if (entry == NULL) {
403 Abort("bailout was not prepared");
404 return;
405 }
406
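  // With --deopt-every-n-times, decrement a counter stored in the
  // SharedFunctionInfo at every deoptimization check; once it reaches zero,
  // reset it and force an unconditional deoptimization.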
407 if (FLAG_deopt_every_n_times != 0) {
408 Handle<SharedFunctionInfo> shared(info_->shared_info());
409 Label no_deopt;
410 __ pushfd();
411 __ push(eax);
412 __ push(ebx);
413 __ mov(ebx, shared);
414 __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
415 __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
416 __ j(not_zero, &no_deopt);
417 if (FLAG_trap_on_deopt) __ int3();
418 __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
419 __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
420 __ pop(ebx);
421 __ pop(eax);
422 __ popfd();
423 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
424
425 __ bind(&no_deopt);
426 __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
427 __ pop(ebx);
428 __ pop(eax);
429 __ popfd();
430 }
431
432 if (cc == no_condition) {
433 if (FLAG_trap_on_deopt) __ int3();
434 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
435 } else {
436 if (FLAG_trap_on_deopt) {
437 NearLabel done;
438 __ j(NegateCondition(cc), &done);
439 __ int3();
440 __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
441 __ bind(&done);
442 } else {
443 __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
444 }
445 }
446}
447
448
449void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
450 int length = deoptimizations_.length();
451 if (length == 0) return;
452 ASSERT(FLAG_deopt);
453 Handle<DeoptimizationInputData> data =
454 Factory::NewDeoptimizationInputData(length, TENURED);
455
456 data->SetTranslationByteArray(*translations_.CreateByteArray());
457 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
458
459 Handle<FixedArray> literals =
460 Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
461 for (int i = 0; i < deoptimization_literals_.length(); i++) {
462 literals->set(i, *deoptimization_literals_[i]);
463 }
464 data->SetLiteralArray(*literals);
465
466 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
467 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
468
469 // Populate the deoptimization entries.
470 for (int i = 0; i < length; i++) {
471 LEnvironment* env = deoptimizations_[i];
472 data->SetAstId(i, Smi::FromInt(env->ast_id()));
473 data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
474 data->SetArgumentsStackHeight(i,
475 Smi::FromInt(env->arguments_stack_height()));
476 }
477 code->set_deoptimization_data(*data);
478}
479
480
481int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
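  // Returns the index of an existing identical literal if present;
  // otherwise appends the literal and returns its new index.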
482 int result = deoptimization_literals_.length();
483 for (int i = 0; i < deoptimization_literals_.length(); ++i) {
484 if (deoptimization_literals_[i].is_identical_to(literal)) return i;
485 }
486 deoptimization_literals_.Add(literal);
487 return result;
488}
489
490
491void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
492 ASSERT(deoptimization_literals_.length() == 0);
493
494 const ZoneList<Handle<JSFunction> >* inlined_closures =
495 chunk()->inlined_closures();
496
497 for (int i = 0, length = inlined_closures->length();
498 i < length;
499 i++) {
500 DefineDeoptimizationLiteral(inlined_closures->at(i));
501 }
502
503 inlined_function_count_ = deoptimization_literals_.length();
504}
505
506
507void LCodeGen::RecordSafepoint(LPointerMap* pointers,
508 int deoptimization_index) {
509 const ZoneList<LOperand*>* operands = pointers->operands();
510 Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
511 deoptimization_index);
512 for (int i = 0; i < operands->length(); i++) {
513 LOperand* pointer = operands->at(i);
514 if (pointer->IsStackSlot()) {
515 safepoint.DefinePointerSlot(pointer->index());
516 }
517 }
518}
519
520
521void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
522 int arguments,
523 int deoptimization_index) {
524 const ZoneList<LOperand*>* operands = pointers->operands();
525 Safepoint safepoint =
526 safepoints_.DefineSafepointWithRegisters(
527 masm(), arguments, deoptimization_index);
528 for (int i = 0; i < operands->length(); i++) {
529 LOperand* pointer = operands->at(i);
530 if (pointer->IsStackSlot()) {
531 safepoint.DefinePointerSlot(pointer->index());
532 } else if (pointer->IsRegister()) {
533 safepoint.DefinePointerRegister(ToRegister(pointer));
534 }
535 }
536 // Register esi always contains a pointer to the context.
537 safepoint.DefinePointerRegister(esi);
538}
539
540
541void LCodeGen::RecordPosition(int position) {
542 if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
543 masm()->positions_recorder()->RecordPosition(position);
544}
545
546
547void LCodeGen::DoLabel(LLabel* label) {
548 if (label->is_loop_header()) {
549 Comment(";;; B%d - LOOP entry", label->block_id());
550 } else {
551 Comment(";;; B%d", label->block_id());
552 }
553 __ bind(label->label());
554 current_block_ = label->block_id();
555 LCodeGen::DoGap(label);
556}
557
558
559void LCodeGen::DoParallelMove(LParallelMove* move) {
560 // xmm0 must always be a scratch register.
561 XMMRegister xmm_scratch = xmm0;
562 LUnallocated marker_operand(LUnallocated::NONE);
563
564 Register cpu_scratch = esi;
565 bool destroys_cpu_scratch = false;
566
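  // The gap resolver routes cyclic moves through marker_operand; such moves
  // are materialized below using cpu_scratch for tagged and integer values
  // and xmm_scratch for doubles.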
567 LGapResolver resolver(move->move_operands(), &marker_operand);
568 const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
569 for (int i = moves->length() - 1; i >= 0; --i) {
570 LMoveOperands move = moves->at(i);
571 LOperand* from = move.from();
572 LOperand* to = move.to();
573 ASSERT(!from->IsDoubleRegister() ||
574 !ToDoubleRegister(from).is(xmm_scratch));
575 ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
576 ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
577 ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
578 if (from->IsConstantOperand()) {
579 __ mov(ToOperand(to), ToImmediate(from));
580 } else if (from == &marker_operand) {
581 if (to->IsRegister() || to->IsStackSlot()) {
582 __ mov(ToOperand(to), cpu_scratch);
583 ASSERT(destroys_cpu_scratch);
584 } else {
585 ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
586 __ movdbl(ToOperand(to), xmm_scratch);
587 }
588 } else if (to == &marker_operand) {
589 if (from->IsRegister() || from->IsStackSlot()) {
590 __ mov(cpu_scratch, ToOperand(from));
591 destroys_cpu_scratch = true;
592 } else {
593 ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
594 __ movdbl(xmm_scratch, ToOperand(from));
595 }
596 } else if (from->IsRegister()) {
597 __ mov(ToOperand(to), ToRegister(from));
598 } else if (to->IsRegister()) {
599 __ mov(ToRegister(to), ToOperand(from));
600 } else if (from->IsStackSlot()) {
601 ASSERT(to->IsStackSlot());
602 __ push(eax);
603 __ mov(eax, ToOperand(from));
604 __ mov(ToOperand(to), eax);
605 __ pop(eax);
606 } else if (from->IsDoubleRegister()) {
607 __ movdbl(ToOperand(to), ToDoubleRegister(from));
608 } else if (to->IsDoubleRegister()) {
609 __ movdbl(ToDoubleRegister(to), ToOperand(from));
610 } else {
611 ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
612 __ movdbl(xmm_scratch, ToOperand(from));
613 __ movdbl(ToOperand(to), xmm_scratch);
614 }
615 }
616
617 if (destroys_cpu_scratch) {
618 __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
619 }
620}
621
622
623void LCodeGen::DoGap(LGap* gap) {
624 for (int i = LGap::FIRST_INNER_POSITION;
625 i <= LGap::LAST_INNER_POSITION;
626 i++) {
627 LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
628 LParallelMove* move = gap->GetParallelMove(inner_pos);
629 if (move != NULL) DoParallelMove(move);
630 }
631
632 LInstruction* next = GetNextInstruction();
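  // A lazy bailout that follows this gap needs its safepoint to refer to
  // the pc after the moves emitted above, so record that offset now.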
633 if (next != NULL && next->IsLazyBailout()) {
634 int pc = masm()->pc_offset();
635 safepoints_.SetPcAfterGap(pc);
636 }
637}
638
639
640void LCodeGen::DoParameter(LParameter* instr) {
641 // Nothing to do.
642}
643
644
645void LCodeGen::DoCallStub(LCallStub* instr) {
646 ASSERT(ToRegister(instr->result()).is(eax));
647 switch (instr->hydrogen()->major_key()) {
648 case CodeStub::RegExpConstructResult: {
649 RegExpConstructResultStub stub;
650 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
651 break;
652 }
653 case CodeStub::RegExpExec: {
654 RegExpExecStub stub;
655 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
656 break;
657 }
658 case CodeStub::SubString: {
659 SubStringStub stub;
660 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
661 break;
662 }
663 case CodeStub::StringCharAt: {
664 StringCharAtStub stub;
665 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
666 break;
667 }
668 case CodeStub::MathPow: {
669 MathPowStub stub;
670 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
671 break;
672 }
673 case CodeStub::NumberToString: {
674 NumberToStringStub stub;
675 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
676 break;
677 }
678 case CodeStub::StringAdd: {
679 StringAddStub stub(NO_STRING_ADD_FLAGS);
680 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
681 break;
682 }
683 case CodeStub::StringCompare: {
684 StringCompareStub stub;
685 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
686 break;
687 }
688 case CodeStub::TranscendentalCache: {
689 TranscendentalCacheStub stub(instr->transcendental_type());
690 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
691 break;
692 }
693 default:
694 UNREACHABLE();
695 }
696}
697
698
699void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
700 // Nothing to do.
701}
702
703
704void LCodeGen::DoModI(LModI* instr) {
705 LOperand* right = instr->right();
706 ASSERT(ToRegister(instr->result()).is(edx));
707 ASSERT(ToRegister(instr->left()).is(eax));
708 ASSERT(!ToRegister(instr->right()).is(eax));
709 ASSERT(!ToRegister(instr->right()).is(edx));
710
711 Register right_reg = ToRegister(right);
712
713 // Check for x % 0.
714 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
715 __ test(right_reg, ToOperand(right));
716 DeoptimizeIf(zero, instr->environment());
717 }
718
719 // Sign extend to edx.
720 __ cdq();
721
722 // Check for (0 % -x) that will produce negative zero.
723 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
724 NearLabel positive_left;
725 NearLabel done;
726 __ test(eax, Operand(eax));
727 __ j(not_sign, &positive_left);
728 __ idiv(right_reg);
729
730 // Test the remainder for 0, because then the result would be -0.
731 __ test(edx, Operand(edx));
732 __ j(not_zero, &done);
733
734 DeoptimizeIf(no_condition, instr->environment());
735 __ bind(&positive_left);
736 __ idiv(right_reg);
737 __ bind(&done);
738 } else {
739 __ idiv(right_reg);
740 }
741}
742
743
744void LCodeGen::DoDivI(LDivI* instr) {
745 LOperand* right = instr->right();
746 ASSERT(ToRegister(instr->result()).is(eax));
747 ASSERT(ToRegister(instr->left()).is(eax));
748 ASSERT(!ToRegister(instr->right()).is(eax));
749 ASSERT(!ToRegister(instr->right()).is(edx));
750
751 Register left_reg = eax;
752
753 // Check for x / 0.
754 Register right_reg = ToRegister(right);
755 if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
756 __ test(right_reg, ToOperand(right));
757 DeoptimizeIf(zero, instr->environment());
758 }
759
760 // Check for (0 / -x) that will produce negative zero.
761 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
762 NearLabel left_not_zero;
763 __ test(left_reg, Operand(left_reg));
764 __ j(not_zero, &left_not_zero);
765 __ test(right_reg, ToOperand(right));
766 DeoptimizeIf(sign, instr->environment());
767 __ bind(&left_not_zero);
768 }
769
770 // Check for (-kMinInt / -1).
771 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
772 NearLabel left_not_min_int;
773 __ cmp(left_reg, kMinInt);
774 __ j(not_zero, &left_not_min_int);
775 __ cmp(right_reg, -1);
776 DeoptimizeIf(zero, instr->environment());
777 __ bind(&left_not_min_int);
778 }
779
780 // Sign extend to edx.
781 __ cdq();
782 __ idiv(right_reg);
783
784 // Deoptimize if remainder is not 0.
785 __ test(edx, Operand(edx));
786 DeoptimizeIf(not_zero, instr->environment());
787}
788
789
790void LCodeGen::DoMulI(LMulI* instr) {
791 Register left = ToRegister(instr->left());
792 LOperand* right = instr->right();
793
794 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
795 __ mov(ToRegister(instr->temp()), left);
796 }
797
798 if (right->IsConstantOperand()) {
799 __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
800 } else {
801 __ imul(left, ToOperand(right));
802 }
803
804 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
805 DeoptimizeIf(overflow, instr->environment());
806 }
807
808 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
809 // Bail out if the result is supposed to be negative zero.
810 NearLabel done;
811 __ test(left, Operand(left));
812 __ j(not_zero, &done);
813 if (right->IsConstantOperand()) {
814 if (ToInteger32(LConstantOperand::cast(right)) < 0) {
815 DeoptimizeIf(no_condition, instr->environment());
816 }
817 } else {
818 // Test the non-zero operand for negative sign.
819 __ or_(ToRegister(instr->temp()), ToOperand(right));
820 DeoptimizeIf(sign, instr->environment());
821 }
822 __ bind(&done);
823 }
824}
825
826
827void LCodeGen::DoBitI(LBitI* instr) {
828 LOperand* left = instr->left();
829 LOperand* right = instr->right();
830 ASSERT(left->Equals(instr->result()));
831 ASSERT(left->IsRegister());
832
833 if (right->IsConstantOperand()) {
834 int right_operand = ToInteger32(LConstantOperand::cast(right));
835 switch (instr->op()) {
836 case Token::BIT_AND:
837 __ and_(ToRegister(left), right_operand);
838 break;
839 case Token::BIT_OR:
840 __ or_(ToRegister(left), right_operand);
841 break;
842 case Token::BIT_XOR:
843 __ xor_(ToRegister(left), right_operand);
844 break;
845 default:
846 UNREACHABLE();
847 break;
848 }
849 } else {
850 switch (instr->op()) {
851 case Token::BIT_AND:
852 __ and_(ToRegister(left), ToOperand(right));
853 break;
854 case Token::BIT_OR:
855 __ or_(ToRegister(left), ToOperand(right));
856 break;
857 case Token::BIT_XOR:
858 __ xor_(ToRegister(left), ToOperand(right));
859 break;
860 default:
861 UNREACHABLE();
862 break;
863 }
864 }
865}
866
867
868void LCodeGen::DoShiftI(LShiftI* instr) {
869 LOperand* left = instr->left();
870 LOperand* right = instr->right();
871 ASSERT(left->Equals(instr->result()));
872 ASSERT(left->IsRegister());
873 if (right->IsRegister()) {
874 ASSERT(ToRegister(right).is(ecx));
875
876 switch (instr->op()) {
877 case Token::SAR:
878 __ sar_cl(ToRegister(left));
879 break;
880 case Token::SHR:
881 __ shr_cl(ToRegister(left));
882 if (instr->can_deopt()) {
883 __ test(ToRegister(left), Immediate(0x80000000));
884 DeoptimizeIf(not_zero, instr->environment());
885 }
886 break;
887 case Token::SHL:
888 __ shl_cl(ToRegister(left));
889 break;
890 default:
891 UNREACHABLE();
892 break;
893 }
894 } else {
895 int value = ToInteger32(LConstantOperand::cast(right));
896 uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
897 switch (instr->op()) {
898 case Token::SAR:
899 if (shift_count != 0) {
900 __ sar(ToRegister(left), shift_count);
901 }
902 break;
903 case Token::SHR:
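        // A zero shift leaves the bits unchanged, but the result of SHR is
        // interpreted as unsigned; deoptimize if the sign bit is set, since
        // such a value cannot be represented as a signed 32-bit integer.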
904 if (shift_count == 0 && instr->can_deopt()) {
905 __ test(ToRegister(left), Immediate(0x80000000));
906 DeoptimizeIf(not_zero, instr->environment());
907 } else {
908 __ shr(ToRegister(left), shift_count);
909 }
910 break;
911 case Token::SHL:
912 if (shift_count != 0) {
913 __ shl(ToRegister(left), shift_count);
914 }
915 break;
916 default:
917 UNREACHABLE();
918 break;
919 }
920 }
921}
922
923
924void LCodeGen::DoSubI(LSubI* instr) {
925 LOperand* left = instr->left();
926 LOperand* right = instr->right();
927 ASSERT(left->Equals(instr->result()));
928
929 if (right->IsConstantOperand()) {
930 __ sub(ToOperand(left), ToImmediate(right));
931 } else {
932 __ sub(ToRegister(left), ToOperand(right));
933 }
934 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
935 DeoptimizeIf(overflow, instr->environment());
936 }
937}
938
939
940void LCodeGen::DoConstantI(LConstantI* instr) {
941 ASSERT(instr->result()->IsRegister());
942 __ mov(ToRegister(instr->result()), instr->value());
943}
944
945
946void LCodeGen::DoConstantD(LConstantD* instr) {
947 ASSERT(instr->result()->IsDoubleRegister());
948 XMMRegister res = ToDoubleRegister(instr->result());
949 double v = instr->value();
950 // Use xor to produce +0.0 in a fast and compact way, but avoid doing
951 // so if the constant is -0.0.
952 if (BitCast<uint64_t, double>(v) == 0) {
953 __ xorpd(res, res);
954 } else {
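    // If the double is exactly representable as an int32, convert from a
    // pushed immediate instead of materializing the full 64-bit bit pattern.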
955 int32_t v_int32 = static_cast<int32_t>(v);
956 if (static_cast<double>(v_int32) == v) {
957 __ push_imm32(v_int32);
958 __ cvtsi2sd(res, Operand(esp, 0));
959 __ add(Operand(esp), Immediate(kPointerSize));
960 } else {
961 uint64_t int_val = BitCast<uint64_t, double>(v);
962 int32_t lower = static_cast<int32_t>(int_val);
963 int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
964 __ push_imm32(upper);
965 __ push_imm32(lower);
966 __ movdbl(res, Operand(esp, 0));
967 __ add(Operand(esp), Immediate(2 * kPointerSize));
968 }
969 }
970}
971
972
973void LCodeGen::DoConstantT(LConstantT* instr) {
974 ASSERT(instr->result()->IsRegister());
975 __ mov(ToRegister(instr->result()), Immediate(instr->value()));
976}
977
978
979void LCodeGen::DoArrayLength(LArrayLength* instr) {
980 Register result = ToRegister(instr->result());
981
982 if (instr->hydrogen()->value()->IsLoadElements()) {
983 // We load the length directly from the elements array.
984 Register elements = ToRegister(instr->input());
985 __ mov(result, FieldOperand(elements, FixedArray::kLengthOffset));
986 } else {
987 // Check that the receiver really is an array.
988 Register array = ToRegister(instr->input());
989 Register temporary = ToRegister(instr->temporary());
990 __ CmpObjectType(array, JS_ARRAY_TYPE, temporary);
991 DeoptimizeIf(not_equal, instr->environment());
992
993 // Load length directly from the array.
994 __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
995 }
996}
997
998
999void LCodeGen::DoValueOf(LValueOf* instr) {
1000 Register input = ToRegister(instr->input());
1001 Register result = ToRegister(instr->result());
1002 Register map = ToRegister(instr->temporary());
1003 ASSERT(input.is(result));
1004 NearLabel done;
1005 // If the object is a smi return the object.
1006 __ test(input, Immediate(kSmiTagMask));
1007 __ j(zero, &done);
1008
1009 // If the object is not a value type, return the object.
1010 __ CmpObjectType(input, JS_VALUE_TYPE, map);
1011 __ j(not_equal, &done);
1012 __ mov(result, FieldOperand(input, JSValue::kValueOffset));
1013
1014 __ bind(&done);
1015}
1016
1017
1018void LCodeGen::DoBitNotI(LBitNotI* instr) {
1019 LOperand* input = instr->input();
1020 ASSERT(input->Equals(instr->result()));
1021 __ not_(ToRegister(input));
1022}
1023
1024
1025void LCodeGen::DoThrow(LThrow* instr) {
1026 __ push(ToOperand(instr->input()));
1027 CallRuntime(Runtime::kThrow, 1, instr);
1028
1029 if (FLAG_debug_code) {
1030 Comment("Unreachable code.");
1031 __ int3();
1032 }
1033}
1034
1035
1036void LCodeGen::DoAddI(LAddI* instr) {
1037 LOperand* left = instr->left();
1038 LOperand* right = instr->right();
1039 ASSERT(left->Equals(instr->result()));
1040
1041 if (right->IsConstantOperand()) {
1042 __ add(ToOperand(left), ToImmediate(right));
1043 } else {
1044 __ add(ToRegister(left), ToOperand(right));
1045 }
1046
1047 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1048 DeoptimizeIf(overflow, instr->environment());
1049 }
1050}
1051
1052
1053void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1054 LOperand* left = instr->left();
1055 LOperand* right = instr->right();
1056 // Modulo uses a fixed result register.
1057 ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
1058 switch (instr->op()) {
1059 case Token::ADD:
1060 __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
1061 break;
1062 case Token::SUB:
1063 __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
1064 break;
1065 case Token::MUL:
1066 __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
1067 break;
1068 case Token::DIV:
1069 __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
1070 break;
1071 case Token::MOD: {
1072 // Pass two doubles as arguments on the stack.
1073 __ PrepareCallCFunction(4, eax);
1074 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
1075 __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
1076 __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
1077
1078 // Return value is in st(0) on ia32.
1079 // Store it into the (fixed) result register.
1080 __ sub(Operand(esp), Immediate(kDoubleSize));
1081 __ fstp_d(Operand(esp, 0));
1082 __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
1083 __ add(Operand(esp), Immediate(kDoubleSize));
1084 break;
1085 }
1086 default:
1087 UNREACHABLE();
1088 break;
1089 }
1090}
1091
1092
1093void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
1094 ASSERT(ToRegister(instr->left()).is(edx));
1095 ASSERT(ToRegister(instr->right()).is(eax));
1096 ASSERT(ToRegister(instr->result()).is(eax));
1097
1098 TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
1099 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1100}
1101
1102
1103int LCodeGen::GetNextEmittedBlock(int block) {
1104 for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
1105 LLabel* label = chunk_->GetLabel(i);
1106 if (!label->HasReplacement()) return i;
1107 }
1108 return -1;
1109}
1110
1111
1112void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1113 int next_block = GetNextEmittedBlock(current_block_);
1114 right_block = chunk_->LookupDestination(right_block);
1115 left_block = chunk_->LookupDestination(left_block);
1116
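  // Emit as few jumps as possible by falling through whenever one of the
  // targets is the next block to be emitted.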
1117 if (right_block == left_block) {
1118 EmitGoto(left_block);
1119 } else if (left_block == next_block) {
1120 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1121 } else if (right_block == next_block) {
1122 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1123 } else {
1124 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1125 __ jmp(chunk_->GetAssemblyLabel(right_block));
1126 }
1127}
1128
1129
1130void LCodeGen::DoBranch(LBranch* instr) {
1131 int true_block = chunk_->LookupDestination(instr->true_block_id());
1132 int false_block = chunk_->LookupDestination(instr->false_block_id());
1133
1134 Representation r = instr->hydrogen()->representation();
1135 if (r.IsInteger32()) {
1136 Register reg = ToRegister(instr->input());
1137 __ test(reg, Operand(reg));
1138 EmitBranch(true_block, false_block, not_zero);
1139 } else if (r.IsDouble()) {
1140 XMMRegister reg = ToDoubleRegister(instr->input());
1141 __ xorpd(xmm0, xmm0);
1142 __ ucomisd(reg, xmm0);
1143 EmitBranch(true_block, false_block, not_equal);
1144 } else {
1145 ASSERT(r.IsTagged());
1146 Register reg = ToRegister(instr->input());
1147 if (instr->hydrogen()->type().IsBoolean()) {
1148 __ cmp(reg, Factory::true_value());
1149 EmitBranch(true_block, false_block, equal);
1150 } else {
1151 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1152 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1153
1154 __ cmp(reg, Factory::undefined_value());
1155 __ j(equal, false_label);
1156 __ cmp(reg, Factory::true_value());
1157 __ j(equal, true_label);
1158 __ cmp(reg, Factory::false_value());
1159 __ j(equal, false_label);
1160 __ test(reg, Operand(reg));
1161 __ j(equal, false_label);
1162 __ test(reg, Immediate(kSmiTagMask));
1163 __ j(zero, true_label);
1164
1165 // Test for double values. Zero is false.
1166 NearLabel call_stub;
1167 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1168 Factory::heap_number_map());
1169 __ j(not_equal, &call_stub);
1170 __ fldz();
1171 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
1172 __ FCmp();
1173 __ j(zero, false_label);
1174 __ jmp(true_label);
1175
1176 // The conversion stub doesn't cause garbage collections so it's
1177 // safe to not record a safepoint after the call.
1178 __ bind(&call_stub);
1179 ToBooleanStub stub;
1180 __ pushad();
1181 __ push(reg);
1182 __ CallStub(&stub);
1183 __ test(eax, Operand(eax));
1184 __ popad();
1185 EmitBranch(true_block, false_block, not_zero);
1186 }
1187 }
1188}
1189
1190
1191void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
1192 block = chunk_->LookupDestination(block);
1193 int next_block = GetNextEmittedBlock(current_block_);
1194 if (block != next_block) {
1195 // Perform stack overflow check if this goto needs it before jumping.
1196 if (deferred_stack_check != NULL) {
1197 ExternalReference stack_limit =
1198 ExternalReference::address_of_stack_limit();
1199 __ cmp(esp, Operand::StaticVariable(stack_limit));
1200 __ j(above_equal, chunk_->GetAssemblyLabel(block));
1201 __ jmp(deferred_stack_check->entry());
1202 deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
1203 } else {
1204 __ jmp(chunk_->GetAssemblyLabel(block));
1205 }
1206 }
1207}
1208
1209
1210void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
1211 __ pushad();
1212 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
1213 RecordSafepointWithRegisters(
1214 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
1215 __ popad();
1216}
1217
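// Gotos that include a stack check (typically loop back edges) emit the
// runtime call as deferred, out-of-line code; the inline fast path only
// compares esp against the stack limit.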
1218void LCodeGen::DoGoto(LGoto* instr) {
1219 class DeferredStackCheck: public LDeferredCode {
1220 public:
1221 DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
1222 : LDeferredCode(codegen), instr_(instr) { }
1223 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
1224 private:
1225 LGoto* instr_;
1226 };
1227
1228 DeferredStackCheck* deferred = NULL;
1229 if (instr->include_stack_check()) {
1230 deferred = new DeferredStackCheck(this, instr);
1231 }
1232 EmitGoto(instr->block_id(), deferred);
1233}
1234
1235
1236Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1237 Condition cond = no_condition;
1238 switch (op) {
1239 case Token::EQ:
1240 case Token::EQ_STRICT:
1241 cond = equal;
1242 break;
1243 case Token::LT:
1244 cond = is_unsigned ? below : less;
1245 break;
1246 case Token::GT:
1247 cond = is_unsigned ? above : greater;
1248 break;
1249 case Token::LTE:
1250 cond = is_unsigned ? below_equal : less_equal;
1251 break;
1252 case Token::GTE:
1253 cond = is_unsigned ? above_equal : greater_equal;
1254 break;
1255 case Token::IN:
1256 case Token::INSTANCEOF:
1257 default:
1258 UNREACHABLE();
1259 }
1260 return cond;
1261}
1262
1263
1264void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1265 if (right->IsConstantOperand()) {
1266 __ cmp(ToOperand(left), ToImmediate(right));
1267 } else {
1268 __ cmp(ToRegister(left), ToOperand(right));
1269 }
1270}
1271
1272
1273void LCodeGen::DoCmpID(LCmpID* instr) {
1274 LOperand* left = instr->left();
1275 LOperand* right = instr->right();
1276 LOperand* result = instr->result();
1277
1278 NearLabel unordered;
1279 if (instr->is_double()) {
1280 // Don't base result on EFLAGS when a NaN is involved. Instead
1281 // jump to the unordered case, which produces a false value.
1282 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1283 __ j(parity_even, &unordered, not_taken);
1284 } else {
1285 EmitCmpI(left, right);
1286 }
1287
1288 NearLabel done;
1289 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1290 __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
1291 __ j(cc, &done);
1292
1293 __ bind(&unordered);
1294 __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
1295 __ bind(&done);
1296}
1297
1298
1299void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1300 LOperand* left = instr->left();
1301 LOperand* right = instr->right();
1302 int false_block = chunk_->LookupDestination(instr->false_block_id());
1303 int true_block = chunk_->LookupDestination(instr->true_block_id());
1304
1305 if (instr->is_double()) {
1306 // Don't base result on EFLAGS when a NaN is involved. Instead
1307 // jump to the false block.
1308 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1309 __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1310 } else {
1311 EmitCmpI(left, right);
1312 }
1313
1314 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1315 EmitBranch(true_block, false_block, cc);
1316}
1317
1318
1319void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
1320 Register left = ToRegister(instr->left());
1321 Register right = ToRegister(instr->right());
1322 Register result = ToRegister(instr->result());
1323
1324 __ cmp(left, Operand(right));
1325 __ mov(result, Handle<Object>(Heap::true_value()));
1326 NearLabel done;
1327 __ j(equal, &done);
1328 __ mov(result, Handle<Object>(Heap::false_value()));
1329 __ bind(&done);
1330}
1331
1332
1333void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
1334 Register left = ToRegister(instr->left());
1335 Register right = ToRegister(instr->right());
1336 int false_block = chunk_->LookupDestination(instr->false_block_id());
1337 int true_block = chunk_->LookupDestination(instr->true_block_id());
1338
1339 __ cmp(left, Operand(right));
1340 EmitBranch(true_block, false_block, equal);
1341}
1342
1343
1344void LCodeGen::DoIsNull(LIsNull* instr) {
1345 Register reg = ToRegister(instr->input());
1346 Register result = ToRegister(instr->result());
1347
1348 // TODO(fsc): If the expression is known to be a smi, then it's
1349 // definitely not null. Materialize false.
1350
1351 __ cmp(reg, Factory::null_value());
1352 if (instr->is_strict()) {
1353 __ mov(result, Handle<Object>(Heap::true_value()));
1354 NearLabel done;
1355 __ j(equal, &done);
1356 __ mov(result, Handle<Object>(Heap::false_value()));
1357 __ bind(&done);
1358 } else {
1359 NearLabel true_value, false_value, done;
1360 __ j(equal, &true_value);
1361 __ cmp(reg, Factory::undefined_value());
1362 __ j(equal, &true_value);
1363 __ test(reg, Immediate(kSmiTagMask));
1364 __ j(zero, &false_value);
1365 // Check for undetectable objects by looking in the bit field in
1366 // the map. The object has already been smi checked.
1367 Register scratch = result;
1368 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1369 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1370 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1371 __ j(not_zero, &true_value);
1372 __ bind(&false_value);
1373 __ mov(result, Handle<Object>(Heap::false_value()));
1374 __ jmp(&done);
1375 __ bind(&true_value);
1376 __ mov(result, Handle<Object>(Heap::true_value()));
1377 __ bind(&done);
1378 }
1379}
1380
1381
1382void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
1383 Register reg = ToRegister(instr->input());
1384
1385 // TODO(fsc): If the expression is known to be a smi, then it's
1386 // definitely not null. Jump to the false block.
1387
1388 int true_block = chunk_->LookupDestination(instr->true_block_id());
1389 int false_block = chunk_->LookupDestination(instr->false_block_id());
1390
1391 __ cmp(reg, Factory::null_value());
1392 if (instr->is_strict()) {
1393 EmitBranch(true_block, false_block, equal);
1394 } else {
1395 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1396 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1397 __ j(equal, true_label);
1398 __ cmp(reg, Factory::undefined_value());
1399 __ j(equal, true_label);
1400 __ test(reg, Immediate(kSmiTagMask));
1401 __ j(zero, false_label);
1402 // Check for undetectable objects by looking in the bit field in
1403 // the map. The object has already been smi checked.
1404 Register scratch = ToRegister(instr->temp());
1405 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1406 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1407 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1408 EmitBranch(true_block, false_block, not_zero);
1409 }
1410}
1411
1412
1413Condition LCodeGen::EmitIsObject(Register input,
1414 Register temp1,
1415 Register temp2,
1416 Label* is_not_object,
1417 Label* is_object) {
1418 ASSERT(!input.is(temp1));
1419 ASSERT(!input.is(temp2));
1420 ASSERT(!temp1.is(temp2));
1421
1422 __ test(input, Immediate(kSmiTagMask));
1423 __ j(equal, is_not_object);
1424
1425 __ cmp(input, Factory::null_value());
1426 __ j(equal, is_object);
1427
1428 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
1429 // Undetectable objects behave like undefined.
1430 __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
1431 __ test(temp2, Immediate(1 << Map::kIsUndetectable));
1432 __ j(not_zero, is_not_object);
1433
1434 __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
1435 __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
1436 __ j(below, is_not_object);
1437 __ cmp(temp2, LAST_JS_OBJECT_TYPE);
1438 return below_equal;
1439}
1440
1441
1442void LCodeGen::DoIsObject(LIsObject* instr) {
1443 Register reg = ToRegister(instr->input());
1444 Register result = ToRegister(instr->result());
1445 Register temp = ToRegister(instr->temp());
1446 Label is_false, is_true, done;
1447
1448 Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
1449 __ j(true_cond, &is_true);
1450
1451 __ bind(&is_false);
1452 __ mov(result, Handle<Object>(Heap::false_value()));
1453 __ jmp(&done);
1454
1455 __ bind(&is_true);
1456 __ mov(result, Handle<Object>(Heap::true_value()));
1457
1458 __ bind(&done);
1459}
1460
1461
1462void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1463 Register reg = ToRegister(instr->input());
1464 Register temp = ToRegister(instr->temp());
1465 Register temp2 = ToRegister(instr->temp2());
1466
1467 int true_block = chunk_->LookupDestination(instr->true_block_id());
1468 int false_block = chunk_->LookupDestination(instr->false_block_id());
1469 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1470 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1471
1472 Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
1473
1474 EmitBranch(true_block, false_block, true_cond);
1475}
1476
1477
1478void LCodeGen::DoIsSmi(LIsSmi* instr) {
1479 Operand input = ToOperand(instr->input());
1480 Register result = ToRegister(instr->result());
1481
1482 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1483 __ test(input, Immediate(kSmiTagMask));
1484 __ mov(result, Handle<Object>(Heap::true_value()));
1485 NearLabel done;
1486 __ j(zero, &done);
1487 __ mov(result, Handle<Object>(Heap::false_value()));
1488 __ bind(&done);
1489}
1490
1491
1492void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1493 Operand input = ToOperand(instr->input());
1494
1495 int true_block = chunk_->LookupDestination(instr->true_block_id());
1496 int false_block = chunk_->LookupDestination(instr->false_block_id());
1497
1498 __ test(input, Immediate(kSmiTagMask));
1499 EmitBranch(true_block, false_block, zero);
1500}
1501
1502
1503InstanceType LHasInstanceType::TestType() {
1504 InstanceType from = hydrogen()->from();
1505 InstanceType to = hydrogen()->to();
1506 if (from == FIRST_TYPE) return to;
1507 ASSERT(from == to || to == LAST_TYPE);
1508 return from;
1509}
1510
1511
1512
1513Condition LHasInstanceType::BranchCondition() {
1514 InstanceType from = hydrogen()->from();
1515 InstanceType to = hydrogen()->to();
1516 if (from == to) return equal;
1517 if (to == LAST_TYPE) return above_equal;
1518 if (from == FIRST_TYPE) return below_equal;
1519 UNREACHABLE();
1520 return equal;
1521}
1522
1523
1524void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
1525 Register input = ToRegister(instr->input());
1526 Register result = ToRegister(instr->result());
1527
1528 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1529 __ test(input, Immediate(kSmiTagMask));
1530 NearLabel done, is_false;
1531 __ j(zero, &is_false);
1532 __ CmpObjectType(input, instr->TestType(), result);
1533 __ j(NegateCondition(instr->BranchCondition()), &is_false);
1534 __ mov(result, Handle<Object>(Heap::true_value()));
1535 __ jmp(&done);
1536 __ bind(&is_false);
1537 __ mov(result, Handle<Object>(Heap::false_value()));
1538 __ bind(&done);
1539}
1540
1541
1542void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1543 Register input = ToRegister(instr->input());
1544 Register temp = ToRegister(instr->temp());
1545
1546 int true_block = chunk_->LookupDestination(instr->true_block_id());
1547 int false_block = chunk_->LookupDestination(instr->false_block_id());
1548
1549 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1550
1551 __ test(input, Immediate(kSmiTagMask));
1552 __ j(zero, false_label);
1553
1554 __ CmpObjectType(input, instr->TestType(), temp);
1555 EmitBranch(true_block, false_block, instr->BranchCondition());
1556}
1557
1558
1559void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
1560 Register input = ToRegister(instr->input());
1561 Register result = ToRegister(instr->result());
1562
1563 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1564 __ mov(result, Handle<Object>(Heap::true_value()));
1565 __ test(FieldOperand(input, String::kHashFieldOffset),
1566 Immediate(String::kContainsCachedArrayIndexMask));
1567 NearLabel done;
1568 __ j(not_zero, &done);
1569 __ mov(result, Handle<Object>(Heap::false_value()));
1570 __ bind(&done);
1571}
1572
1573
1574void LCodeGen::DoHasCachedArrayIndexAndBranch(
1575 LHasCachedArrayIndexAndBranch* instr) {
1576 Register input = ToRegister(instr->input());
1577
1578 int true_block = chunk_->LookupDestination(instr->true_block_id());
1579 int false_block = chunk_->LookupDestination(instr->false_block_id());
1580
1581 __ test(FieldOperand(input, String::kHashFieldOffset),
1582 Immediate(String::kContainsCachedArrayIndexMask));
1583 EmitBranch(true_block, false_block, not_equal);
1584}
1585
1586
1587// Branches to a label or falls through with the answer in the z flag. Trashes
1588// the temp registers, but not the input. Only input and temp2 may alias.
1589void LCodeGen::EmitClassOfTest(Label* is_true,
1590 Label* is_false,
1591 Handle<String> class_name,
1592 Register input,
1593 Register temp,
1594 Register temp2) {
1595 ASSERT(!input.is(temp));
1596 ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
1597 __ test(input, Immediate(kSmiTagMask));
1598 __ j(zero, is_false);
1599 __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
1600 __ j(below, is_false);
1601
1602 // Map is now in temp.
1603 // Functions have class 'Function'.
1604 __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
1605 if (class_name->IsEqualTo(CStrVector("Function"))) {
1606 __ j(equal, is_true);
1607 } else {
1608 __ j(equal, is_false);
1609 }
1610
1611 // Check if the constructor in the map is a function.
1612 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
1613
1614 // As long as JS_FUNCTION_TYPE is the last instance type and it is
1615 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
1616 // LAST_JS_OBJECT_TYPE.
1617 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1618 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1619
1620 // Objects with a non-function constructor have class 'Object'.
1621 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
1622 if (class_name->IsEqualTo(CStrVector("Object"))) {
1623 __ j(not_equal, is_true);
1624 } else {
1625 __ j(not_equal, is_false);
1626 }
1627
1628 // temp now contains the constructor function. Grab the
1629 // instance class name from there.
1630 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1631 __ mov(temp, FieldOperand(temp,
1632 SharedFunctionInfo::kInstanceClassNameOffset));
1633 // The class name we are testing against is a symbol because it's a literal.
1634 // The name in the constructor is a symbol because of the way the context is
1635 // booted. This routine isn't expected to work for random API-created
1636 // classes and it doesn't have to because you can't access it with natives
1637 // syntax. Since both sides are symbols it is sufficient to use an identity
1638 // comparison.
1639 __ cmp(temp, class_name);
1640 // End with the answer in the z flag.
1641}
1642
1643
1644void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
1645 Register input = ToRegister(instr->input());
1646 Register result = ToRegister(instr->result());
1647 ASSERT(input.is(result));
1648 Register temp = ToRegister(instr->temporary());
1649 Handle<String> class_name = instr->hydrogen()->class_name();
1650 NearLabel done;
1651 Label is_true, is_false;
1652
1653 EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
1654
1655 __ j(not_equal, &is_false);
1656
1657 __ bind(&is_true);
1658 __ mov(result, Handle<Object>(Heap::true_value()));
1659 __ jmp(&done);
1660
1661 __ bind(&is_false);
1662 __ mov(result, Handle<Object>(Heap::false_value()));
1663 __ bind(&done);
1664}
1665
1666
1667void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1668 Register input = ToRegister(instr->input());
1669 Register temp = ToRegister(instr->temporary());
1670 Register temp2 = ToRegister(instr->temporary2());
1671 if (input.is(temp)) {
1672 // Swap.
1673 Register swapper = temp;
1674 temp = temp2;
1675 temp2 = swapper;
1676 }
1677 Handle<String> class_name = instr->hydrogen()->class_name();
1678
1679 int true_block = chunk_->LookupDestination(instr->true_block_id());
1680 int false_block = chunk_->LookupDestination(instr->false_block_id());
1681
1682 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1683 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1684
1685 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1686
1687 EmitBranch(true_block, false_block, equal);
1688}
1689
1690
1691void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1692 Register reg = ToRegister(instr->input());
1693 int true_block = instr->true_block_id();
1694 int false_block = instr->false_block_id();
1695
1696 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
1697 EmitBranch(true_block, false_block, equal);
1698}
1699
1700
1701void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
1702 // Object and function are in fixed registers eax and edx.
1703 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
1704 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1705
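  // The stub leaves 0 in eax when the object is an instance of the
  // function, hence the inverted test below.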
1706 NearLabel true_value, done;
1707 __ test(eax, Operand(eax));
1708 __ j(zero, &true_value);
1709 __ mov(ToRegister(instr->result()), Factory::false_value());
1710 __ jmp(&done);
1711 __ bind(&true_value);
1712 __ mov(ToRegister(instr->result()), Factory::true_value());
1713 __ bind(&done);
1714}
1715
1716
1717void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
1718 int true_block = chunk_->LookupDestination(instr->true_block_id());
1719 int false_block = chunk_->LookupDestination(instr->false_block_id());
1720
1721 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
1722 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1723 __ test(eax, Operand(eax));
1724 EmitBranch(true_block, false_block, zero);
1725}
1726
1727
1728static Condition ComputeCompareCondition(Token::Value op) {
1729 switch (op) {
1730 case Token::EQ_STRICT:
1731 case Token::EQ:
1732 return equal;
1733 case Token::LT:
1734 return less;
1735 case Token::GT:
1736 return greater;
1737 case Token::LTE:
1738 return less_equal;
1739 case Token::GTE:
1740 return greater_equal;
1741 default:
1742 UNREACHABLE();
1743 return no_condition;
1744 }
1745}
1746
1747
1748void LCodeGen::DoCmpT(LCmpT* instr) {
1749 Token::Value op = instr->op();
1750
1751 Handle<Code> ic = CompareIC::GetUninitialized(op);
1752 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1753
1754 Condition condition = ComputeCompareCondition(op);
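  // As in DoCmpTAndBranch below, the compare stub expects the condition and
  // its operands reversed for GT and LTE.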
1755 if (op == Token::GT || op == Token::LTE) {
1756 condition = ReverseCondition(condition);
1757 }
1758 NearLabel true_value, done;
1759 __ test(eax, Operand(eax));
1760 __ j(condition, &true_value);
1761 __ mov(ToRegister(instr->result()), Factory::false_value());
1762 __ jmp(&done);
1763 __ bind(&true_value);
1764 __ mov(ToRegister(instr->result()), Factory::true_value());
1765 __ bind(&done);
1766}
1767
1768
1769void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
1770 Token::Value op = instr->op();
1771 int true_block = chunk_->LookupDestination(instr->true_block_id());
1772 int false_block = chunk_->LookupDestination(instr->false_block_id());
1773
1774 Handle<Code> ic = CompareIC::GetUninitialized(op);
1775 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1776
1777 // The compare stub expects compare condition and the input operands
1778 // reversed for GT and LTE.
1779 Condition condition = ComputeCompareCondition(op);
1780 if (op == Token::GT || op == Token::LTE) {
1781 condition = ReverseCondition(condition);
1782 }
1783 __ test(eax, Operand(eax));
1784 EmitBranch(true_block, false_block, condition);
1785}
1786
1787
1788void LCodeGen::DoReturn(LReturn* instr) {
1789 if (FLAG_trace) {
1790 // Preserve the return value on the stack and rely on the runtime
1791 // call to return the value in the same register.
1792 __ push(eax);
1793 __ CallRuntime(Runtime::kTraceExit, 1);
1794 }
1795 __ mov(esp, ebp);
1796 __ pop(ebp);
1797 __ ret((ParameterCount() + 1) * kPointerSize);
1798}
1799
1800
1801void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
1802 Register result = ToRegister(instr->result());
1803 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
1804 if (instr->hydrogen()->check_hole_value()) {
1805 __ cmp(result, Factory::the_hole_value());
1806 DeoptimizeIf(equal, instr->environment());
1807 }
1808}
1809
1810
1811void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
1812 Register value = ToRegister(instr->input());
1813 __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
1814}
1815
1816
1817void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
1818 Register object = ToRegister(instr->input());
1819 Register result = ToRegister(instr->result());
1820 if (instr->hydrogen()->is_in_object()) {
1821 __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
1822 } else {
1823 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
1824 __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
1825 }
1826}
1827
1828
1829void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
1830 ASSERT(ToRegister(instr->object()).is(eax));
1831 ASSERT(ToRegister(instr->result()).is(eax));
1832
1833 __ mov(ecx, instr->name());
1834 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
1835 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1836}
1837
1838
1839void LCodeGen::DoLoadElements(LLoadElements* instr) {
1840 ASSERT(instr->result()->Equals(instr->input()));
1841 Register reg = ToRegister(instr->input());
1842 __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
1843 if (FLAG_debug_code) {
1844 NearLabel done;
1845 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1846 Immediate(Factory::fixed_array_map()));
1847 __ j(equal, &done);
1848 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1849 Immediate(Factory::fixed_cow_array_map()));
1850 __ Check(equal, "Check for fast elements failed.");
1851 __ bind(&done);
1852 }
1853}
1854
1855
1856void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1857 Register arguments = ToRegister(instr->arguments());
1858 Register length = ToRegister(instr->length());
1859 Operand index = ToOperand(instr->index());
1860 Register result = ToRegister(instr->result());
1861
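  // Deoptimize if index is not a valid argument index; the unsigned
  // below_equal check also rejects a negative index for free.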
1862 __ sub(length, index);
1863 DeoptimizeIf(below_equal, instr->environment());
1864
1865 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
1866}
1867
1868
1869void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
1870 Register elements = ToRegister(instr->elements());
1871 Register key = ToRegister(instr->key());
1872 Register result;
1873 if (instr->load_result() != NULL) {
1874 result = ToRegister(instr->load_result());
1875 } else {
1876 result = ToRegister(instr->result());
1877 ASSERT(result.is(elements));
1878 }
1879
1880 // Load the result.
1881 __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
1882
1883 Representation r = instr->hydrogen()->representation();
1884 if (r.IsInteger32()) {
1885 // Untag and check for smi.
1886 __ SmiUntag(result);
1887 DeoptimizeIf(carry, instr->environment());
1888 } else if (r.IsDouble()) {
1889 EmitNumberUntagD(result,
1890 ToDoubleRegister(instr->result()),
1891 instr->environment());
1892 } else {
1893 // Check for the hole value.
1894 ASSERT(r.IsTagged());
1895 __ cmp(result, Factory::the_hole_value());
1896 DeoptimizeIf(equal, instr->environment());
1897 }
1898}
1899
1900
1901void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
1902 ASSERT(ToRegister(instr->object()).is(edx));
1903 ASSERT(ToRegister(instr->key()).is(eax));
1904
1905 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
1906 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1907}
1908
1909
1910void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
1911 Register result = ToRegister(instr->result());
1912
1913 // Check for an arguments adaptor frame.
1914 Label done, adapted;
1915 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1916 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
1917 __ cmp(Operand(result),
1918 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
1919 __ j(equal, &adapted);
1920
1921 // No arguments adaptor frame.
1922 __ mov(result, Operand(ebp));
1923 __ jmp(&done);
1924
1925 // Arguments adaptor frame present.
1926 __ bind(&adapted);
1927 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1928
1929 // Done. Pointer to topmost argument is in result.
1930 __ bind(&done);
1931}
1932
1933
1934void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
1935 Operand elem = ToOperand(instr->input());
1936 Register result = ToRegister(instr->result());
1937
1938 Label done;
1939
1940 // No arguments adaptor frame. Number of arguments is fixed.
1941 __ cmp(ebp, elem);
1942 __ mov(result, Immediate(scope()->num_parameters()));
1943 __ j(equal, &done);
1944
1945 // Arguments adaptor frame present. Get argument length from there.
1946 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
1947 __ mov(result, Operand(result,
1948 ArgumentsAdaptorFrameConstants::kLengthOffset));
1949 __ SmiUntag(result);
1950
1951 // Done. Argument length is in result register.
1952 __ bind(&done);
1953}
1954
1955
1956void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
1957 Register receiver = ToRegister(instr->receiver());
1958 ASSERT(ToRegister(instr->function()).is(edi));
1959 ASSERT(ToRegister(instr->result()).is(eax));
1960
1961 // If the receiver is null or undefined, we have to pass the
1962 // global object as a receiver.
1963 NearLabel global_receiver, receiver_ok;
1964 __ cmp(receiver, Factory::null_value());
1965 __ j(equal, &global_receiver);
1966 __ cmp(receiver, Factory::undefined_value());
1967 __ j(not_equal, &receiver_ok);
1968 __ bind(&global_receiver);
1969 __ mov(receiver, GlobalObjectOperand());
1970 __ bind(&receiver_ok);
1971
1972 Register length = ToRegister(instr->length());
1973 Register elements = ToRegister(instr->elements());
1974
1975 Label invoke;
1976
1977 // Copy the arguments to this function possibly from the
1978 // adaptor frame below it.
1979 const uint32_t kArgumentsLimit = 1 * KB;
1980 __ cmp(length, kArgumentsLimit);
1981 DeoptimizeIf(above, instr->environment());
1982
1983 __ push(receiver);
1984 __ mov(receiver, length);
1985
1986 // Loop through the arguments pushing them onto the execution
1987 // stack.
1988 Label loop;
1989 // length is a small non-negative integer, due to the test above.
1990 __ test(length, Operand(length));
1991 __ j(zero, &invoke);
1992 __ bind(&loop);
1993 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
1994 __ dec(length);
1995 __ j(not_zero, &loop);
1996
1997 // Invoke the function.
1998 __ bind(&invoke);
1999 ASSERT(receiver.is(eax));
2000 v8::internal::ParameterCount actual(eax);
2001 SafepointGenerator safepoint_generator(this,
2002 instr->pointer_map(),
2003 Safepoint::kNoDeoptimizationIndex);
2004 __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
2005}
2006
2007
2008void LCodeGen::DoPushArgument(LPushArgument* instr) {
2009 LOperand* argument = instr->input();
2010 if (argument->IsConstantOperand()) {
2011 __ push(ToImmediate(argument));
2012 } else {
2013 __ push(ToOperand(argument));
2014 }
2015}
2016
2017
2018void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2019 Register result = ToRegister(instr->result());
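  // esi holds the current context; the global object lives in its
  // GLOBAL_INDEX slot.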
2020 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2021}
2022
2023
2024void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2025 Register result = ToRegister(instr->result());
2026 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2027 __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
2028}
2029
2030
2031void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2032 int arity,
2033 LInstruction* instr) {
2034 // Change context if needed.
2035 bool change_context =
2036 (graph()->info()->closure()->context() != function->context()) ||
2037 scope()->contains_with() ||
2038 (scope()->num_heap_slots() > 0);
2039 if (change_context) {
2040 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2041 }
2042
2043 // Set eax to arguments count if adaptation is not needed. Assumes that eax
2044 // is available to write to at this point.
2045 if (!function->NeedsArgumentsAdaption()) {
2046 __ mov(eax, arity);
2047 }
2048
2049 LPointerMap* pointers = instr->pointer_map();
2050 RecordPosition(pointers->position());
2051
2052 // Invoke function.
2053 if (*function == *graph()->info()->closure()) {
2054 __ CallSelf();
2055 } else {
2056 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
2057 }
2058
2059 // Setup deoptimization.
2060 RegisterLazyDeoptimization(instr);
2061
2062 // Restore context.
2063 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2064}
2065
2066
2067void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2068 ASSERT(ToRegister(instr->result()).is(eax));
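  // Load the known function into edi, the register CallKnownFunction expects
  // the callee in.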
2069 __ mov(edi, instr->function());
2070 CallKnownFunction(instr->function(), instr->arity(), instr);
2071}
2072
2073
2074void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2075 Register input_reg = ToRegister(instr->input());
2076 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2077 Factory::heap_number_map());
2078 DeoptimizeIf(not_equal, instr->environment());
2079
2080 Label done;
2081 Register tmp = input_reg.is(eax) ? ecx : eax;
2082 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
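  // Choose two scratch registers that alias neither input_reg nor each other.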
2083
2084 // Preserve the value of all registers.
2085 __ PushSafepointRegisters();
2086
2087 Label negative;
2088 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2089 // Check the sign of the argument. If the argument is positive,
2090 // just return it.
2091 __ test(tmp, Immediate(HeapNumber::kSignMask));
2092 __ j(not_zero, &negative);
2093 __ mov(tmp, input_reg);
2094 __ jmp(&done);
2095
2096 __ bind(&negative);
2097
2098 Label allocated, slow;
2099 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
2100 __ jmp(&allocated);
2101
2102 // Slow case: Call the runtime system to do the number allocation.
2103 __ bind(&slow);
2104
2105 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2106 RecordSafepointWithRegisters(
2107 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2108 // Set the pointer to the new heap number in tmp.
2109 if (!tmp.is(eax)) __ mov(tmp, eax);
2110
2111 // Restore input_reg after call to runtime.
2112 __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
2113
2114 __ bind(&allocated);
2115 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2116 __ and_(tmp2, ~HeapNumber::kSignMask);
2117 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
2118 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
2119 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
2120
2121 __ bind(&done);
2122 __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
2123
2124 __ PopSafepointRegisters();
2125}
2126
2127
2128void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2129 // Class for deferred case.
2130 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2131 public:
2132 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2133 LUnaryMathOperation* instr)
2134 : LDeferredCode(codegen), instr_(instr) { }
2135 virtual void Generate() {
2136 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2137 }
2138 private:
2139 LUnaryMathOperation* instr_;
2140 };
2141
2142 ASSERT(instr->input()->Equals(instr->result()));
2143 Representation r = instr->hydrogen()->value()->representation();
2144
2145 if (r.IsDouble()) {
2146 XMMRegister scratch = xmm0;
2147 XMMRegister input_reg = ToDoubleRegister(instr->input());
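  // abs(x) is computed bitwise: scratch = 0.0 - x flips x's sign bit, and
  // x & scratch clears it while leaving the other bits intact
  // (e.g. -3.5: 0xC00C000000000000 & 0x400C000000000000 == 3.5).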
2148 __ pxor(scratch, scratch);
2149 __ subsd(scratch, input_reg);
2150 __ pand(input_reg, scratch);
2151 } else if (r.IsInteger32()) {
2152 Register input_reg = ToRegister(instr->input());
2153 __ test(input_reg, Operand(input_reg));
2154 Label is_positive;
2155 __ j(not_sign, &is_positive);
2156 __ neg(input_reg);
2157 __ test(input_reg, Operand(input_reg));
2158 DeoptimizeIf(negative, instr->environment());
2159 __ bind(&is_positive);
2160 } else { // Tagged case.
2161 DeferredMathAbsTaggedHeapNumber* deferred =
2162 new DeferredMathAbsTaggedHeapNumber(this, instr);
2163 Label not_smi;
2164 Register input_reg = ToRegister(instr->input());
2165 // Smi check.
2166 __ test(input_reg, Immediate(kSmiTagMask));
2167 __ j(not_zero, deferred->entry());
2168 __ test(input_reg, Operand(input_reg));
2169 Label is_positive;
2170 __ j(not_sign, &is_positive);
2171 __ neg(input_reg);
2172
2173 __ test(input_reg, Operand(input_reg));
2174 DeoptimizeIf(negative, instr->environment());
2175
2176 __ bind(&is_positive);
2177 __ bind(deferred->exit());
2178 }
2179}
2180
2181
2182void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2183 XMMRegister xmm_scratch = xmm0;
2184 Register output_reg = ToRegister(instr->result());
2185 XMMRegister input_reg = ToDoubleRegister(instr->input());
2186 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2187 __ ucomisd(input_reg, xmm_scratch);
2188
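  // The unordered compare sets the carry flag for negative or NaN inputs and
  // the zero flag for +/-0; when -0 must be preserved we also deoptimize on
  // zero, since the flags cannot tell -0 from +0.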
2189 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2190 DeoptimizeIf(below_equal, instr->environment());
2191 } else {
2192 DeoptimizeIf(below, instr->environment());
2193 }
2194
2195 // Use truncating instruction (OK because input is positive).
2196 __ cvttsd2si(output_reg, Operand(input_reg));
2197
2198 // Overflow is signalled with minint.
2199 __ cmp(output_reg, 0x80000000u);
2200 DeoptimizeIf(equal, instr->environment());
2201}
2202
2203
2204void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2205 XMMRegister xmm_scratch = xmm0;
2206 Register output_reg = ToRegister(instr->result());
2207 XMMRegister input_reg = ToDoubleRegister(instr->input());
2208
2209 // xmm_scratch = 0.5
2210 ExternalReference one_half = ExternalReference::address_of_one_half();
2211 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
2212
2213 // input = input + 0.5
2214 __ addsd(input_reg, xmm_scratch);
2215
2216 // We need to return -0 for the input range [-0.5, 0[, otherwise
2217 // compute Math.floor(value + 0.5).
2218 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2219 __ ucomisd(input_reg, xmm_scratch);
2220 DeoptimizeIf(below_equal, instr->environment());
2221 } else {
2222 // If we don't need to bail out on -0, we only bail out
2223 // on negative inputs.
2224 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2225 __ ucomisd(input_reg, xmm_scratch);
2226 DeoptimizeIf(below, instr->environment());
2227 }
2228
2229 // Compute Math.floor(value + 0.5).
2230 // Use truncating instruction (OK because input is positive).
2231 __ cvttsd2si(output_reg, Operand(input_reg));
2232
2233 // Overflow is signalled with minint.
2234 __ cmp(output_reg, 0x80000000u);
2235 DeoptimizeIf(equal, instr->environment());
2236}
2237
2238
2239void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
2240 XMMRegister input_reg = ToDoubleRegister(instr->input());
2241 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2242 __ sqrtsd(input_reg, input_reg);
2243}
2244
2245
2246void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
2247 XMMRegister xmm_scratch = xmm0;
2248 XMMRegister input_reg = ToDoubleRegister(instr->input());
2249 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
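  // Math.pow(-Infinity, 0.5) is +Infinity, but sqrtsd(-Infinity) yields NaN,
  // so deoptimize on -Infinity and let the generic code handle that case.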
2250 ExternalReference negative_infinity =
2251 ExternalReference::address_of_negative_infinity();
2252 __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
2253 __ ucomisd(xmm_scratch, input_reg);
2254 DeoptimizeIf(equal, instr->environment());
2255 __ sqrtsd(input_reg, input_reg);
2256}
2257
2258
2259void LCodeGen::DoPower(LPower* instr) {
2260 LOperand* left = instr->left();
2261 LOperand* right = instr->right();
2262 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2263 Representation exponent_type = instr->hydrogen()->right()->representation();
2264 if (exponent_type.IsDouble()) {
2265 // It is safe to use ebx directly since the instruction is marked
2266 // as a call.
2267 __ PrepareCallCFunction(4, ebx);
2268 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2269 __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
2270 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2271 } else if (exponent_type.IsInteger32()) {
2272 // It is safe to use ebx directly since the instruction is marked
2273 // as a call.
2274 ASSERT(!ToRegister(right).is(ebx));
2275 __ PrepareCallCFunction(4, ebx);
2276 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2277 __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
2278 __ CallCFunction(ExternalReference::power_double_int_function(), 4);
2279 } else {
2280 ASSERT(exponent_type.IsTagged());
2281 CpuFeatures::Scope scope(SSE2);
2282 Register right_reg = ToRegister(right);
2283
2284 Label non_smi, call;
2285 __ test(right_reg, Immediate(kSmiTagMask));
2286 __ j(not_zero, &non_smi);
2287 __ SmiUntag(right_reg);
2288 __ cvtsi2sd(result_reg, Operand(right_reg));
2289 __ jmp(&call);
2290
2291 __ bind(&non_smi);
2292 // It is safe to use ebx directly since the instruction is marked
2293 // as a call.
2294 ASSERT(!right_reg.is(ebx));
2295 __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
2296 DeoptimizeIf(not_equal, instr->environment());
2297 __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
2298
2299 __ bind(&call);
2300 __ PrepareCallCFunction(4, ebx);
2301 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2302 __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
2303 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2304 }
2305
2306 // Return value is in st(0) on ia32.
2307 // Store it into the (fixed) result register.
2308 __ sub(Operand(esp), Immediate(kDoubleSize));
2309 __ fstp_d(Operand(esp, 0));
2310 __ movdbl(result_reg, Operand(esp, 0));
2311 __ add(Operand(esp), Immediate(kDoubleSize));
2312}
2313
2314
2315void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
2316 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2317 TranscendentalCacheSSE2Stub stub(TranscendentalCache::LOG);
2318 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2319}
2320
2321
2322void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
2323 switch (instr->op()) {
2324 case kMathAbs:
2325 DoMathAbs(instr);
2326 break;
2327 case kMathFloor:
2328 DoMathFloor(instr);
2329 break;
2330 case kMathRound:
2331 DoMathRound(instr);
2332 break;
2333 case kMathSqrt:
2334 DoMathSqrt(instr);
2335 break;
2336 case kMathPowHalf:
2337 DoMathPowHalf(instr);
2338 break;
2339 case kMathLog:
2340 DoMathLog(instr);
2341 break;
2342
2343 default:
2344 UNREACHABLE();
2345 }
2346}
2347
2348
2349void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
2350 ASSERT(ToRegister(instr->result()).is(eax));
2351
2352 int arity = instr->arity();
2353 Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
2354 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2355 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2356}
2357
2358
2359void LCodeGen::DoCallNamed(LCallNamed* instr) {
2360 ASSERT(ToRegister(instr->result()).is(eax));
2361
2362 int arity = instr->arity();
2363 Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2364 __ mov(ecx, instr->name());
2365 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2366 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2367}
2368
2369
2370void LCodeGen::DoCallFunction(LCallFunction* instr) {
2371 ASSERT(ToRegister(instr->result()).is(eax));
2372
2373 int arity = instr->arity();
2374 CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
2375 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
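  // The call leaves the function on the stack; drop it before restoring the
  // context.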
2376 __ Drop(1);
2377 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2378}
2379
2380
2381void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
2382 ASSERT(ToRegister(instr->result()).is(eax));
2383
2384 int arity = instr->arity();
2385 Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2386 __ mov(ecx, instr->name());
2387 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2388 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2389}
2390
2391
2392void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
2393 ASSERT(ToRegister(instr->result()).is(eax));
2394 __ mov(edi, instr->target());
2395 CallKnownFunction(instr->target(), instr->arity(), instr);
2396}
2397
2398
2399void LCodeGen::DoCallNew(LCallNew* instr) {
2400 ASSERT(ToRegister(instr->input()).is(edi));
2401 ASSERT(ToRegister(instr->result()).is(eax));
2402
2403 Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
2404 __ Set(eax, Immediate(instr->arity()));
2405 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
2406}
2407
2408
2409void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2410 CallRuntime(instr->function(), instr->arity(), instr);
2411}
2412
2413
2414void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
2415 Register object = ToRegister(instr->object());
2416 Register value = ToRegister(instr->value());
2417 int offset = instr->offset();
2418
2419 if (!instr->transition().is_null()) {
2420 __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
2421 }
2422
2423 // Do the store.
2424 if (instr->is_in_object()) {
2425 __ mov(FieldOperand(object, offset), value);
2426 if (instr->needs_write_barrier()) {
2427 Register temp = ToRegister(instr->temp());
2428 // Update the write barrier for the object for in-object properties.
2429 __ RecordWrite(object, offset, value, temp);
2430 }
2431 } else {
2432 Register temp = ToRegister(instr->temp());
2433 __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
2434 __ mov(FieldOperand(temp, offset), value);
2435 if (instr->needs_write_barrier()) {
2436 // Update the write barrier for the properties array.
2437 // object is used as a scratch register.
2438 __ RecordWrite(temp, offset, value, object);
2439 }
2440 }
2441}
2442
2443
2444void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
2445 ASSERT(ToRegister(instr->object()).is(edx));
2446 ASSERT(ToRegister(instr->value()).is(eax));
2447
2448 __ mov(ecx, instr->name());
2449 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
2450 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2451}
2452
2453
2454void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
2455 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
2456 DeoptimizeIf(above_equal, instr->environment());
2457}
2458
2459
2460void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
2461 Register value = ToRegister(instr->value());
2462 Register elements = ToRegister(instr->object());
2463 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
2464
2465 // Do the store.
2466 if (instr->key()->IsConstantOperand()) {
2467 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
2468 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2469 int offset =
2470 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
2471 __ mov(FieldOperand(elements, offset), value);
2472 } else {
2473 __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
2474 value);
2475 }
2476
2477 // Update the write barrier unless we're certain that we're storing a smi.
2478 if (instr->hydrogen()->NeedsWriteBarrier()) {
2479 // Compute address of modified element and store it into key register.
2480 __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2481 __ RecordWrite(elements, key, value);
2482 }
2483}
2484
2485
2486void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
2487 ASSERT(ToRegister(instr->object()).is(edx));
2488 ASSERT(ToRegister(instr->key()).is(ecx));
2489 ASSERT(ToRegister(instr->value()).is(eax));
2490
2491 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
2492 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2493}
2494
2495
2496void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
2497 LOperand* input = instr->input();
2498 ASSERT(input->IsRegister() || input->IsStackSlot());
2499 LOperand* output = instr->result();
2500 ASSERT(output->IsDoubleRegister());
2501 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
2502}
2503
2504
2505void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
2506 class DeferredNumberTagI: public LDeferredCode {
2507 public:
2508 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
2509 : LDeferredCode(codegen), instr_(instr) { }
2510 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
2511 private:
2512 LNumberTagI* instr_;
2513 };
2514
2515 LOperand* input = instr->input();
2516 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2517 Register reg = ToRegister(input);
2518
2519 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
2520 __ SmiTag(reg);
2521 __ j(overflow, deferred->entry());
2522 __ bind(deferred->exit());
2523}
2524
2525
2526void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
2527 Label slow;
2528 Register reg = ToRegister(instr->input());
2529 Register tmp = reg.is(eax) ? ecx : eax;
2530
2531 // Preserve the value of all registers.
2532 __ PushSafepointRegisters();
2533
2534 // There was overflow, so bits 30 and 31 of the original integer
2535 // disagree. Try to allocate a heap number in new space and store
2536 // the value in there. If that fails, call the runtime system.
2537 NearLabel done;
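  // The overflowing SmiTag shifted out the sign bit; untagging and flipping
  // bit 31 recovers the original int32 value.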
2538 __ SmiUntag(reg);
2539 __ xor_(reg, 0x80000000);
2540 __ cvtsi2sd(xmm0, Operand(reg));
2541 if (FLAG_inline_new) {
2542 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
2543 __ jmp(&done);
2544 }
2545
2546 // Slow case: Call the runtime system to do the number allocation.
2547 __ bind(&slow);
2548
2549 // TODO(3095996): Put a valid pointer value in the stack slot where the result
2550 // register is stored, as this register is in the pointer map, but contains an
2551 // integer value.
2552 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
2553
2554 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2555 RecordSafepointWithRegisters(
2556 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2557 if (!reg.is(eax)) __ mov(reg, eax);
2558
2559 // Done. Put the value in xmm0 into the value of the allocated heap
2560 // number.
2561 __ bind(&done);
2562 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
2563 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
2564 __ PopSafepointRegisters();
2565}
2566
2567
2568void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
2569 class DeferredNumberTagD: public LDeferredCode {
2570 public:
2571 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
2572 : LDeferredCode(codegen), instr_(instr) { }
2573 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
2574 private:
2575 LNumberTagD* instr_;
2576 };
2577
2578 XMMRegister input_reg = ToDoubleRegister(instr->input());
2579 Register reg = ToRegister(instr->result());
2580 Register tmp = ToRegister(instr->temp());
2581
2582 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
2583 if (FLAG_inline_new) {
2584 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
2585 } else {
2586 __ jmp(deferred->entry());
2587 }
2588 __ bind(deferred->exit());
2589 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
2590}
2591
2592
2593void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
2594 // TODO(3095996): Get rid of this. For now, we need to make the
2595 // result register contain a valid pointer because it is already
2596 // contained in the register pointer map.
2597 Register reg = ToRegister(instr->result());
2598 __ Set(reg, Immediate(0));
2599
2600 __ PushSafepointRegisters();
2601 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2602 RecordSafepointWithRegisters(
2603 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2604 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
2605 __ PopSafepointRegisters();
2606}
2607
2608
2609void LCodeGen::DoSmiTag(LSmiTag* instr) {
2610 LOperand* input = instr->input();
2611 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2612 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
2613 __ SmiTag(ToRegister(input));
2614}
2615
2616
2617void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
2618 LOperand* input = instr->input();
2619 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2620 if (instr->needs_check()) {
2621 __ test(ToRegister(input), Immediate(kSmiTagMask));
2622 DeoptimizeIf(not_zero, instr->environment());
2623 }
2624 __ SmiUntag(ToRegister(input));
2625}
2626
2627
2628void LCodeGen::EmitNumberUntagD(Register input_reg,
2629 XMMRegister result_reg,
2630 LEnvironment* env) {
2631 NearLabel load_smi, heap_number, done;
2632
2633 // Smi check.
2634 __ test(input_reg, Immediate(kSmiTagMask));
2635 __ j(zero, &load_smi, not_taken);
2636
2637 // Heap number map check.
2638 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2639 Factory::heap_number_map());
2640 __ j(equal, &heap_number);
2641
2642 __ cmp(input_reg, Factory::undefined_value());
2643 DeoptimizeIf(not_equal, env);
2644
2645 // Convert undefined to NaN.
2646 __ push(input_reg);
2647 __ mov(input_reg, Factory::nan_value());
2648 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2649 __ pop(input_reg);
2650 __ jmp(&done);
2651
2652 // Heap number to XMM conversion.
2653 __ bind(&heap_number);
2654 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2655 __ jmp(&done);
2656
2657 // Smi to XMM conversion
2658 __ bind(&load_smi);
2659 __ SmiUntag(input_reg); // Untag smi before converting to float.
2660 __ cvtsi2sd(result_reg, Operand(input_reg));
2661 __ SmiTag(input_reg); // Retag smi.
2662 __ bind(&done);
2663}
2664
2665
2666class DeferredTaggedToI: public LDeferredCode {
2667 public:
2668 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
2669 : LDeferredCode(codegen), instr_(instr) { }
2670 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
2671 private:
2672 LTaggedToI* instr_;
2673};
2674
2675
2676void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
2677 NearLabel done, heap_number;
2678 Register input_reg = ToRegister(instr->input());
2679
2680 // Heap number map check.
2681 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2682 Factory::heap_number_map());
2683
2684 if (instr->truncating()) {
2685 __ j(equal, &heap_number);
2686 // Check for undefined. Undefined is converted to zero for truncating
2687 // conversions.
2688 __ cmp(input_reg, Factory::undefined_value());
2689 DeoptimizeIf(not_equal, instr->environment());
2690 __ mov(input_reg, 0);
2691 __ jmp(&done);
2692
2693 __ bind(&heap_number);
2694 if (CpuFeatures::IsSupported(SSE3)) {
2695 CpuFeatures::Scope scope(SSE3);
2696 NearLabel convert;
2697 // Use more powerful conversion when sse3 is available.
2698 // Load x87 register with heap number.
2699 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
2700 // Get exponent alone and check for too-big exponent.
2701 __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2702 __ and_(input_reg, HeapNumber::kExponentMask);
2703 const uint32_t kTooBigExponent =
2704 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2705 __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
2706 __ j(less, &convert);
2707 // Pop FPU stack before deoptimizing.
2708 __ ffree(0);
2709 __ fincstp();
2710 DeoptimizeIf(no_condition, instr->environment());
2711
2712 // Reserve space for 64 bit answer.
2713 __ bind(&convert);
2714 __ sub(Operand(esp), Immediate(kDoubleSize));
2715 // Do conversion, which cannot fail because we checked the exponent.
2716 __ fisttp_d(Operand(esp, 0));
2717 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
2718 __ add(Operand(esp), Immediate(kDoubleSize));
2719 } else {
2720 NearLabel deopt;
2721 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
2722 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2723 __ cvttsd2si(input_reg, Operand(xmm0));
2724 __ cmp(input_reg, 0x80000000u);
2725 __ j(not_equal, &done);
2726 // Check if the input was 0x80000000 (kMinInt).
2727 // If no, then we got an overflow and we deoptimize.
2728 ExternalReference min_int = ExternalReference::address_of_min_int();
2729 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
2730 __ ucomisd(xmm_temp, xmm0);
2731 DeoptimizeIf(not_equal, instr->environment());
2732 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2733 }
2734 } else {
2735 // Deoptimize if we don't have a heap number.
2736 DeoptimizeIf(not_equal, instr->environment());
2737
2738 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
2739 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2740 __ cvttsd2si(input_reg, Operand(xmm0));
2741 __ cvtsi2sd(xmm_temp, Operand(input_reg));
2742 __ ucomisd(xmm0, xmm_temp);
2743 DeoptimizeIf(not_equal, instr->environment());
2744 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2745 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2746 __ test(input_reg, Operand(input_reg));
2747 __ j(not_zero, &done);
2748 __ movmskpd(input_reg, xmm0);
2749 __ and_(input_reg, 1);
2750 DeoptimizeIf(not_zero, instr->environment());
2751 }
2752 }
2753 __ bind(&done);
2754}
2755
2756
2757void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
2758 LOperand* input = instr->input();
2759 ASSERT(input->IsRegister());
2760 ASSERT(input->Equals(instr->result()));
2761
2762 Register input_reg = ToRegister(input);
2763
2764 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
2765
2766 // Smi check.
2767 __ test(input_reg, Immediate(kSmiTagMask));
2768 __ j(not_zero, deferred->entry());
2769
2770 // Smi to int32 conversion
2771 __ SmiUntag(input_reg); // Untag smi.
2772
2773 __ bind(deferred->exit());
2774}
2775
2776
2777void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
2778 LOperand* input = instr->input();
2779 ASSERT(input->IsRegister());
2780 LOperand* result = instr->result();
2781 ASSERT(result->IsDoubleRegister());
2782
2783 Register input_reg = ToRegister(input);
2784 XMMRegister result_reg = ToDoubleRegister(result);
2785
2786 EmitNumberUntagD(input_reg, result_reg, instr->environment());
2787}
2788
2789
2790void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
2791 LOperand* input = instr->input();
2792 ASSERT(input->IsDoubleRegister());
2793 LOperand* result = instr->result();
2794 ASSERT(result->IsRegister());
2795
2796 XMMRegister input_reg = ToDoubleRegister(input);
2797 Register result_reg = ToRegister(result);
2798
2799 if (instr->truncating()) {
2800 // Performs a truncating conversion of a floating point number as used by
2801 // the JS bitwise operations.
2802 __ cvttsd2si(result_reg, Operand(input_reg));
2803 __ cmp(result_reg, 0x80000000u);
2804 if (CpuFeatures::IsSupported(SSE3)) {
2805 // This will deoptimize if the exponent of the input is out of range.
2806 CpuFeatures::Scope scope(SSE3);
2807 NearLabel convert, done;
2808 __ j(not_equal, &done);
2809 __ sub(Operand(esp), Immediate(kDoubleSize));
2810 __ movdbl(Operand(esp, 0), input_reg);
2811 // Get exponent alone and check for too-big exponent.
2812 __ mov(result_reg, Operand(esp, sizeof(int32_t)));
2813 __ and_(result_reg, HeapNumber::kExponentMask);
2814 const uint32_t kTooBigExponent =
2815 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2816 __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
2817 __ j(less, &convert);
2818 __ add(Operand(esp), Immediate(kDoubleSize));
2819 DeoptimizeIf(no_condition, instr->environment());
2820 __ bind(&convert);
2821 // Do conversion, which cannot fail because we checked the exponent.
2822 __ fld_d(Operand(esp, 0));
2823 __ fisttp_d(Operand(esp, 0));
2824 __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
2825 __ add(Operand(esp), Immediate(kDoubleSize));
2826 __ bind(&done);
2827 } else {
2828 // This will bail out if the input was not in the int32 range (or,
2829 // unfortunately, if the input was 0x80000000).
2830 DeoptimizeIf(equal, instr->environment());
2831 }
2832 } else {
2833 NearLabel done;
2834 __ cvttsd2si(result_reg, Operand(input_reg));
2835 __ cvtsi2sd(xmm0, Operand(result_reg));
2836 __ ucomisd(xmm0, input_reg);
2837 DeoptimizeIf(not_equal, instr->environment());
2838 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2839 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2840 // The integer converted back is equal to the original. We
2841 // only have to test if we got -0 as an input.
2842 __ test(result_reg, Operand(result_reg));
2843 __ j(not_zero, &done);
2844 __ movmskpd(result_reg, input_reg);
2845 // Bit 0 contains the sign of the double in input_reg.
2846 // If input was positive, we are ok and return 0, otherwise
2847 // deoptimize.
2848 __ and_(result_reg, 1);
2849 DeoptimizeIf(not_zero, instr->environment());
2850 }
2851 __ bind(&done);
2852 }
2853}
2854
2855
2856void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
2857 LOperand* input = instr->input();
2858 ASSERT(input->IsRegister());
2859 __ test(ToRegister(input), Immediate(kSmiTagMask));
2860 DeoptimizeIf(instr->condition(), instr->environment());
2861}
2862
2863
2864void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
2865 Register input = ToRegister(instr->input());
2866 Register temp = ToRegister(instr->temp());
2867 InstanceType first = instr->hydrogen()->first();
2868 InstanceType last = instr->hydrogen()->last();
2869
2870 __ test(input, Immediate(kSmiTagMask));
2871 DeoptimizeIf(zero, instr->environment());
2872
2873 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
2874 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
2875 static_cast<int8_t>(first));
2876
2877 // If there is only one type in the interval check for equality.
2878 if (first == last) {
2879 DeoptimizeIf(not_equal, instr->environment());
2880 } else {
2881 DeoptimizeIf(below, instr->environment());
2882 // Omit check for the last type.
2883 if (last != LAST_TYPE) {
2884 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
2885 static_cast<int8_t>(last));
2886 DeoptimizeIf(above, instr->environment());
2887 }
2888 }
2889}
2890
2891
2892void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
2893 ASSERT(instr->input()->IsRegister());
2894 Register reg = ToRegister(instr->input());
2895 __ cmp(reg, instr->hydrogen()->target());
2896 DeoptimizeIf(not_equal, instr->environment());
2897}
2898
2899
2900void LCodeGen::DoCheckMap(LCheckMap* instr) {
2901 LOperand* input = instr->input();
2902 ASSERT(input->IsRegister());
2903 Register reg = ToRegister(input);
2904 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2905 instr->hydrogen()->map());
2906 DeoptimizeIf(not_equal, instr->environment());
2907}
2908
2909
2910void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
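  // Prototypes allocated in new space can move, so reference them indirectly
  // through a global property cell; old-space prototypes are embedded directly.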
2911 if (Heap::InNewSpace(*prototype)) {
2912 Handle<JSGlobalPropertyCell> cell =
2913 Factory::NewJSGlobalPropertyCell(prototype);
2914 __ mov(result, Operand::Cell(cell));
2915 } else {
2916 __ mov(result, prototype);
2917 }
2918}
2919
2920
2921void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
2922 Register reg = ToRegister(instr->temp());
2923
2924 Handle<JSObject> holder = instr->holder();
2925 Handle<Map> receiver_map = instr->receiver_map();
2926 Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
2927
2928 // Load prototype object.
2929 LoadPrototype(reg, current_prototype);
2930
2931 // Check prototype maps up to the holder.
2932 while (!current_prototype.is_identical_to(holder)) {
2933 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2934 Handle<Map>(current_prototype->map()));
2935 DeoptimizeIf(not_equal, instr->environment());
2936 current_prototype =
2937 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
2938 // Load next prototype object.
2939 LoadPrototype(reg, current_prototype);
2940 }
2941
2942 // Check the holder map.
2943 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2944 Handle<Map>(current_prototype->map()));
2945 DeoptimizeIf(not_equal, instr->environment());
2946}
2947
2948
2949void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
2950 // Setup the parameters to the stub/runtime call.
2951 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
2952 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
2953 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
2954 __ push(Immediate(instr->hydrogen()->constant_elements()));
2955
2956 // Pick the right runtime function or stub to call.
2957 int length = instr->hydrogen()->length();
2958 if (instr->hydrogen()->IsCopyOnWrite()) {
2959 ASSERT(instr->hydrogen()->depth() == 1);
2960 FastCloneShallowArrayStub::Mode mode =
2961 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
2962 FastCloneShallowArrayStub stub(mode, length);
2963 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2964 } else if (instr->hydrogen()->depth() > 1) {
2965 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
2966 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
2967 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
2968 } else {
2969 FastCloneShallowArrayStub::Mode mode =
2970 FastCloneShallowArrayStub::CLONE_ELEMENTS;
2971 FastCloneShallowArrayStub stub(mode, length);
2972 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2973 }
2974}
2975
2976
2977void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
2978 // Setup the parameters to the stub/runtime call.
2979 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
2980 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
2981 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
2982 __ push(Immediate(instr->hydrogen()->constant_properties()));
2983 __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
2984
2985 // Pick the right runtime function or stub to call.
2986 if (instr->hydrogen()->depth() > 1) {
2987 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
2988 } else {
2989 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
2990 }
2991}
2992
2993
2994void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
2995 NearLabel materialized;
2996 // Registers will be used as follows:
2997 // edi = JS function.
2998 // ecx = literals array.
2999 // ebx = regexp literal.
3000 // eax = regexp literal clone.
3001 __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3002 __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
3003 int literal_offset = FixedArray::kHeaderSize +
3004 instr->hydrogen()->literal_index() * kPointerSize;
3005 __ mov(ebx, FieldOperand(ecx, literal_offset));
3006 __ cmp(ebx, Factory::undefined_value());
3007 __ j(not_equal, &materialized);
3008
3009 // Create regexp literal using runtime function
3010 // Result will be in eax.
3011 __ push(ecx);
3012 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
3013 __ push(Immediate(instr->hydrogen()->pattern()));
3014 __ push(Immediate(instr->hydrogen()->flags()));
3015 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
3016 __ mov(ebx, eax);
3017
3018 __ bind(&materialized);
3019 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
3020 Label allocated, runtime_allocate;
3021 __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
3022 __ jmp(&allocated);
3023
3024 __ bind(&runtime_allocate);
3025 __ push(ebx);
3026 __ push(Immediate(Smi::FromInt(size)));
3027 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
3028 __ pop(ebx);
3029
3030 __ bind(&allocated);
3031 // Copy the content into the newly allocated memory.
3032 // (Unroll copy loop once for better throughput).
3033 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
3034 __ mov(edx, FieldOperand(ebx, i));
3035 __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
3036 __ mov(FieldOperand(eax, i), edx);
3037 __ mov(FieldOperand(eax, i + kPointerSize), ecx);
3038 }
3039 if ((size % (2 * kPointerSize)) != 0) {
3040 __ mov(edx, FieldOperand(ebx, size - kPointerSize));
3041 __ mov(FieldOperand(eax, size - kPointerSize), edx);
3042 }
3043}
3044
3045
3046void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
3047 // Use the fast case closure allocation code that allocates in new
3048 // space for nested functions that don't need literals cloning.
3049 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
3050 bool pretenure = instr->hydrogen()->pretenure();
3051 if (shared_info->num_literals() == 0 && !pretenure) {
3052 FastNewClosureStub stub;
3053 __ push(Immediate(shared_info));
3054 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3055 } else {
3056 __ push(esi);
3057 __ push(Immediate(shared_info));
3058 __ push(Immediate(pretenure
3059 ? Factory::true_value()
3060 : Factory::false_value()));
3061 CallRuntime(Runtime::kNewClosure, 3, instr);
3062 }
3063}
3064
3065
3066void LCodeGen::DoTypeof(LTypeof* instr) {
3067 LOperand* input = instr->input();
3068 if (input->IsConstantOperand()) {
3069 __ push(ToImmediate(input));
3070 } else {
3071 __ push(ToOperand(input));
3072 }
3073 CallRuntime(Runtime::kTypeof, 1, instr);
3074}
3075
3076
3077void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
3078 Register input = ToRegister(instr->input());
3079 Register result = ToRegister(instr->result());
3080 Label true_label;
3081 Label false_label;
3082 NearLabel done;
3083
3084 Condition final_branch_condition = EmitTypeofIs(&true_label,
3085 &false_label,
3086 input,
3087 instr->type_literal());
3088 __ j(final_branch_condition, &true_label);
3089 __ bind(&false_label);
3090 __ mov(result, Handle<Object>(Heap::false_value()));
3091 __ jmp(&done);
3092
3093 __ bind(&true_label);
3094 __ mov(result, Handle<Object>(Heap::true_value()));
3095
3096 __ bind(&done);
3097}
3098
3099
3100void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
3101 Register input = ToRegister(instr->input());
3102 int true_block = chunk_->LookupDestination(instr->true_block_id());
3103 int false_block = chunk_->LookupDestination(instr->false_block_id());
3104 Label* true_label = chunk_->GetAssemblyLabel(true_block);
3105 Label* false_label = chunk_->GetAssemblyLabel(false_block);
3106
3107 Condition final_branch_condition = EmitTypeofIs(true_label,
3108 false_label,
3109 input,
3110 instr->type_literal());
3111
3112 EmitBranch(true_block, false_block, final_branch_condition);
3113}
3114
3115
3116Condition LCodeGen::EmitTypeofIs(Label* true_label,
3117 Label* false_label,
3118 Register input,
3119 Handle<String> type_name) {
3120 Condition final_branch_condition = no_condition;
3121 if (type_name->Equals(Heap::number_symbol())) {
3122 __ test(input, Immediate(kSmiTagMask));
3123 __ j(zero, true_label);
3124 __ cmp(FieldOperand(input, HeapObject::kMapOffset),
3125 Factory::heap_number_map());
3126 final_branch_condition = equal;
3127
3128 } else if (type_name->Equals(Heap::string_symbol())) {
3129 __ test(input, Immediate(kSmiTagMask));
3130 __ j(zero, false_label);
3131 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
3132 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
3133 1 << Map::kIsUndetectable);
3134 __ j(not_zero, false_label);
3135 __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
3136 final_branch_condition = below;
3137
3138 } else if (type_name->Equals(Heap::boolean_symbol())) {
3139 __ cmp(input, Handle<Object>(Heap::true_value()));
3140 __ j(equal, true_label);
3141 __ cmp(input, Handle<Object>(Heap::false_value()));
3142 final_branch_condition = equal;
3143
3144 } else if (type_name->Equals(Heap::undefined_symbol())) {
3145 __ cmp(input, Factory::undefined_value());
3146 __ j(equal, true_label);
3147 __ test(input, Immediate(kSmiTagMask));
3148 __ j(zero, false_label);
3149 // Check for undetectable objects => true.
3150 __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
3151 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
3152 1 << Map::kIsUndetectable);
3153 final_branch_condition = not_zero;
3154
3155 } else if (type_name->Equals(Heap::function_symbol())) {
3156 __ test(input, Immediate(kSmiTagMask));
3157 __ j(zero, false_label);
3158 __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
3159 __ j(equal, true_label);
3160 // Regular expressions => 'function' (they are callable).
3161 __ CmpInstanceType(input, JS_REGEXP_TYPE);
3162 final_branch_condition = equal;
3163
3164 } else if (type_name->Equals(Heap::object_symbol())) {
3165 __ test(input, Immediate(kSmiTagMask));
3166 __ j(zero, false_label);
3167 __ cmp(input, Factory::null_value());
3168 __ j(equal, true_label);
3169 // Regular expressions => 'function', not 'object'.
3170 __ CmpObjectType(input, JS_REGEXP_TYPE, input);
3171 __ j(equal, false_label);
3172 // Check for undetectable objects => false.
3173 __ test_b(FieldOperand(input, Map::kBitFieldOffset),
3174 1 << Map::kIsUndetectable);
3175 __ j(not_zero, false_label);
3176 // Check for JS objects => true.
3177 __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
3178 __ j(below, false_label);
3179 __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
3180 final_branch_condition = below_equal;
3181
3182 } else {
3183 final_branch_condition = not_equal;
3184 __ jmp(false_label);
3185 // A dead branch instruction will be generated after this point.
3186 }
3187
3188 return final_branch_condition;
3189}
3190
3191
3192void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
3193 // No code for lazy bailout instruction. Used to capture environment after a
3194 // call for populating the safepoint data with deoptimization data.
3195}
3196
3197
3198void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
3199 DeoptimizeIf(no_condition, instr->environment());
3200}
3201
3202
3203void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
3204 LOperand* obj = instr->object();
3205 LOperand* key = instr->key();
3206 __ push(ToOperand(obj));
3207 if (key->IsConstantOperand()) {
3208 __ push(ToImmediate(key));
3209 } else {
3210 __ push(ToOperand(key));
3211 }
3212 RecordPosition(instr->pointer_map()->position());
3213 SafepointGenerator safepoint_generator(this,
3214 instr->pointer_map(),
3215 Safepoint::kNoDeoptimizationIndex);
3216 __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
3217}
3218
3219
3220void LCodeGen::DoStackCheck(LStackCheck* instr) {
3221 // Perform stack overflow check.
3222 NearLabel done;
3223 ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
3224 __ cmp(esp, Operand::StaticVariable(stack_limit));
3225 __ j(above_equal, &done);
3226
3227 StackCheckStub stub;
3228 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3229 __ bind(&done);
3230}
3231
3232
3233void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
3234 // This is a pseudo-instruction that ensures that the environment here is
3235 // properly registered for deoptimization and records the assembler's PC
3236 // offset.
3237 LEnvironment* environment = instr->environment();
3238 environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
3239 instr->SpilledDoubleRegisterArray());
3240
3241 // If the environment were already registered, we would have no way of
3242 // backpatching it with the spill slot operands.
3243 ASSERT(!environment->HasBeenRegistered());
3244 RegisterEnvironmentForDeoptimization(environment);
3245 ASSERT(osr_pc_offset_ == -1);
3246 osr_pc_offset_ = masm()->pc_offset();
3247}
3248
3249
3250#undef __
3251
3252} } // namespace v8::internal