// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


class SafepointGenerator : public PostCallGenerator {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  virtual void Generate() {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(SSE2);
  return GeneratePrologue() &&
         GenerateBody() &&
         GenerateDeferredCode() &&
         GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(StackSlotCount());
  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = StackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
    }
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


LInstruction* LCodeGen::GetNextInstruction() {
  if (current_instruction_ < instructions_->length() - 1) {
    return instructions_->at(current_instruction_ + 1);
  } else {
    return NULL;
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), StackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
         value->Number());
  return static_cast<int32_t>(value->Number());
}


Immediate LCodeGen::ToImmediate(LOperand* op) {
  LConstantOperand* const_op = LConstantOperand::cast(op);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    return Immediate(static_cast<int32_t>(literal->Number()));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  }
  ASSERT(r.IsTagged());
  return Immediate(literal);
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
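    // For example, with kPointerSize == 4 this places spill slot 0 at
    // ebp - 12 and slot 1 at ebp - 16.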
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
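    // For example, parameter index -1 maps to ebp + 8, just above the
    // return address at ebp + 4.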
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this value
    // is not present and must be reconstructed from the deoptimizer. Currently
    // this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = StackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  if (instr != NULL) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    __ call(code, mode);
    RegisterLazyDeoptimization(instr);
  } else {
    LPointerMap no_pointers(0);
    RecordPosition(no_pointers.position());
    __ call(code, mode);
    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
  }

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  // Runtime calls to Throw are not supposed to ever return at the
  // call site, so don't register lazy deoptimization for these. We do
  // however have to record a safepoint since throwing exceptions can
  // cause garbage collections.
  // BUG(3243555): register a lazy deoptimization point at throw. We need
  // it to be able to inline functions containing a throw statement.
  if (!instr->IsThrow()) {
    RegisterLazyDeoptimization(instr);
  } else {
    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
  }
}


void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
  // Create the environment to bail out to. If the call has side effects,
  // execution has to continue after the call; otherwise execution could
  // resume from a previous bailout point and repeat the call.
  LEnvironment* deoptimization_environment;
  if (instr->HasDeoptimizationEnvironment()) {
    deoptimization_environment = instr->deoptimization_environment();
  } else {
    deoptimization_environment = instr->environment();
  }

  RegisterEnvironmentForDeoptimization(deoptimization_environment);
  RecordSafepoint(instr->pointer_map(),
                  deoptimization_environment->deoptimization_index());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------ translation_size ------------<|
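    // For example, an environment with 2 parameters, 1 local, and 1 value
    // on the expression stack has size 4, so the translation above holds
    // 4 + 4 = 8 entries.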

    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    environment->WriteTranslation(this, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}


void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (FLAG_deopt_every_n_times != 0) {
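    // Decrement the per-function deopt counter in the SharedFunctionInfo.
    // When it reaches zero, reset it to FLAG_deopt_every_n_times and force
    // an eager deopt. EFLAGS and the scratch registers eax/ebx are saved
    // and restored around the check.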
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ push(ebx);
    __ mov(ebx, shared);
    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
    __ j(not_zero, &no_deopt);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      NearLabel done;
      __ j(NegateCondition(cc), &done);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
    }
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      Factory::NewDeoptimizationInputData(length, TENURED);

  data->SetTranslationByteArray(*translations_.CreateByteArray());
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
                                                    deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    }
  }
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint =
      safepoints_.DefineSafepointWithRegisters(
          masm(), arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister()) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  // Register esi always contains a pointer to the context.
  safepoint.DefinePointerRegister(esi);
}


void LCodeGen::RecordPosition(int position) {
  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  LCodeGen::DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  // xmm0 must always be a scratch register.
  XMMRegister xmm_scratch = xmm0;
  LUnallocated marker_operand(LUnallocated::NONE);
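  // Moves produced by the gap resolver may route a value through
  // marker_operand when a cycle has to be broken; such moves are rewritten
  // below to go through cpu_scratch (tagged/int32 values) or xmm_scratch
  // (doubles).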

  Register cpu_scratch = esi;
  bool destroys_cpu_scratch = false;

  LGapResolver resolver(move->move_operands(), &marker_operand);
  const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
  for (int i = moves->length() - 1; i >= 0; --i) {
    LMoveOperands move = moves->at(i);
    LOperand* from = move.from();
    LOperand* to = move.to();
    ASSERT(!from->IsDoubleRegister() ||
           !ToDoubleRegister(from).is(xmm_scratch));
    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
    if (from->IsConstantOperand()) {
      __ mov(ToOperand(to), ToImmediate(from));
    } else if (from == &marker_operand) {
      if (to->IsRegister() || to->IsStackSlot()) {
        __ mov(ToOperand(to), cpu_scratch);
        ASSERT(destroys_cpu_scratch);
      } else {
        ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
        __ movdbl(ToOperand(to), xmm_scratch);
      }
    } else if (to == &marker_operand) {
      if (from->IsRegister() || from->IsStackSlot()) {
        __ mov(cpu_scratch, ToOperand(from));
        destroys_cpu_scratch = true;
      } else {
        ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
        __ movdbl(xmm_scratch, ToOperand(from));
      }
    } else if (from->IsRegister()) {
      __ mov(ToOperand(to), ToRegister(from));
    } else if (to->IsRegister()) {
      __ mov(ToRegister(to), ToOperand(from));
    } else if (from->IsStackSlot()) {
      ASSERT(to->IsStackSlot());
      __ push(eax);
      __ mov(eax, ToOperand(from));
      __ mov(ToOperand(to), eax);
      __ pop(eax);
    } else if (from->IsDoubleRegister()) {
      __ movdbl(ToOperand(to), ToDoubleRegister(from));
    } else if (to->IsDoubleRegister()) {
      __ movdbl(ToDoubleRegister(to), ToOperand(from));
    } else {
      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
      __ movdbl(xmm_scratch, ToOperand(from));
      __ movdbl(ToOperand(to), xmm_scratch);
    }
  }

  if (destroys_cpu_scratch) {
    __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
  }
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCharAt: {
      StringCharAtStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::MathPow: {
      MathPowStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


void LCodeGen::DoModI(LModI* instr) {
  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(edx));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register right_reg = ToRegister(right);

  // Check for x % 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Sign extend to edx.
  __ cdq();

  // Check for a negative dividend with a zero remainder, which has to
  // produce a result of negative zero.
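  // For example, -4 % 2 evaluates to -0 in JavaScript; -0 cannot be
  // represented as an int32, so we deoptimize.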
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel positive_left;
    NearLabel done;
    __ test(eax, Operand(eax));
    __ j(not_sign, &positive_left);
    __ idiv(right_reg);

    // Test the remainder for 0, because then the result would be -0.
    __ test(edx, Operand(edx));
    __ j(not_zero, &done);

    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&positive_left);
    __ idiv(right_reg);
    __ bind(&done);
  } else {
    __ idiv(right_reg);
  }
}


void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
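  // For example, 0 / -1 evaluates to -0 in JavaScript, which is not
  // representable as an int32.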
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1), which overflows int32.
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    NearLabel left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if remainder is not 0.
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
  } else {
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    NearLabel done;
    __ test(left, Operand(left));
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), right_operand);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
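          // The shift count in ecx may have been zero, in which case the
          // sign bit survives: the result is then a uint32 above kMaxInt
          // with no int32 representation, so we deoptimize.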
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shl(ToRegister(left), shift_count);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left), ToImmediate(right));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing
  // so if the constant is -0.0.
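  // +0.0 is the only double whose bit pattern is all zeros; -0.0 is
  // 0x8000000000000000, so the BitCast comparison below distinguishes them.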
  if (BitCast<uint64_t, double>(v) == 0) {
    __ xorpd(res, res);
  } else {
    int32_t v_int32 = static_cast<int32_t>(v);
    if (static_cast<double>(v_int32) == v) {
      __ push_imm32(v_int32);
      __ cvtsi2sd(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kPointerSize));
    } else {
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
      __ push_imm32(upper);
      __ push_imm32(lower);
      __ movdbl(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(2 * kPointerSize));
    }
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->input());
  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->input());
  __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temporary());
  ASSERT(input.is(result));
  NearLabel done;
  // If the object is a smi return the object.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, &done);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->input();
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->input()));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ add(ToOperand(left), ToImmediate(right));
  } else {
    __ add(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  // Modulo uses a fixed result register.
  ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::SUB:
      __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MUL:
      __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::DIV:
      __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
      __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);

      // Return value is in st(0) on ia32.
      // Store it into the (fixed) result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->left()).is(edx));
  ASSERT(ToRegister(instr->right()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
  int next_block = GetNextEmittedBlock(current_block_);
  right_block = chunk_->LookupDestination(right_block);
  left_block = chunk_->LookupDestination(left_block);

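  // Emit at most one jump: when one of the targets is the textually next
  // block, simply fall through to it.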
1112 if (right_block == left_block) {
1113 EmitGoto(left_block);
1114 } else if (left_block == next_block) {
1115 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1116 } else if (right_block == next_block) {
1117 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1118 } else {
1119 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1120 __ jmp(chunk_->GetAssemblyLabel(right_block));
1121 }
1122}
1123
1124
1125void LCodeGen::DoBranch(LBranch* instr) {
1126 int true_block = chunk_->LookupDestination(instr->true_block_id());
1127 int false_block = chunk_->LookupDestination(instr->false_block_id());
1128
1129 Representation r = instr->hydrogen()->representation();
1130 if (r.IsInteger32()) {
1131 Register reg = ToRegister(instr->input());
1132 __ test(reg, Operand(reg));
1133 EmitBranch(true_block, false_block, not_zero);
1134 } else if (r.IsDouble()) {
1135 XMMRegister reg = ToDoubleRegister(instr->input());
1136 __ xorpd(xmm0, xmm0);
1137 __ ucomisd(reg, xmm0);
1138 EmitBranch(true_block, false_block, not_equal);
1139 } else {
1140 ASSERT(r.IsTagged());
1141 Register reg = ToRegister(instr->input());
1142 if (instr->hydrogen()->type().IsBoolean()) {
1143 __ cmp(reg, Factory::true_value());
1144 EmitBranch(true_block, false_block, equal);
1145 } else {
1146 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1147 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1148
1149 __ cmp(reg, Factory::undefined_value());
1150 __ j(equal, false_label);
1151 __ cmp(reg, Factory::true_value());
1152 __ j(equal, true_label);
1153 __ cmp(reg, Factory::false_value());
1154 __ j(equal, false_label);
1155 __ test(reg, Operand(reg));
1156 __ j(equal, false_label);
1157 __ test(reg, Immediate(kSmiTagMask));
1158 __ j(zero, true_label);
1159
1160 // Test for double values. Zero is false.
1161 NearLabel call_stub;
1162 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1163 Factory::heap_number_map());
1164 __ j(not_equal, &call_stub);
1165 __ fldz();
1166 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
1167 __ FCmp();
1168 __ j(zero, false_label);
1169 __ jmp(true_label);
1170
1171 // The conversion stub doesn't cause garbage collections so it's
1172 // safe to not record a safepoint after the call.
1173 __ bind(&call_stub);
1174 ToBooleanStub stub;
1175 __ pushad();
1176 __ push(reg);
1177 __ CallStub(&stub);
1178 __ test(eax, Operand(eax));
1179 __ popad();
1180 EmitBranch(true_block, false_block, not_zero);
1181 }
1182 }
1183}
1184
1185
1186void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
1187 block = chunk_->LookupDestination(block);
1188 int next_block = GetNextEmittedBlock(current_block_);
1189 if (block != next_block) {
1190 // Perform stack overflow check if this goto needs it before jumping.
1191 if (deferred_stack_check != NULL) {
1192 ExternalReference stack_limit =
1193 ExternalReference::address_of_stack_limit();
1194 __ cmp(esp, Operand::StaticVariable(stack_limit));
1195 __ j(above_equal, chunk_->GetAssemblyLabel(block));
1196 __ jmp(deferred_stack_check->entry());
1197 deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
1198 } else {
1199 __ jmp(chunk_->GetAssemblyLabel(block));
1200 }
1201 }
1202}
1203
1204
1205void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
1206 __ pushad();
1207 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
1208 RecordSafepointWithRegisters(
1209 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
1210 __ popad();
1211}
1212
1213void LCodeGen::DoGoto(LGoto* instr) {
1214 class DeferredStackCheck: public LDeferredCode {
1215 public:
1216 DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
1217 : LDeferredCode(codegen), instr_(instr) { }
1218 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
1219 private:
1220 LGoto* instr_;
1221 };
1222
1223 DeferredStackCheck* deferred = NULL;
1224 if (instr->include_stack_check()) {
1225 deferred = new DeferredStackCheck(this, instr);
1226 }
1227 EmitGoto(instr->block_id(), deferred);
1228}
1229
1230
1231Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1232 Condition cond = no_condition;
1233 switch (op) {
1234 case Token::EQ:
1235 case Token::EQ_STRICT:
1236 cond = equal;
1237 break;
1238 case Token::LT:
1239 cond = is_unsigned ? below : less;
1240 break;
1241 case Token::GT:
1242 cond = is_unsigned ? above : greater;
1243 break;
1244 case Token::LTE:
1245 cond = is_unsigned ? below_equal : less_equal;
1246 break;
1247 case Token::GTE:
1248 cond = is_unsigned ? above_equal : greater_equal;
1249 break;
1250 case Token::IN:
1251 case Token::INSTANCEOF:
1252 default:
1253 UNREACHABLE();
1254 }
1255 return cond;
1256}
1257
1258
1259void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1260 if (right->IsConstantOperand()) {
1261 __ cmp(ToOperand(left), ToImmediate(right));
1262 } else {
1263 __ cmp(ToRegister(left), ToOperand(right));
1264 }
1265}
1266
1267
1268void LCodeGen::DoCmpID(LCmpID* instr) {
1269 LOperand* left = instr->left();
1270 LOperand* right = instr->right();
1271 LOperand* result = instr->result();
1272
1273 NearLabel unordered;
1274 if (instr->is_double()) {
1275 // Don't base result on EFLAGS when a NaN is involved. Instead
1276 // jump to the unordered case, which produces a false value.
1277 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1278 __ j(parity_even, &unordered, not_taken);
1279 } else {
1280 EmitCmpI(left, right);
1281 }
1282
1283 NearLabel done;
1284 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1285 __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
1286 __ j(cc, &done);
1287
1288 __ bind(&unordered);
1289 __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
1290 __ bind(&done);
1291}
1292
1293
1294void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1295 LOperand* left = instr->left();
1296 LOperand* right = instr->right();
1297 int false_block = chunk_->LookupDestination(instr->false_block_id());
1298 int true_block = chunk_->LookupDestination(instr->true_block_id());
1299
1300 if (instr->is_double()) {
1301 // Don't base result on EFLAGS when a NaN is involved. Instead
1302 // jump to the false block.
1303 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1304 __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1305 } else {
1306 EmitCmpI(left, right);
1307 }
1308
1309 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1310 EmitBranch(true_block, false_block, cc);
1311}
1312
1313
1314void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
1315 Register left = ToRegister(instr->left());
1316 Register right = ToRegister(instr->right());
1317 Register result = ToRegister(instr->result());
1318
1319 __ cmp(left, Operand(right));
1320 __ mov(result, Handle<Object>(Heap::true_value()));
1321 NearLabel done;
1322 __ j(equal, &done);
1323 __ mov(result, Handle<Object>(Heap::false_value()));
1324 __ bind(&done);
1325}
1326
1327
1328void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
1329 Register left = ToRegister(instr->left());
1330 Register right = ToRegister(instr->right());
1331 int false_block = chunk_->LookupDestination(instr->false_block_id());
1332 int true_block = chunk_->LookupDestination(instr->true_block_id());
1333
1334 __ cmp(left, Operand(right));
1335 EmitBranch(true_block, false_block, equal);
1336}
1337
1338
1339void LCodeGen::DoIsNull(LIsNull* instr) {
1340 Register reg = ToRegister(instr->input());
1341 Register result = ToRegister(instr->result());
1342
1343 // TODO(fsc): If the expression is known to be a smi, then it's
1344 // definitely not null. Materialize false.
1345
1346 __ cmp(reg, Factory::null_value());
1347 if (instr->is_strict()) {
1348 __ mov(result, Handle<Object>(Heap::true_value()));
1349 NearLabel done;
1350 __ j(equal, &done);
1351 __ mov(result, Handle<Object>(Heap::false_value()));
1352 __ bind(&done);
1353 } else {
1354 NearLabel true_value, false_value, done;
1355 __ j(equal, &true_value);
1356 __ cmp(reg, Factory::undefined_value());
1357 __ j(equal, &true_value);
1358 __ test(reg, Immediate(kSmiTagMask));
1359 __ j(zero, &false_value);
1360 // Check for undetectable objects by looking in the bit field in
1361 // the map. The object has already been smi checked.
1362 Register scratch = result;
1363 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1364 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1365 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1366 __ j(not_zero, &true_value);
1367 __ bind(&false_value);
1368 __ mov(result, Handle<Object>(Heap::false_value()));
1369 __ jmp(&done);
1370 __ bind(&true_value);
1371 __ mov(result, Handle<Object>(Heap::true_value()));
1372 __ bind(&done);
1373 }
1374}
1375
1376
1377void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
1378 Register reg = ToRegister(instr->input());
1379
1380 // TODO(fsc): If the expression is known to be a smi, then it's
1381 // definitely not null. Jump to the false block.
1382
1383 int true_block = chunk_->LookupDestination(instr->true_block_id());
1384 int false_block = chunk_->LookupDestination(instr->false_block_id());
1385
1386 __ cmp(reg, Factory::null_value());
1387 if (instr->is_strict()) {
1388 EmitBranch(true_block, false_block, equal);
1389 } else {
1390 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1391 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1392 __ j(equal, true_label);
1393 __ cmp(reg, Factory::undefined_value());
1394 __ j(equal, true_label);
1395 __ test(reg, Immediate(kSmiTagMask));
1396 __ j(zero, false_label);
1397 // Check for undetectable objects by looking in the bit field in
1398 // the map. The object has already been smi checked.
1399 Register scratch = ToRegister(instr->temp());
1400 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1401 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1402 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1403 EmitBranch(true_block, false_block, not_zero);
1404 }
1405}
1406
1407
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00001408Condition LCodeGen::EmitIsObject(Register input,
1409 Register temp1,
1410 Register temp2,
1411 Label* is_not_object,
1412 Label* is_object) {
1413 ASSERT(!input.is(temp1));
1414 ASSERT(!input.is(temp2));
1415 ASSERT(!temp1.is(temp2));
1416
1417 __ test(input, Immediate(kSmiTagMask));
1418 __ j(equal, is_not_object);
1419
1420 __ cmp(input, Factory::null_value());
1421 __ j(equal, is_object);
1422
1423 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
1424 // Undetectable objects behave like undefined.
1425 __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
1426 __ test(temp2, Immediate(1 << Map::kIsUndetectable));
1427 __ j(not_zero, is_not_object);
1428
1429 __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
1430 __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
1431 __ j(below, is_not_object);
1432 __ cmp(temp2, LAST_JS_OBJECT_TYPE);
1433 return below_equal;
1434}
1435
1436
1437void LCodeGen::DoIsObject(LIsObject* instr) {
1438 Register reg = ToRegister(instr->input());
1439 Register result = ToRegister(instr->result());
1440 Register temp = ToRegister(instr->temp());
1441 Label is_false, is_true, done;
1442
1443 Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
1444 __ j(true_cond, &is_true);
1445
1446 __ bind(&is_false);
1447 __ mov(result, Handle<Object>(Heap::false_value()));
1448 __ jmp(&done);
1449
1450 __ bind(&is_true);
1451 __ mov(result, Handle<Object>(Heap::true_value()));
1452
1453 __ bind(&done);
1454}
1455
1456
1457void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1458 Register reg = ToRegister(instr->input());
1459 Register temp = ToRegister(instr->temp());
1460 Register temp2 = ToRegister(instr->temp2());
1461
1462 int true_block = chunk_->LookupDestination(instr->true_block_id());
1463 int false_block = chunk_->LookupDestination(instr->false_block_id());
1464 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1465 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1466
1467 Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
1468
1469 EmitBranch(true_block, false_block, true_cond);
1470}
1471
1472
kasperl@chromium.orga5551262010-12-07 12:49:48 +00001473void LCodeGen::DoIsSmi(LIsSmi* instr) {
1474 Operand input = ToOperand(instr->input());
1475 Register result = ToRegister(instr->result());
1476
1477 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1478 __ test(input, Immediate(kSmiTagMask));
1479 __ mov(result, Handle<Object>(Heap::true_value()));
1480 NearLabel done;
1481 __ j(zero, &done);
1482 __ mov(result, Handle<Object>(Heap::false_value()));
1483 __ bind(&done);
1484}
1485
1486
1487void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1488 Operand input = ToOperand(instr->input());
1489
1490 int true_block = chunk_->LookupDestination(instr->true_block_id());
1491 int false_block = chunk_->LookupDestination(instr->false_block_id());
1492
1493 __ test(input, Immediate(kSmiTagMask));
1494 EmitBranch(true_block, false_block, zero);
1495}
1496
1497
1498InstanceType LHasInstanceType::TestType() {
1499 InstanceType from = hydrogen()->from();
1500 InstanceType to = hydrogen()->to();
1501 if (from == FIRST_TYPE) return to;
1502 ASSERT(from == to || to == LAST_TYPE);
1503 return from;
1504}
1505
1506
1507
1508Condition LHasInstanceType::BranchCondition() {
1509 InstanceType from = hydrogen()->from();
1510 InstanceType to = hydrogen()->to();
1511 if (from == to) return equal;
1512 if (to == LAST_TYPE) return above_equal;
1513 if (from == FIRST_TYPE) return below_equal;
1514 UNREACHABLE();
1515 return equal;
1516}
1517
1518
1519void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
1520 Register input = ToRegister(instr->input());
1521 Register result = ToRegister(instr->result());
1522
1523 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1524 __ test(input, Immediate(kSmiTagMask));
1525 NearLabel done, is_false;
1526 __ j(zero, &is_false);
1527 __ CmpObjectType(input, instr->TestType(), result);
1528 __ j(NegateCondition(instr->BranchCondition()), &is_false);
1529 __ mov(result, Handle<Object>(Heap::true_value()));
1530 __ jmp(&done);
1531 __ bind(&is_false);
1532 __ mov(result, Handle<Object>(Heap::false_value()));
1533 __ bind(&done);
1534}
1535
1536
1537void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1538 Register input = ToRegister(instr->input());
1539 Register temp = ToRegister(instr->temp());
1540
1541 int true_block = chunk_->LookupDestination(instr->true_block_id());
1542 int false_block = chunk_->LookupDestination(instr->false_block_id());
1543
1544 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1545
1546 __ test(input, Immediate(kSmiTagMask));
1547 __ j(zero, false_label);
1548
1549 __ CmpObjectType(input, instr->TestType(), temp);
1550 EmitBranch(true_block, false_block, instr->BranchCondition());
1551}
1552
1553
1554void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
1555 Register input = ToRegister(instr->input());
1556 Register result = ToRegister(instr->result());
1557
1558 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1559 __ mov(result, Handle<Object>(Heap::true_value()));
1560 __ test(FieldOperand(input, String::kHashFieldOffset),
1561 Immediate(String::kContainsCachedArrayIndexMask));
1562 NearLabel done;
1563 __ j(not_zero, &done);
1564 __ mov(result, Handle<Object>(Heap::false_value()));
1565 __ bind(&done);
1566}
1567
1568
1569void LCodeGen::DoHasCachedArrayIndexAndBranch(
1570 LHasCachedArrayIndexAndBranch* instr) {
1571 Register input = ToRegister(instr->input());
1572
1573 int true_block = chunk_->LookupDestination(instr->true_block_id());
1574 int false_block = chunk_->LookupDestination(instr->false_block_id());
1575
1576 __ test(FieldOperand(input, String::kHashFieldOffset),
1577 Immediate(String::kContainsCachedArrayIndexMask));
1578 EmitBranch(true_block, false_block, not_equal);
1579}
1580
1581
1582// Branches to a label or falls through with the answer in the z flag. Trashes
1583// the temp registers, but not the input. Only input and temp2 may alias.
1584void LCodeGen::EmitClassOfTest(Label* is_true,
1585 Label* is_false,
1586 Handle<String>class_name,
1587 Register input,
1588 Register temp,
1589 Register temp2) {
1590 ASSERT(!input.is(temp));
1591 ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
1592 __ test(input, Immediate(kSmiTagMask));
1593 __ j(zero, is_false);
1594 __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
1595 __ j(below, is_false);
1596
1597 // Map is now in temp.
1598 // Functions have class 'Function'.
1599 __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
1600 if (class_name->IsEqualTo(CStrVector("Function"))) {
1601 __ j(equal, is_true);
1602 } else {
1603 __ j(equal, is_false);
1604 }
1605
1606 // Check if the constructor in the map is a function.
1607 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
1608
1609 // As long as JS_FUNCTION_TYPE is the last instance type and it is
1610 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
1611 // LAST_JS_OBJECT_TYPE.
1612 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1613 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1614
1615 // Objects with a non-function constructor have class 'Object'.
1616 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
1617 if (class_name->IsEqualTo(CStrVector("Object"))) {
1618 __ j(not_equal, is_true);
1619 } else {
1620 __ j(not_equal, is_false);
1621 }
1622
1623 // temp now contains the constructor function. Grab the
1624 // instance class name from there.
1625 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1626 __ mov(temp, FieldOperand(temp,
1627 SharedFunctionInfo::kInstanceClassNameOffset));
1628 // The class name we are testing against is a symbol because it's a literal.
1629 // The name in the constructor is a symbol because of the way the context is
1630 // booted. This routine isn't expected to work for random API-created
1631 // classes and it doesn't have to because you can't access it with natives
1632 // syntax. Since both sides are symbols it is sufficient to use an identity
1633 // comparison.
1634 __ cmp(temp, class_name);
1635 // End with the answer in the z flag.
1636}
1637
1638
1639void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
1640 Register input = ToRegister(instr->input());
1641 Register result = ToRegister(instr->result());
1642 ASSERT(input.is(result));
1643 Register temp = ToRegister(instr->temporary());
1644 Handle<String> class_name = instr->hydrogen()->class_name();
1645 NearLabel done;
1646 Label is_true, is_false;
1647
1648 EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
1649
1650 __ j(not_equal, &is_false);
1651
1652 __ bind(&is_true);
1653 __ mov(result, Handle<Object>(Heap::true_value()));
1654 __ jmp(&done);
1655
1656 __ bind(&is_false);
1657 __ mov(result, Handle<Object>(Heap::false_value()));
1658 __ bind(&done);
1659}
1660
1661
1662void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1663 Register input = ToRegister(instr->input());
1664 Register temp = ToRegister(instr->temporary());
1665 Register temp2 = ToRegister(instr->temporary2());
1666 if (input.is(temp)) {
1667 // Swap.
1668 Register swapper = temp;
1669 temp = temp2;
1670 temp2 = swapper;
1671 }
1672 Handle<String> class_name = instr->hydrogen()->class_name();
1673
1674 int true_block = chunk_->LookupDestination(instr->true_block_id());
1675 int false_block = chunk_->LookupDestination(instr->false_block_id());
1676
1677 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1678 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1679
1680 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1681
1682 EmitBranch(true_block, false_block, equal);
1683}
1684
1685
1686void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1687 Register reg = ToRegister(instr->input());
1688 int true_block = instr->true_block_id();
1689 int false_block = instr->false_block_id();
1690
1691 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
1692 EmitBranch(true_block, false_block, equal);
1693}
1694
1695
1696void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
kmillikin@chromium.orgd2c22f02011-01-10 08:15:37 +00001697 // Object and function are in fixed registers defined by the stub.
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00001698 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
kasperl@chromium.orga5551262010-12-07 12:49:48 +00001699 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1700
1701 NearLabel true_value, done;
1702 __ test(eax, Operand(eax));
1703 __ j(zero, &true_value);
1704 __ mov(ToRegister(instr->result()), Factory::false_value());
1705 __ jmp(&done);
1706 __ bind(&true_value);
1707 __ mov(ToRegister(instr->result()), Factory::true_value());
1708 __ bind(&done);
1709}
1710
1711
1712void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
1713 int true_block = chunk_->LookupDestination(instr->true_block_id());
1714 int false_block = chunk_->LookupDestination(instr->false_block_id());
1715
1716 InstanceofStub stub(InstanceofStub::kArgsInRegisters);
1717 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1718 __ test(eax, Operand(eax));
1719 EmitBranch(true_block, false_block, zero);
1720}
1721
1722
1723void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
1724 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
1725 public:
1726 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
1727 LInstanceOfKnownGlobal* instr)
1728 : LDeferredCode(codegen), instr_(instr) { }
1729 virtual void Generate() {
1730 codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
1731 }
1732
1733 Label* map_check() { return &map_check_; }
1734
1735 private:
1736 LInstanceOfKnownGlobal* instr_;
1737 Label map_check_;
1738 };
1739
1740 DeferredInstanceOfKnownGlobal* deferred;
1741 deferred = new DeferredInstanceOfKnownGlobal(this, instr);
1742
1743 Label done, false_result;
1744 Register object = ToRegister(instr->input());
1745 Register temp = ToRegister(instr->temp());
1746
1747 // A Smi is not an instance of anything.
1748 __ test(object, Immediate(kSmiTagMask));
1749 __ j(zero, &false_result, not_taken);
1750
1751 // This is the inlined call site instanceof cache. The two occurrences of the
1752 // hole value will be patched to the last map/result pair generated by the
1753 // instanceof stub.
1754 NearLabel cache_miss;
1755 Register map = ToRegister(instr->temp());
1756 __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
1757 __ bind(deferred->map_check()); // Label for calculating code patching.
1758 __ cmp(map, Factory::the_hole_value()); // Patched to cached map.
1759 __ j(not_equal, &cache_miss, not_taken);
1760 __ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
1761 __ jmp(&done);
1762
1763 // The inlined call site cache did not match. Check null and string before
1764 // calling the deferred code.
1765 __ bind(&cache_miss);
1766 // Null is not an instance of anything.
1767 __ cmp(object, Factory::null_value());
1768 __ j(equal, &false_result);
1769
1770 // String values are not instances of anything.
1771 Condition is_string = masm_->IsObjectStringType(object, temp, temp);
1772 __ j(is_string, &false_result);
1773
1774 // Go to the deferred code.
1775 __ jmp(deferred->entry());
1776
1777 __ bind(&false_result);
1778 __ mov(ToRegister(instr->result()), Factory::false_value());
1779
1780 // Here result has either true or false. Deferred code also produces true or
1781 // false object.
1782 __ bind(deferred->exit());
1783 __ bind(&done);
1784}
1785
1786
1787void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
1788 Label* map_check) {
1789 __ PushSafepointRegisters();
1790
1791 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
1792 flags = static_cast<InstanceofStub::Flags>(
1793 flags | InstanceofStub::kArgsInRegisters);
1794 flags = static_cast<InstanceofStub::Flags>(
1795 flags | InstanceofStub::kCallSiteInlineCheck);
1796 flags = static_cast<InstanceofStub::Flags>(
1797 flags | InstanceofStub::kReturnTrueFalseObject);
1798 InstanceofStub stub(flags);
1799
1800 // Get the temp register reserved by the instruction. It must be edi, because
1801 // its slot in the pushed safepoint register block is used to communicate the
1802 // offset to the location of the map check.
1803 Register temp = ToRegister(instr->temp());
1804 ASSERT(temp.is(edi));
1805 __ mov(InstanceofStub::right(), Immediate(instr->function()));
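// kAdditionalDelta is the number of bytes emitted from before_push_delta up
// to and including the call below; the ASSERT_EQ after the call verifies it.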
1806 static const int kAdditionalDelta = 13;
1807 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
1808 Label before_push_delta;
1809 __ bind(&before_push_delta);
1810 __ mov(temp, Immediate(delta));
1811 __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
1812 __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
1813 ASSERT_EQ(kAdditionalDelta,
1814 masm_->SizeOfCodeGeneratedSince(&before_push_delta));
1815 RecordSafepointWithRegisters(
1816 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
1817 // Put the result value into the eax slot and restore all registers.
1818 __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
1819
1820 __ PopSafepointRegisters();
1821}
1822
1823
1824static Condition ComputeCompareCondition(Token::Value op) {
1825 switch (op) {
1826 case Token::EQ_STRICT:
1827 case Token::EQ:
1828 return equal;
1829 case Token::LT:
1830 return less;
1831 case Token::GT:
1832 return greater;
1833 case Token::LTE:
1834 return less_equal;
1835 case Token::GTE:
1836 return greater_equal;
1837 default:
1838 UNREACHABLE();
1839 return no_condition;
1840 }
1841}
1842
1843
1844void LCodeGen::DoCmpT(LCmpT* instr) {
1845 Token::Value op = instr->op();
1846
1847 Handle<Code> ic = CompareIC::GetUninitialized(op);
1848 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1849
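// The compare stub returns its result in eax as an integer comparable
// against zero, and expects the condition and the input operands reversed
// for GT and LTE.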
1850 Condition condition = ComputeCompareCondition(op);
1851 if (op == Token::GT || op == Token::LTE) {
1852 condition = ReverseCondition(condition);
1853 }
1854 NearLabel true_value, done;
1855 __ test(eax, Operand(eax));
1856 __ j(condition, &true_value);
1857 __ mov(ToRegister(instr->result()), Factory::false_value());
1858 __ jmp(&done);
1859 __ bind(&true_value);
1860 __ mov(ToRegister(instr->result()), Factory::true_value());
1861 __ bind(&done);
1862}
1863
1864
1865void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
1866 Token::Value op = instr->op();
1867 int true_block = chunk_->LookupDestination(instr->true_block_id());
1868 int false_block = chunk_->LookupDestination(instr->false_block_id());
1869
1870 Handle<Code> ic = CompareIC::GetUninitialized(op);
1871 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1872
1873 // The compare stub expects the condition and the input operands reversed
1874 // for GT and LTE.
1875 Condition condition = ComputeCompareCondition(op);
1876 if (op == Token::GT || op == Token::LTE) {
1877 condition = ReverseCondition(condition);
1878 }
1879 __ test(eax, Operand(eax));
1880 EmitBranch(true_block, false_block, condition);
1881}
1882
1883
1884void LCodeGen::DoReturn(LReturn* instr) {
1885 if (FLAG_trace) {
1886 // Preserve the return value on the stack and rely on the runtime
1887 // call to return the value in the same register.
1888 __ push(eax);
1889 __ CallRuntime(Runtime::kTraceExit, 1);
1890 }
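// Tear down the frame and return, dropping the parameters and the receiver.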
1891 __ mov(esp, ebp);
1892 __ pop(ebp);
1893 __ ret((ParameterCount() + 1) * kPointerSize);
1894}
1895
1896
1897void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
1898 Register result = ToRegister(instr->result());
1899 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
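// If the global can be deleted, its cell may contain the hole value;
// deoptimize in that case.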
1900 if (instr->hydrogen()->check_hole_value()) {
1901 __ cmp(result, Factory::the_hole_value());
1902 DeoptimizeIf(equal, instr->environment());
1903 }
1904}
1905
1906
1907void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
1908 Register value = ToRegister(instr->input());
1909 __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
1910}
1911
1912
1913void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
1914 Register object = ToRegister(instr->input());
1915 Register result = ToRegister(instr->result());
1916 if (instr->hydrogen()->is_in_object()) {
1917 __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
1918 } else {
1919 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
1920 __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
1921 }
1922}
1923
1924
1925void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
1926 ASSERT(ToRegister(instr->object()).is(eax));
1927 ASSERT(ToRegister(instr->result()).is(eax));
1928
1929 __ mov(ecx, instr->name());
1930 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
1931 CallCode(ic, RelocInfo::CODE_TARGET, instr);
1932}
1933
1934
1935void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
1936 Register function = ToRegister(instr->function());
1937 Register temp = ToRegister(instr->temporary());
1938 Register result = ToRegister(instr->result());
1939
1940 // Check that the function really is a function.
1941 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
1942 DeoptimizeIf(not_equal, instr->environment());
1943
1944 // Check whether the function has an instance prototype.
1945 NearLabel non_instance;
1946 __ test_b(FieldOperand(result, Map::kBitFieldOffset),
1947 1 << Map::kHasNonInstancePrototype);
1948 __ j(not_zero, &non_instance);
1949
1950 // Get the prototype or initial map from the function.
1951 __ mov(result,
1952 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
1953
1954 // Check that the function has a prototype or an initial map.
1955 __ cmp(Operand(result), Immediate(Factory::the_hole_value()));
1956 DeoptimizeIf(equal, instr->environment());
1957
1958 // If the function does not have an initial map, we're done.
1959 NearLabel done;
1960 __ CmpObjectType(result, MAP_TYPE, temp);
1961 __ j(not_equal, &done);
1962
1963 // Get the prototype from the initial map.
1964 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
1965 __ jmp(&done);
1966
1967 // Non-instance prototype: Fetch prototype from constructor field
1968 // in the function's map.
1969 __ bind(&non_instance);
1970 __ mov(result, FieldOperand(result, Map::kConstructorOffset));
1971
1972 // All done.
1973 __ bind(&done);
1974}
1975
1976
1977void LCodeGen::DoLoadElements(LLoadElements* instr) {
1978 ASSERT(instr->result()->Equals(instr->input()));
1979 Register reg = ToRegister(instr->input());
1980 __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
1981 if (FLAG_debug_code) {
1982 NearLabel done;
1983 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1984 Immediate(Factory::fixed_array_map()));
1985 __ j(equal, &done);
1986 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1987 Immediate(Factory::fixed_cow_array_map()));
1988 __ Check(equal, "Check for fast elements failed.");
1989 __ bind(&done);
1990 }
1991}
1992
1993
1994void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
1995 Register arguments = ToRegister(instr->arguments());
1996 Register length = ToRegister(instr->length());
1997 Operand index = ToOperand(instr->index());
1998 Register result = ToRegister(instr->result());
1999
2000 __ sub(length, index);
2001 DeoptimizeIf(below_equal, instr->environment());
2002
2003 // There are two words between the frame pointer and the last argument.
2004 // Subtracting from length accounts for one of them; add one more.
2005 __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2006}
2007
2008
2009void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2010 Register elements = ToRegister(instr->elements());
2011 Register key = ToRegister(instr->key());
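// Use the dedicated load result when one is given; otherwise the result
// must reuse the elements register.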
2012 Register result;
2013 if (instr->load_result() != NULL) {
2014 result = ToRegister(instr->load_result());
2015 } else {
2016 result = ToRegister(instr->result());
2017 ASSERT(result.is(elements));
2018 }
2019
2020 // Load the result.
2021 __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2022
2023 Representation r = instr->hydrogen()->representation();
2024 if (r.IsInteger32()) {
2025 // Untag and check for smi.
2026 __ SmiUntag(result);
2027 DeoptimizeIf(carry, instr->environment());
2028 } else if (r.IsDouble()) {
2029 EmitNumberUntagD(result,
2030 ToDoubleRegister(instr->result()),
2031 instr->environment());
2032 } else {
2033 // Check for the hole value.
2034 ASSERT(r.IsTagged());
2035 __ cmp(result, Factory::the_hole_value());
2036 DeoptimizeIf(equal, instr->environment());
2037 }
2038}
2039
2040
2041void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2042 ASSERT(ToRegister(instr->object()).is(edx));
2043 ASSERT(ToRegister(instr->key()).is(eax));
2044
2045 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
2046 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2047}
2048
2049
2050void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2051 Register result = ToRegister(instr->result());
2052
2053 // Check for arguments adapter frame.
2054 NearLabel done, adapted;
2055 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2056 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
2057 __ cmp(Operand(result),
2058 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2059 __ j(equal, &adapted);
2060
2061 // No arguments adaptor frame.
2062 __ mov(result, Operand(ebp));
2063 __ jmp(&done);
2064
2065 // Arguments adaptor frame present.
2066 __ bind(&adapted);
2067 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2068
2069 // The result is the frame pointer for this frame if not adapted, and for the
2070 // real frame below the adaptor frame if adapted.
2071 __ bind(&done);
2072}
2073
2074
2075void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2076 Operand elem = ToOperand(instr->input());
2077 Register result = ToRegister(instr->result());
2078
2079 NearLabel done;
2080
2081 // If there is no arguments adaptor frame, the number of arguments is fixed.
2082 __ cmp(ebp, elem);
2083 __ mov(result, Immediate(scope()->num_parameters()));
2084 __ j(equal, &done);
2085
2086 // Arguments adaptor frame present. Get argument length from there.
2087 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2088 __ mov(result, Operand(result,
2089 ArgumentsAdaptorFrameConstants::kLengthOffset));
2090 __ SmiUntag(result);
2091
2092 // The argument length is in the result register.
2093 __ bind(&done);
2094}
2095
2096
2097void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2098 Register receiver = ToRegister(instr->receiver());
2099 ASSERT(ToRegister(instr->function()).is(edi));
2100 ASSERT(ToRegister(instr->result()).is(eax));
2101
2102 // If the receiver is null or undefined, we have to pass the
2103 // global object as a receiver.
2104 NearLabel global_receiver, receiver_ok;
2105 __ cmp(receiver, Factory::null_value());
2106 __ j(equal, &global_receiver);
2107 __ cmp(receiver, Factory::undefined_value());
2108 __ j(not_equal, &receiver_ok);
2109 __ bind(&global_receiver);
2110 __ mov(receiver, GlobalObjectOperand());
2111 __ bind(&receiver_ok);
2112
2113 Register length = ToRegister(instr->length());
2114 Register elements = ToRegister(instr->elements());
2115
2116 Label invoke;
2117
2118 // Copy the arguments to this function possibly from the
2119 // adaptor frame below it.
2120 const uint32_t kArgumentsLimit = 1 * KB;
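// Deoptimize when there are too many arguments; this bounds the stack space
// used by the push loop below.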
2121 __ cmp(length, kArgumentsLimit);
2122 DeoptimizeIf(above, instr->environment());
2123
2124 __ push(receiver);
2125 __ mov(receiver, length);
2126
2127 // Loop through the arguments pushing them onto the execution
2128 // stack.
2129 Label loop;
2130 // length is a small non-negative integer, due to the test above.
2131 __ test(length, Operand(length));
2132 __ j(zero, &invoke);
2133 __ bind(&loop);
2134 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
2135 __ dec(length);
2136 __ j(not_zero, &loop);
2137
2138 // Invoke the function.
2139 __ bind(&invoke);
2140 ASSERT(receiver.is(eax));
2141 v8::internal::ParameterCount actual(eax);
2142 SafepointGenerator safepoint_generator(this,
2143 instr->pointer_map(),
2144 Safepoint::kNoDeoptimizationIndex);
2145 __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
2146}
2147
2148
2149void LCodeGen::DoPushArgument(LPushArgument* instr) {
2150 LOperand* argument = instr->input();
2151 if (argument->IsConstantOperand()) {
2152 __ push(ToImmediate(argument));
2153 } else {
2154 __ push(ToOperand(argument));
2155 }
2156}
2157
2158
2159void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2160 Register result = ToRegister(instr->result());
2161 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2162}
2163
2164
2165void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2166 Register result = ToRegister(instr->result());
2167 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2168 __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
2169}
2170
2171
2172void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2173 int arity,
2174 LInstruction* instr) {
2175 // Change context if needed.
2176 bool change_context =
2177 (graph()->info()->closure()->context() != function->context()) ||
2178 scope()->contains_with() ||
2179 (scope()->num_heap_slots() > 0);
2180 if (change_context) {
2181 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2182 }
2183
2184 // Set eax to the argument count if adaptation is not needed. Assumes that
2185 // eax is available to write to at this point.
2186 if (!function->NeedsArgumentsAdaption()) {
2187 __ mov(eax, arity);
2188 }
2189
2190 LPointerMap* pointers = instr->pointer_map();
2191 RecordPosition(pointers->position());
2192
2193 // Invoke function.
2194 if (*function == *graph()->info()->closure()) {
2195 __ CallSelf();
2196 } else {
2197 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
2198 }
2199
2200 // Set up deoptimization.
2201 RegisterLazyDeoptimization(instr);
2202
2203 // Restore context.
2204 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2205}
2206
2207
2208void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2209 ASSERT(ToRegister(instr->result()).is(eax));
2210 __ mov(edi, instr->function());
2211 CallKnownFunction(instr->function(), instr->arity(), instr);
2212}
2213
2214
2215void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2216 Register input_reg = ToRegister(instr->input());
2217 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2218 Factory::heap_number_map());
2219 DeoptimizeIf(not_equal, instr->environment());
2220
2221 Label done;
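// Pick two scratch registers distinct from each other and from input_reg.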
2222 Register tmp = input_reg.is(eax) ? ecx : eax;
2223 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
2224
2225 // Preserve the value of all registers.
2226 __ PushSafepointRegisters();
2227
2228 Label negative;
2229 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2230 // Check the sign of the argument. If the argument is positive,
2231 // just return it.
2232 __ test(tmp, Immediate(HeapNumber::kSignMask));
2233 __ j(not_zero, &negative);
2234 __ mov(tmp, input_reg);
2235 __ jmp(&done);
2236
2237 __ bind(&negative);
2238
2239 Label allocated, slow;
2240 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
2241 __ jmp(&allocated);
2242
2243 // Slow case: Call the runtime system to do the number allocation.
2244 __ bind(&slow);
2245
2246 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2247 RecordSafepointWithRegisters(
2248 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2249 // Set the pointer to the new heap number in tmp.
2250 if (!tmp.is(eax)) __ mov(tmp, eax);
2251
2252 // Restore input_reg after call to runtime.
2253 __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
2254
2255 __ bind(&allocated);
2256 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2257 __ and_(tmp2, ~HeapNumber::kSignMask);
2258 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
2259 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
2260 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
2261
2262 __ bind(&done);
2263 __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
2264
2265 __ PopSafepointRegisters();
2266}
2267
2268
2269void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2270 // Class for deferred case.
2271 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2272 public:
2273 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2274 LUnaryMathOperation* instr)
2275 : LDeferredCode(codegen), instr_(instr) { }
2276 virtual void Generate() {
2277 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2278 }
2279 private:
2280 LUnaryMathOperation* instr_;
2281 };
2282
2283 ASSERT(instr->input()->Equals(instr->result()));
2284 Representation r = instr->hydrogen()->value()->representation();
2285
2286 if (r.IsDouble()) {
2287 XMMRegister scratch = xmm0;
2288 XMMRegister input_reg = ToDoubleRegister(instr->input());
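// Compute -input in scratch and AND it with input: the two values differ
// only in their sign bit, so the result has the sign bit cleared, which is
// the absolute value.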
2289 __ pxor(scratch, scratch);
2290 __ subsd(scratch, input_reg);
2291 __ pand(input_reg, scratch);
2292 } else if (r.IsInteger32()) {
2293 Register input_reg = ToRegister(instr->input());
2294 __ test(input_reg, Operand(input_reg));
2295 Label is_positive;
2296 __ j(not_sign, &is_positive);
2297 __ neg(input_reg);
2298 __ test(input_reg, Operand(input_reg));
2299 DeoptimizeIf(negative, instr->environment());
2300 __ bind(&is_positive);
2301 } else { // Tagged case.
2302 DeferredMathAbsTaggedHeapNumber* deferred =
2303 new DeferredMathAbsTaggedHeapNumber(this, instr);
2304 Label not_smi;
2305 Register input_reg = ToRegister(instr->input());
2306 // Smi check.
2307 __ test(input_reg, Immediate(kSmiTagMask));
2308 __ j(not_zero, deferred->entry());
2309 __ test(input_reg, Operand(input_reg));
2310 Label is_positive;
2311 __ j(not_sign, &is_positive);
2312 __ neg(input_reg);
2313
2314 __ test(input_reg, Operand(input_reg));
2315 DeoptimizeIf(negative, instr->environment());
2316
2317 __ bind(&is_positive);
2318 __ bind(deferred->exit());
2319 }
2320}
2321
2322
2323void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2324 XMMRegister xmm_scratch = xmm0;
2325 Register output_reg = ToRegister(instr->result());
2326 XMMRegister input_reg = ToDoubleRegister(instr->input());
2327 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2328 __ ucomisd(input_reg, xmm_scratch);
2329
2330 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2331 DeoptimizeIf(below_equal, instr->environment());
2332 } else {
2333 DeoptimizeIf(below, instr->environment());
2334 }
2335
2336 // Use truncating instruction (OK because input is positive).
2337 __ cvttsd2si(output_reg, Operand(input_reg));
2338
2339 // Overflow is signalled with minint.
2340 __ cmp(output_reg, 0x80000000u);
2341 DeoptimizeIf(equal, instr->environment());
2342}
2343
2344
2345void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2346 XMMRegister xmm_scratch = xmm0;
2347 Register output_reg = ToRegister(instr->result());
2348 XMMRegister input_reg = ToDoubleRegister(instr->input());
2349
2350 // xmm_scratch = 0.5
2351 ExternalReference one_half = ExternalReference::address_of_one_half();
2352 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
2353
2354 // input = input + 0.5
2355 __ addsd(input_reg, xmm_scratch);
2356
2357 // We need to return -0 for the input range [-0.5, 0), otherwise
2358 // compute Math.floor(value + 0.5).
2359 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2360 __ ucomisd(input_reg, xmm_scratch);
2361 DeoptimizeIf(below_equal, instr->environment());
2362 } else {
2363 // If we don't need to bail out on -0, we only bail out
2364 // on negative inputs.
2365 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2366 __ ucomisd(input_reg, xmm_scratch);
2367 DeoptimizeIf(below, instr->environment());
2368 }
2369
2370 // Compute Math.floor(value + 0.5).
2371 // Use truncating instruction (OK because input is positive).
2372 __ cvttsd2si(output_reg, Operand(input_reg));
2373
2374 // Overflow is signalled with minint.
2375 __ cmp(output_reg, 0x80000000u);
2376 DeoptimizeIf(equal, instr->environment());
2377}
2378
2379
2380void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
2381 XMMRegister input_reg = ToDoubleRegister(instr->input());
2382 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2383 __ sqrtsd(input_reg, input_reg);
2384}
2385
2386
2387void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
2388 XMMRegister xmm_scratch = xmm0;
2389 XMMRegister input_reg = ToDoubleRegister(instr->input());
2390 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2391 ExternalReference negative_infinity =
2392 ExternalReference::address_of_negative_infinity();
2393 __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
2394 __ ucomisd(xmm_scratch, input_reg);
2395 DeoptimizeIf(equal, instr->environment());
2396 __ sqrtsd(input_reg, input_reg);
2397}
2398
2399
2400void LCodeGen::DoPower(LPower* instr) {
2401 LOperand* left = instr->left();
2402 LOperand* right = instr->right();
2403 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2404 Representation exponent_type = instr->hydrogen()->right()->representation();
2405 if (exponent_type.IsDouble()) {
2406 // It is safe to use ebx directly since the instruction is marked
2407 // as a call.
2408 __ PrepareCallCFunction(4, ebx);
2409 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2410 __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
2411 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2412 } else if (exponent_type.IsInteger32()) {
2413 // It is safe to use ebx directly since the instruction is marked
2414 // as a call.
2415 ASSERT(!ToRegister(right).is(ebx));
2416 __ PrepareCallCFunction(4, ebx);
2417 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2418 __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
2419 __ CallCFunction(ExternalReference::power_double_int_function(), 4);
2420 } else {
2421 ASSERT(exponent_type.IsTagged());
2422 CpuFeatures::Scope scope(SSE2);
2423 Register right_reg = ToRegister(right);
2424
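// Convert the tagged exponent to a double: untag smis directly, load the
// value of heap numbers, and deoptimize on anything else.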
2425 Label non_smi, call;
2426 __ test(right_reg, Immediate(kSmiTagMask));
2427 __ j(not_zero, &non_smi);
2428 __ SmiUntag(right_reg);
2429 __ cvtsi2sd(result_reg, Operand(right_reg));
2430 __ jmp(&call);
2431
2432 __ bind(&non_smi);
2433 // It is safe to use ebx directly since the instruction is marked
2434 // as a call.
2435 ASSERT(!right_reg.is(ebx));
2436 __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
2437 DeoptimizeIf(not_equal, instr->environment());
2438 __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
2439
2440 __ bind(&call);
2441 __ PrepareCallCFunction(4, ebx);
2442 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2443 __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
2444 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2445 }
2446
2447 // Return value is in st(0) on ia32.
2448 // Store it into the (fixed) result register.
2449 __ sub(Operand(esp), Immediate(kDoubleSize));
2450 __ fstp_d(Operand(esp, 0));
2451 __ movdbl(result_reg, Operand(esp, 0));
2452 __ add(Operand(esp), Immediate(kDoubleSize));
2453}
2454
2455
2456void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
2457 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2458 TranscendentalCacheStub stub(TranscendentalCache::LOG,
2459 TranscendentalCacheStub::UNTAGGED);
2460 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2461}
2462
2463
2464void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
2465 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2466 TranscendentalCacheStub stub(TranscendentalCache::COS,
2467 TranscendentalCacheStub::UNTAGGED);
2468 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2469}
2470
2471
2472void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
2473 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2474 TranscendentalCacheStub stub(TranscendentalCache::SIN,
2475 TranscendentalCacheStub::UNTAGGED);
2476 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2477}
2478
2479
2480void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
2481 switch (instr->op()) {
2482 case kMathAbs:
2483 DoMathAbs(instr);
2484 break;
2485 case kMathFloor:
2486 DoMathFloor(instr);
2487 break;
2488 case kMathRound:
2489 DoMathRound(instr);
2490 break;
2491 case kMathSqrt:
2492 DoMathSqrt(instr);
2493 break;
2494 case kMathPowHalf:
2495 DoMathPowHalf(instr);
2496 break;
2497 case kMathCos:
2498 DoMathCos(instr);
2499 break;
2500 case kMathSin:
2501 DoMathSin(instr);
2502 break;
2503 case kMathLog:
2504 DoMathLog(instr);
2505 break;
2506
2507 default:
2508 UNREACHABLE();
2509 }
2510}
2511
2512
2513void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
2514 ASSERT(ToRegister(instr->result()).is(eax));
2515
2516 int arity = instr->arity();
2517 Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
2518 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2519 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2520}
2521
2522
2523void LCodeGen::DoCallNamed(LCallNamed* instr) {
2524 ASSERT(ToRegister(instr->result()).is(eax));
2525
2526 int arity = instr->arity();
2527 Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2528 __ mov(ecx, instr->name());
2529 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2530 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2531}
2532
2533
2534void LCodeGen::DoCallFunction(LCallFunction* instr) {
2535 ASSERT(ToRegister(instr->result()).is(eax));
2536
2537 int arity = instr->arity();
2538 CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
2539 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2540 __ Drop(1);
2541 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2542}
2543
2544
2545void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
2546 ASSERT(ToRegister(instr->result()).is(eax));
2547
2548 int arity = instr->arity();
2549 Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2550 __ mov(ecx, instr->name());
2551 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2552 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2553}
2554
2555
2556void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
2557 ASSERT(ToRegister(instr->result()).is(eax));
2558 __ mov(edi, instr->target());
2559 CallKnownFunction(instr->target(), instr->arity(), instr);
2560}
2561
2562
2563void LCodeGen::DoCallNew(LCallNew* instr) {
2564 ASSERT(ToRegister(instr->input()).is(edi));
2565 ASSERT(ToRegister(instr->result()).is(eax));
2566
2567 Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
2568 __ Set(eax, Immediate(instr->arity()));
2569 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
2570}
2571
2572
2573void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2574 CallRuntime(instr->function(), instr->arity(), instr);
2575}
2576
2577
2578void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
2579 Register object = ToRegister(instr->object());
2580 Register value = ToRegister(instr->value());
2581 int offset = instr->offset();
2582
2583 if (!instr->transition().is_null()) {
2584 __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
2585 }
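// No write barrier is needed for the map itself: maps are allocated in map
// space, which is never in new space.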
2586
2587 // Do the store.
2588 if (instr->is_in_object()) {
2589 __ mov(FieldOperand(object, offset), value);
2590 if (instr->needs_write_barrier()) {
2591 Register temp = ToRegister(instr->temp());
2592 // Update the write barrier for the object for in-object properties.
2593 __ RecordWrite(object, offset, value, temp);
2594 }
2595 } else {
2596 Register temp = ToRegister(instr->temp());
2597 __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
2598 __ mov(FieldOperand(temp, offset), value);
2599 if (instr->needs_write_barrier()) {
2600 // Update the write barrier for the properties array.
2601 // object is used as a scratch register.
2602 __ RecordWrite(temp, offset, value, object);
2603 }
2604 }
2605}
2606
2607
2608void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
2609 ASSERT(ToRegister(instr->object()).is(edx));
2610 ASSERT(ToRegister(instr->value()).is(eax));
2611
2612 __ mov(ecx, instr->name());
2613 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
2614 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2615}
2616
2617
2618void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
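// The unsigned comparison also catches negative indices, which wrap around
// to large unsigned values.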
2619 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
2620 DeoptimizeIf(above_equal, instr->environment());
2621}
2622
2623
2624void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
2625 Register value = ToRegister(instr->value());
2626 Register elements = ToRegister(instr->object());
2627 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
2628
2629 // Do the store.
2630 if (instr->key()->IsConstantOperand()) {
2631 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
2632 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2633 int offset =
2634 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
2635 __ mov(FieldOperand(elements, offset), value);
2636 } else {
2637 __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
2638 value);
2639 }
2640
2641 if (instr->hydrogen()->NeedsWriteBarrier()) {
2642 // Compute address of modified element and store it into key register.
2643 __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2644 __ RecordWrite(elements, key, value);
2645 }
2646}
2647
2648
2649void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
2650 ASSERT(ToRegister(instr->object()).is(edx));
2651 ASSERT(ToRegister(instr->key()).is(ecx));
2652 ASSERT(ToRegister(instr->value()).is(eax));
2653
2654 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
2655 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2656}
2657
2658
2659void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
2660 LOperand* input = instr->input();
2661 ASSERT(input->IsRegister() || input->IsStackSlot());
2662 LOperand* output = instr->result();
2663 ASSERT(output->IsDoubleRegister());
2664 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
2665}
2666
2667
2668void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
2669 class DeferredNumberTagI: public LDeferredCode {
2670 public:
2671 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
2672 : LDeferredCode(codegen), instr_(instr) { }
2673 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
2674 private:
2675 LNumberTagI* instr_;
2676 };
2677
2678 LOperand* input = instr->input();
2679 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2680 Register reg = ToRegister(input);
2681
2682 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
2683 __ SmiTag(reg);
2684 __ j(overflow, deferred->entry());
2685 __ bind(deferred->exit());
2686}
2687
2688
2689void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
2690 Label slow;
2691 Register reg = ToRegister(instr->input());
2692 Register tmp = reg.is(eax) ? ecx : eax;
2693
2694 // Preserve the value of all registers.
2695 __ PushSafepointRegisters();
2696
2697 // There was overflow, so bits 30 and 31 of the original integer
2698 // disagree. Try to allocate a heap number in new space and store
2699 // the value in there. If that fails, call the runtime system.
2700 NearLabel done;
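// Recover the original integer: shift the tag back out, then flip the sign
// bit that the overflowing tag shift inverted.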
2701 __ SmiUntag(reg);
2702 __ xor_(reg, 0x80000000);
2703 __ cvtsi2sd(xmm0, Operand(reg));
2704 if (FLAG_inline_new) {
2705 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
2706 __ jmp(&done);
2707 }
2708
2709 // Slow case: Call the runtime system to do the number allocation.
2710 __ bind(&slow);
2711
2712 // TODO(3095996): Put a valid pointer value in the stack slot where the result
2713 // register is stored, as this register is in the pointer map, but contains an
2714 // integer value.
2715 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
2716
2717 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2718 RecordSafepointWithRegisters(
2719 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2720 if (!reg.is(eax)) __ mov(reg, eax);
2721
2722 // Done. Put the value in xmm0 into the value of the allocated heap
2723 // number.
2724 __ bind(&done);
2725 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
2726 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
2727 __ PopSafepointRegisters();
2728}
2729
2730
2731void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
2732 class DeferredNumberTagD: public LDeferredCode {
2733 public:
2734 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
2735 : LDeferredCode(codegen), instr_(instr) { }
2736 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
2737 private:
2738 LNumberTagD* instr_;
2739 };
2740
2741 XMMRegister input_reg = ToDoubleRegister(instr->input());
2742 Register reg = ToRegister(instr->result());
2743 Register tmp = ToRegister(instr->temp());
2744
2745 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
2746 if (FLAG_inline_new) {
2747 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
2748 } else {
2749 __ jmp(deferred->entry());
2750 }
2751 __ bind(deferred->exit());
2752 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
2753}
2754
2755
2756void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
2757 // TODO(3095996): Get rid of this. For now, we need to make the
2758 // result register contain a valid pointer because it is already
2759 // contained in the register pointer map.
2760 Register reg = ToRegister(instr->result());
2761 __ Set(reg, Immediate(0));
2762
2763 __ PushSafepointRegisters();
2764 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2765 RecordSafepointWithRegisters(
2766 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2767 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
2768 __ PopSafepointRegisters();
2769}
2770
2771
2772void LCodeGen::DoSmiTag(LSmiTag* instr) {
2773 LOperand* input = instr->input();
2774 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2775 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
2776 __ SmiTag(ToRegister(input));
2777}
2778
2779
2780void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
2781 LOperand* input = instr->input();
2782 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2783 if (instr->needs_check()) {
2784 __ test(ToRegister(input), Immediate(kSmiTagMask));
2785 DeoptimizeIf(not_zero, instr->environment());
2786 }
2787 __ SmiUntag(ToRegister(input));
2788}
2789
2790
2791void LCodeGen::EmitNumberUntagD(Register input_reg,
2792 XMMRegister result_reg,
2793 LEnvironment* env) {
2794 NearLabel load_smi, heap_number, done;
2795
2796 // Smi check.
2797 __ test(input_reg, Immediate(kSmiTagMask));
2798 __ j(zero, &load_smi, not_taken);
2799
2800 // Heap number map check.
2801 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2802 Factory::heap_number_map());
2803 __ j(equal, &heap_number);
2804
2805 __ cmp(input_reg, Factory::undefined_value());
2806 DeoptimizeIf(not_equal, env);
2807
2808 // Convert undefined to NaN.
2809 __ push(input_reg);
2810 __ mov(input_reg, Factory::nan_value());
2811 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2812 __ pop(input_reg);
2813 __ jmp(&done);
2814
2815 // Heap number to XMM conversion.
2816 __ bind(&heap_number);
2817 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
2818 __ jmp(&done);
2819
2820 // Smi to XMM conversion
2821 __ bind(&load_smi);
2822 __ SmiUntag(input_reg); // Untag smi before converting to float.
2823 __ cvtsi2sd(result_reg, Operand(input_reg));
2824 __ SmiTag(input_reg); // Retag smi.
2825 __ bind(&done);
2826}
2827
2828
2829class DeferredTaggedToI: public LDeferredCode {
2830 public:
2831 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
2832 : LDeferredCode(codegen), instr_(instr) { }
2833 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
2834 private:
2835 LTaggedToI* instr_;
2836};
2837
2838
2839void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
2840 NearLabel done, heap_number;
2841 Register input_reg = ToRegister(instr->input());
2842
2843 // Heap number map check.
2844 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2845 Factory::heap_number_map());
2846
2847 if (instr->truncating()) {
2848 __ j(equal, &heap_number);
2849 // Check for undefined. Undefined is converted to zero for truncating
2850 // conversions.
2851 __ cmp(input_reg, Factory::undefined_value());
2852 DeoptimizeIf(not_equal, instr->environment());
2853 __ mov(input_reg, 0);
2854 __ jmp(&done);
2855
2856 __ bind(&heap_number);
2857 if (CpuFeatures::IsSupported(SSE3)) {
2858 CpuFeatures::Scope scope(SSE3);
2859 NearLabel convert;
2860 // Use more powerful conversion when sse3 is available.
2861 // Load x87 register with heap number.
2862 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
2863 // Get exponent alone and check for too-big exponent.
2864 __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2865 __ and_(input_reg, HeapNumber::kExponentMask);
2866 const uint32_t kTooBigExponent =
2867 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2868 __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
2869 __ j(less, &convert);
2870 // Pop FPU stack before deoptimizing.
2871 __ ffree(0);
2872 __ fincstp();
2873 DeoptimizeIf(no_condition, instr->environment());
2874
2875 // Reserve space for 64 bit answer.
2876 __ bind(&convert);
2877 __ sub(Operand(esp), Immediate(kDoubleSize));
2878 // Do conversion, which cannot fail because we checked the exponent.
2879 __ fisttp_d(Operand(esp, 0));
2880 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
2881 __ add(Operand(esp), Immediate(kDoubleSize));
2882 } else {
2883 NearLabel deopt;
2884 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
2885 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2886 __ cvttsd2si(input_reg, Operand(xmm0));
2887 __ cmp(input_reg, 0x80000000u);
2888 __ j(not_equal, &done);
2889 // Check if the input was 0x80000000 (kMinInt).
2890 // If not, the conversion overflowed and we deoptimize.
2891 ExternalReference min_int = ExternalReference::address_of_min_int();
2892 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
2893 __ ucomisd(xmm_temp, xmm0);
2894 DeoptimizeIf(not_equal, instr->environment());
2895 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2896 }
2897 } else {
2898 // Deoptimize if we don't have a heap number.
2899 DeoptimizeIf(not_equal, instr->environment());
2900
2901 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
2902 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
2903 __ cvttsd2si(input_reg, Operand(xmm0));
2904 __ cvtsi2sd(xmm_temp, Operand(input_reg));
2905 __ ucomisd(xmm0, xmm_temp);
2906 DeoptimizeIf(not_equal, instr->environment());
2907 DeoptimizeIf(parity_even, instr->environment()); // NaN.
2908 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2909 __ test(input_reg, Operand(input_reg));
2910 __ j(not_zero, &done);
2911 __ movmskpd(input_reg, xmm0);
2912 __ and_(input_reg, 1);
2913 DeoptimizeIf(not_zero, instr->environment());
2914 }
2915 }
2916 __ bind(&done);
2917}
2918
2919
2920void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
2921 LOperand* input = instr->input();
2922 ASSERT(input->IsRegister());
2923 ASSERT(input->Equals(instr->result()));
2924
2925 Register input_reg = ToRegister(input);
2926
2927 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
2928
2929 // Smi check.
2930 __ test(input_reg, Immediate(kSmiTagMask));
2931 __ j(not_zero, deferred->entry());
2932
2933 // Smi to int32 conversion
2934 __ SmiUntag(input_reg); // Untag smi.
2935
2936 __ bind(deferred->exit());
2937}
2938
2939
2940void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
2941 LOperand* input = instr->input();
2942 ASSERT(input->IsRegister());
2943 LOperand* result = instr->result();
2944 ASSERT(result->IsDoubleRegister());
2945
2946 Register input_reg = ToRegister(input);
2947 XMMRegister result_reg = ToDoubleRegister(result);
2948
2949 EmitNumberUntagD(input_reg, result_reg, instr->environment());
2950}
2951
2952
2953void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
2954 LOperand* input = instr->input();
2955 ASSERT(input->IsDoubleRegister());
2956 LOperand* result = instr->result();
2957 ASSERT(result->IsRegister());
2958
2959 XMMRegister input_reg = ToDoubleRegister(input);
2960 Register result_reg = ToRegister(result);
2961
2962 if (instr->truncating()) {
2963 // Performs a truncating conversion of a floating point number as used by
2964 // the JS bitwise operations.
2965 __ cvttsd2si(result_reg, Operand(input_reg));
2966 __ cmp(result_reg, 0x80000000u);
2967 if (CpuFeatures::IsSupported(SSE3)) {
2968 // This will deoptimize if the exponent of the input is out of range.
2969 CpuFeatures::Scope scope(SSE3);
2970 NearLabel convert, done;
2971 __ j(not_equal, &done);
2972 __ sub(Operand(esp), Immediate(kDoubleSize));
2973 __ movdbl(Operand(esp, 0), input_reg);
2974 // Get exponent alone and check for too-big exponent.
2975 __ mov(result_reg, Operand(esp, sizeof(int32_t)));
2976 __ and_(result_reg, HeapNumber::kExponentMask);
2977 const uint32_t kTooBigExponent =
2978 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2979 __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
2980 __ j(less, &convert);
2981 __ add(Operand(esp), Immediate(kDoubleSize));
2982 DeoptimizeIf(no_condition, instr->environment());
2983 __ bind(&convert);
2984 // Do conversion, which cannot fail because we checked the exponent.
2985 __ fld_d(Operand(esp, 0));
2986 __ fisttp_d(Operand(esp, 0));
2987 __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
2988 __ add(Operand(esp), Immediate(kDoubleSize));
2989 __ bind(&done);
2990 } else {
2991 // This will bail out if the input was not in the int32 range (or,
2992 // unfortunately, if the input was 0x80000000).
2993 DeoptimizeIf(equal, instr->environment());
2994 }
2995 } else {
2996 NearLabel done;
2997 __ cvttsd2si(result_reg, Operand(input_reg));
2998 __ cvtsi2sd(xmm0, Operand(result_reg));
2999 __ ucomisd(xmm0, input_reg);
3000 DeoptimizeIf(not_equal, instr->environment());
3001 DeoptimizeIf(parity_even, instr->environment()); // NaN.
3002 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3003 // The integer converted back is equal to the original. We
3004 // only have to test if we got -0 as an input.
3005 __ test(result_reg, Operand(result_reg));
3006 __ j(not_zero, &done);
3007 __ movmskpd(result_reg, input_reg);
3008 // Bit 0 contains the sign of the double in input_reg.
3009 // If input was positive, we are ok and return 0, otherwise
3010 // deoptimize.
3011 __ and_(result_reg, 1);
3012 DeoptimizeIf(not_zero, instr->environment());
3013 }
3014 __ bind(&done);
3015 }
3016}
3017
3018
3019void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
3020 LOperand* input = instr->input();
3021 ASSERT(input->IsRegister());
3022 __ test(ToRegister(input), Immediate(kSmiTagMask));
3023 DeoptimizeIf(instr->condition(), instr->environment());
3024}
3025
3026
3027void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
3028 Register input = ToRegister(instr->input());
3029 Register temp = ToRegister(instr->temp());
3030 InstanceType first = instr->hydrogen()->first();
3031 InstanceType last = instr->hydrogen()->last();
3032
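// Load the map and check that the instance type is within [first, last].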
3033 __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
3034 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
3035 static_cast<int8_t>(first));
3036
3037 // If there is only one type in the interval check for equality.
3038 if (first == last) {
3039 DeoptimizeIf(not_equal, instr->environment());
3040 } else {
3041 DeoptimizeIf(below, instr->environment());
3042 // Omit check for the last type.
3043 if (last != LAST_TYPE) {
3044 __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
3045 static_cast<int8_t>(last));
3046 DeoptimizeIf(above, instr->environment());
3047 }
3048 }
3049}
3050
3051
3052void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
3053 ASSERT(instr->input()->IsRegister());
3054 Register reg = ToRegister(instr->input());
3055 __ cmp(reg, instr->hydrogen()->target());
3056 DeoptimizeIf(not_equal, instr->environment());
3057}
3058
3059
3060void LCodeGen::DoCheckMap(LCheckMap* instr) {
3061 LOperand* input = instr->input();
3062 ASSERT(input->IsRegister());
3063 Register reg = ToRegister(input);
3064 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
3065 instr->hydrogen()->map());
3066 DeoptimizeIf(not_equal, instr->environment());
3067}
3068
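// New-space objects can move, so a prototype in new space is referenced
// indirectly through a global property cell instead of being embedded
// directly in the code object.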
3069
3070void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
3071 if (Heap::InNewSpace(*prototype)) {
3072 Handle<JSGlobalPropertyCell> cell =
3073 Factory::NewJSGlobalPropertyCell(prototype);
3074 __ mov(result, Operand::Cell(cell));
3075 } else {
3076 __ mov(result, prototype);
3077 }
3078}
3079
3080
3081void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
3082 Register reg = ToRegister(instr->temp());
3083
3084 Handle<JSObject> holder = instr->holder();
3085 Handle<Map> receiver_map = instr->receiver_map();
3086 Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
3087
3088 // Load prototype object.
3089 LoadPrototype(reg, current_prototype);
3090
3091 // Check prototype maps up to the holder.
3092 while (!current_prototype.is_identical_to(holder)) {
3093 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
3094 Handle<Map>(current_prototype->map()));
3095 DeoptimizeIf(not_equal, instr->environment());
3096 current_prototype =
3097 Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
3098 // Load next prototype object.
3099 LoadPrototype(reg, current_prototype);
3100 }
3101
3102 // Check the holder map.
3103 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
3104 Handle<Map>(current_prototype->map()));
3105 DeoptimizeIf(not_equal, instr->environment());
3106}
3107
3108
3109void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
3110 // Set up the parameters to the stub/runtime call.
3111 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3112 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
3113 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
3114 __ push(Immediate(instr->hydrogen()->constant_elements()));
3115
3116 // Pick the right runtime function or stub to call.
3117 int length = instr->hydrogen()->length();
3118 if (instr->hydrogen()->IsCopyOnWrite()) {
3119 ASSERT(instr->hydrogen()->depth() == 1);
3120 FastCloneShallowArrayStub::Mode mode =
3121 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
3122 FastCloneShallowArrayStub stub(mode, length);
3123 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3124 } else if (instr->hydrogen()->depth() > 1) {
3125 CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
3126 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
3127 CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
3128 } else {
3129 FastCloneShallowArrayStub::Mode mode =
3130 FastCloneShallowArrayStub::CLONE_ELEMENTS;
3131 FastCloneShallowArrayStub stub(mode, length);
3132 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3133 }
3134}
3135
3136
3137void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
3138 // Set up the parameters to the stub/runtime call.
3139 __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3140 __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
3141 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
3142 __ push(Immediate(instr->hydrogen()->constant_properties()));
3143 __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
3144
3145 // Pick the right runtime function to call.
3146 if (instr->hydrogen()->depth() > 1) {
3147 CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
3148 } else {
3149 CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
3150 }
3151}
3152
3153
3154void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
3155 NearLabel materialized;
3156 // Registers will be used as follows:
3157 // edi = JS function.
3158 // ecx = literals array.
3159 // ebx = regexp literal.
3160 // eax = regexp literal clone.
3161 __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
3162 __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
3163 int literal_offset = FixedArray::kHeaderSize +
3164 instr->hydrogen()->literal_index() * kPointerSize;
3165 __ mov(ebx, FieldOperand(ecx, literal_offset));
3166 __ cmp(ebx, Factory::undefined_value());
3167 __ j(not_equal, &materialized);
3168
3169 // Create the regexp literal using a runtime function.
3170 // The result will be in eax.
3171 __ push(ecx);
3172 __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
3173 __ push(Immediate(instr->hydrogen()->pattern()));
3174 __ push(Immediate(instr->hydrogen()->flags()));
3175 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
3176 __ mov(ebx, eax);
3177
3178 __ bind(&materialized);
3179 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
3180 Label allocated, runtime_allocate;
3181 __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
3182 __ jmp(&allocated);
3183
3184 __ bind(&runtime_allocate);
3185 __ push(ebx);
3186 __ push(Immediate(Smi::FromInt(size)));
3187 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
3188 __ pop(ebx);
3189
3190 __ bind(&allocated);
3191 // Copy the content into the newly allocated memory.
3192 // (Unroll copy loop once for better throughput).
3193 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
3194 __ mov(edx, FieldOperand(ebx, i));
3195 __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
3196 __ mov(FieldOperand(eax, i), edx);
3197 __ mov(FieldOperand(eax, i + kPointerSize), ecx);
3198 }
3199 if ((size % (2 * kPointerSize)) != 0) {
3200 __ mov(edx, FieldOperand(ebx, size - kPointerSize));
3201 __ mov(FieldOperand(eax, size - kPointerSize), edx);
3202 }
3203}
3204
3205
3206void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
3207 // Use the fast case closure allocation code that allocates in new
3208 // space for nested functions that don't need literals cloning.
3209 Handle<SharedFunctionInfo> shared_info = instr->shared_info();
3210 bool pretenure = instr->hydrogen()->pretenure();
3211 if (shared_info->num_literals() == 0 && !pretenure) {
3212 FastNewClosureStub stub;
3213 __ push(Immediate(shared_info));
3214 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
3215 } else {
3216 __ push(esi);
3217 __ push(Immediate(shared_info));
3218 __ push(Immediate(pretenure
3219 ? Factory::true_value()
3220 : Factory::false_value()));
3221 CallRuntime(Runtime::kNewClosure, 3, instr);
3222 }
3223}
3224
3225
3226void LCodeGen::DoTypeof(LTypeof* instr) {
3227 LOperand* input = instr->input();
3228 if (input->IsConstantOperand()) {
3229 __ push(ToImmediate(input));
3230 } else {
3231 __ push(ToOperand(input));
3232 }
3233 CallRuntime(Runtime::kTypeof, 1, instr);
3234}
3235
3236
3237void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
3238 Register input = ToRegister(instr->input());
3239 Register result = ToRegister(instr->result());
3240 Label true_label;
3241 Label false_label;
3242 NearLabel done;
3243
3244 Condition final_branch_condition = EmitTypeofIs(&true_label,
3245 &false_label,
3246 input,
3247 instr->type_literal());
3248 __ j(final_branch_condition, &true_label);
3249 __ bind(&false_label);
3250 __ mov(result, Handle<Object>(Heap::false_value()));
3251 __ jmp(&done);
3252
3253 __ bind(&true_label);
3254 __ mov(result, Handle<Object>(Heap::true_value()));
3255
3256 __ bind(&done);
3257}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->input());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(Heap::number_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           Factory::heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::string_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
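    // Undetectable objects (used, e.g., to emulate document.all) must
    // report 'undefined', so they fail the string check.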
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
    final_branch_condition = below;

  } else if (type_name->Equals(Heap::boolean_symbol())) {
    __ cmp(input, Handle<Object>(Heap::true_value()));
    __ j(equal, true_label);
    __ cmp(input, Handle<Object>(Heap::false_value()));
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::undefined_symbol())) {
    __ cmp(input, Factory::undefined_value());
    __ j(equal, true_label);
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(Heap::function_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    // Regular expressions => 'function' (they are callable).
    __ CmpInstanceType(input, JS_REGEXP_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::object_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ cmp(input, Factory::null_value());
    __ j(equal, true_label);
    // Regular expressions => 'function', not 'object'.
    __ CmpObjectType(input, JS_REGEXP_TYPE, input);
    __ j(equal, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    // Check for JS objects => true.
    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
    final_branch_condition = below_equal;

  } else {
    final_branch_condition = not_equal;
    __ jmp(false_label);
    // A dead branch instruction will be generated after this point.
  }

  return final_branch_condition;
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code for lazy bailout instruction. Used to capture environment after a
  // call for populating the safepoint data with deoptimization data.
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
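  // The DELETE builtin expects two arguments on the stack: the receiver and
  // the property key.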
  __ push(ToOperand(obj));
  if (key->IsConstantOperand()) {
    __ push(ToImmediate(key));
  } else {
    __ push(ToOperand(key));
  }
  RecordPosition(instr->pointer_map()->position());
  SafepointGenerator safepoint_generator(this,
                                         instr->pointer_map(),
                                         Safepoint::kNoDeoptimizationIndex);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Perform stack overflow check.
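  // The stack grows downwards, so esp at or above the limit means there is
  // still room; otherwise the stack check stub calls into the runtime.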
  NearLabel done;
  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
  __ cmp(esp, Operand::StaticVariable(stack_limit));
  __ j(above_equal, &done);

  StackCheckStub stub;
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&done);
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


#undef __

} } // namespace v8::internal