// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "ia32/lithium-codegen-ia32.h"
#include "code-stubs.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


class SafepointGenerator : public PostCallGenerator {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     int deoptimization_index)
      : codegen_(codegen),
        pointers_(pointers),
        deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  virtual void Generate() {
    codegen_->RecordSafepoint(pointers_, deoptimization_index_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  int deoptimization_index_;
};


class LGapNode: public ZoneObject {
 public:
  explicit LGapNode(LOperand* operand)
      : operand_(operand), resolved_(false), visited_id_(-1) { }

  LOperand* operand() const { return operand_; }
  bool IsResolved() const { return !IsAssigned() || resolved_; }
  void MarkResolved() {
    ASSERT(!IsResolved());
    resolved_ = true;
  }
  int visited_id() const { return visited_id_; }
  void set_visited_id(int id) {
    ASSERT(id > visited_id_);
    visited_id_ = id;
  }

  bool IsAssigned() const { return assigned_from_.is_set(); }
  LGapNode* assigned_from() const { return assigned_from_.get(); }
  void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }

 private:
  LOperand* operand_;
  SetOncePointer<LGapNode> assigned_from_;
  bool resolved_;
  int visited_id_;
};


LGapResolver::LGapResolver()
    : nodes_(32),
      identified_cycles_(4),
      result_(16),
      next_visited_id_(0) {
}


const ZoneList<LMoveOperands>* LGapResolver::Resolve(
    const ZoneList<LMoveOperands>* moves,
    LOperand* marker_operand) {
  nodes_.Rewind(0);
  identified_cycles_.Rewind(0);
  result_.Rewind(0);
  next_visited_id_ = 0;

  for (int i = 0; i < moves->length(); ++i) {
    LMoveOperands move = moves->at(i);
    if (!move.IsRedundant()) RegisterMove(move);
  }

  for (int i = 0; i < identified_cycles_.length(); ++i) {
    ResolveCycle(identified_cycles_[i], marker_operand);
  }

  int unresolved_nodes;
  do {
    unresolved_nodes = 0;
    for (int j = 0; j < nodes_.length(); j++) {
      LGapNode* node = nodes_[j];
      if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
        AddResultMove(node->assigned_from(), node);
        node->MarkResolved();
      }
      if (!node->IsResolved()) ++unresolved_nodes;
    }
  } while (unresolved_nodes > 0);
  return &result_;
}


void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
  AddResultMove(from->operand(), to->operand());
}


void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
  result_.Add(LMoveOperands(from, to));
}


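// Editorial note (not from the original sources): a worked example of the
// cycle breaking below. For the swap { eax -> ebx, ebx -> eax },
// RegisterMove records the cycle and ResolveCycle appends the moves
// [M -> eax, eax -> ebx, ebx -> M], where M is the marker operand.
// DoParallelMove walks the result list in reverse, so the emitted order is
//   M <- ebx;  ebx <- eax;  eax <- M
// i.e. one value of the cycle is routed through the marker (a scratch).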
void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
  ZoneList<LOperand*> cycle_operands(8);
  cycle_operands.Add(marker_operand);
  LGapNode* cur = start;
  do {
    cur->MarkResolved();
    cycle_operands.Add(cur->operand());
    cur = cur->assigned_from();
  } while (cur != start);
  cycle_operands.Add(marker_operand);

  for (int i = cycle_operands.length() - 1; i > 0; --i) {
    LOperand* from = cycle_operands[i];
    LOperand* to = cycle_operands[i - 1];
    AddResultMove(from, to);
  }
}


bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
  ASSERT(a != b);
  LGapNode* cur = a;
  while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
    cur->set_visited_id(visited_id);
    cur = cur->assigned_from();
  }

  return cur == b;
}


bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
  ASSERT(a != b);
  return CanReach(a, b, next_visited_id_++);
}


void LGapResolver::RegisterMove(LMoveOperands move) {
  if (move.from()->IsConstantOperand()) {
    // Constant moves should be last in the machine code, and the result
    // list is emitted in reverse order, so add them first to the result
    // set.
    AddResultMove(move.from(), move.to());
  } else {
    LGapNode* from = LookupNode(move.from());
    LGapNode* to = LookupNode(move.to());
    if (to->IsAssigned() && to->assigned_from() == from) {
      move.Eliminate();
      return;
    }
    ASSERT(!to->IsAssigned());
    if (CanReach(from, to)) {
      // This move introduces a cycle. Save it for ResolveCycle.
      identified_cycles_.Add(from);
    }
    to->set_assigned_from(from);
  }
}


LGapNode* LGapResolver::LookupNode(LOperand* operand) {
  for (int i = 0; i < nodes_.length(); ++i) {
    if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
  }

  // No node found => create a new one.
  LGapNode* result = new LGapNode(operand);
  nodes_.Add(result);
  return result;
}


#define __ masm()->

bool LCodeGen::GenerateCode() {
  HPhase phase("Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;
  CpuFeatures::Scope scope(SSE2);
  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(StackSlotCount());
  code->set_safepoint_table_start(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::Abort(const char* format, ...) {
  if (FLAG_trace_bailout) {
    SmartPointer<char> debug_name = graph()->debug_name()->ToCString();
    PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name);
    va_list arguments;
    va_start(arguments, format);
    OS::VPrint(format, arguments);
    va_end(arguments);
    PrintF("\n");
  }
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  memcpy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


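// Editorial sketch of the frame that GeneratePrologue builds (derived from
// the pushes below and from the offsets in ToOperand):
//   ebp + 4 : return address
//   ebp + 0 : caller's frame pointer
//   ebp - 4 : context (esi)
//   ebp - 8 : JS function (edi)
//   ebp - 12 downwards: spill slots, zapped with kSlotsZapValue when
//   FLAG_debug_code is set.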
bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

#ifdef DEBUG
  if (strlen(FLAG_stop_at) > 0 &&
      info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
    __ int3();
  }
#endif

  __ push(ebp);  // Caller's frame pointer.
  __ mov(ebp, esp);
  __ push(esi);  // Callee's context.
  __ push(edi);  // Callee's JS function.

  // Reserve space for the stack slots needed by the code.
  int slots = StackSlotCount();
  if (slots > 0) {
    if (FLAG_debug_code) {
      __ mov(Operand(eax), Immediate(slots));
      Label loop;
      __ bind(&loop);
      __ push(Immediate(kSlotsZapValue));
      __ dec(eax);
      __ j(not_zero, &loop);
    } else {
      __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
      // On Windows, you may not access the stack more than one page below
      // the most recently mapped page. To make the allocated area randomly
      // accessible, we write to each page in turn (the value is irrelevant).
      const int kPageSize = 4 * KB;
      for (int offset = slots * kPointerSize - kPageSize;
           offset > 0;
           offset -= kPageSize) {
        __ mov(Operand(esp, offset), eax);
      }
#endif
    }
  }

  // Trace the call.
  if (FLAG_trace) {
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);
    if (instr->IsLabel()) {
      LLabel* label = LLabel::cast(instr);
      emit_instructions = !label->HasReplacement();
    }

    if (emit_instructions) {
      Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
      instr->CompileToNative(this);
    }
  }
  return !is_aborted();
}


LInstruction* LCodeGen::GetNextInstruction() {
  if (current_instruction_ < instructions_->length() - 1) {
    return instructions_->at(current_instruction_ + 1);
  } else {
    return NULL;
  }
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    LDeferredCode* code = deferred_[i];
    __ bind(code->entry());
    code->Generate();
    __ jmp(code->exit());
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  safepoints_.Emit(masm(), StackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int LCodeGen::ToInteger32(LConstantOperand* op) const {
  Handle<Object> value = chunk_->LookupLiteral(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32());
  ASSERT(static_cast<double>(static_cast<int32_t>(value->Number())) ==
      value->Number());
  return static_cast<int32_t>(value->Number());
}


Immediate LCodeGen::ToImmediate(LOperand* op) {
  LConstantOperand* const_op = LConstantOperand::cast(op);
  Handle<Object> literal = chunk_->LookupLiteral(const_op);
  Representation r = chunk_->LookupLiteralRepresentation(const_op);
  if (r.IsInteger32()) {
    ASSERT(literal->IsNumber());
    return Immediate(static_cast<int32_t>(literal->Number()));
  } else if (r.IsDouble()) {
    Abort("unsupported double immediate");
  }
  ASSERT(r.IsTagged());
  return Immediate(literal);
}


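// Editorial note on the offset arithmetic below (kPointerSize == 4 on
// ia32): spill slot index 0 maps to Operand(ebp, -12), just below the
// fixed frame part (saved ebp, context, function); incoming parameter
// index -1 maps to Operand(ebp, +8), just above the return address at
// ebp + 4.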
Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  int index = op->index();
  if (index >= 0) {
    // Local or spill slot. Skip the frame pointer, function, and
    // context in the fixed part of the frame.
    return Operand(ebp, -(index + 3) * kPointerSize);
  } else {
    // Incoming parameter. Skip the return address.
    return Operand(ebp, -(index - 1) * kPointerSize);
  }
}


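// Editorial note: WriteTranslation recurses on the outer environment
// before emitting its own frame, so for an inlined call chain the
// translation stream is ordered outermost frame first, each frame
// introduced by BeginFrame(ast_id, closure_id, height).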
void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->values()->length();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  int closure_id = DefineDeoptimizationLiteral(environment->closure());
  translation->BeginFrame(environment->ast_id(), closure_id, height);
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    // spilled_registers_ and spilled_double_registers_ are either
    // both NULL or both set.
    if (environment->spilled_registers() != NULL && value != NULL) {
      if (value->IsRegister() &&
          environment->spilled_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(translation,
                         environment->spilled_registers()[value->index()],
                         environment->HasTaggedValueAt(i));
      } else if (
          value->IsDoubleRegister() &&
          environment->spilled_double_registers()[value->index()] != NULL) {
        translation->MarkDuplicate();
        AddToTranslation(
            translation,
            environment->spilled_double_registers()[value->index()],
            false);
      }
    }

    AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
  }
}


void LCodeGen::AddToTranslation(Translation* translation,
                                LOperand* op,
                                bool is_tagged) {
  if (op == NULL) {
    // TODO(twuerthinger): Introduce marker operands to indicate that this
    // value is not present and must be reconstructed from the deoptimizer.
    // Currently this is only used for the arguments object.
    translation->StoreArgumentsObject();
  } else if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = StackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    Handle<Object> literal = chunk()->LookupLiteral(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(literal);
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  if (instr != NULL) {
    LPointerMap* pointers = instr->pointer_map();
    RecordPosition(pointers->position());
    __ call(code, mode);
    RegisterLazyDeoptimization(instr);
  } else {
    LPointerMap no_pointers(0);
    RecordPosition(no_pointers.position());
    __ call(code, mode);
    RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
  }

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::TYPE_RECORDING_BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  ASSERT(pointers != NULL);
  RecordPosition(pointers->position());

  __ CallRuntime(function, num_arguments);
  // Runtime calls to Throw are not supposed to ever return at the
  // call site, so don't register lazy deoptimization for these. We do
  // however have to record a safepoint since throwing exceptions can
  // cause garbage collections.
  // BUG(3243555): register a lazy deoptimization point at throw. We need
  // it to be able to inline functions containing a throw statement.
  if (!instr->IsThrow()) {
    RegisterLazyDeoptimization(instr);
  } else {
    RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
  }
}


void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) {
  // Create the environment to bail out to. If the call has side effects,
  // execution has to continue after the call; otherwise execution can
  // continue from a previous bailout point, repeating the call.
  LEnvironment* deoptimization_environment;
  if (instr->HasDeoptimizationEnvironment()) {
    deoptimization_environment = instr->deoptimization_environment();
  } else {
    deoptimization_environment = instr->environment();
  }

  RegisterEnvironmentForDeoptimization(deoptimization_environment);
  RecordSafepoint(instr->pointer_map(),
                  deoptimization_environment->deoptimization_index());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
    }
    Translation translation(&translations_, frame_count);
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    environment->Register(deoptimization_index, translation.index());
    deoptimizations_.Add(environment);
  }
}


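// Editorial note: the FLAG_deopt_every_n_times block below decrements a
// per-function counter stored in the SharedFunctionInfo and, when it
// reaches zero, resets it and deoptimizes unconditionally; presumably a
// stress-testing aid for the deoptimizer.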
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER);
  ASSERT(entry != NULL);
  if (entry == NULL) {
    Abort("bailout was not prepared");
    return;
  }

  if (FLAG_deopt_every_n_times != 0) {
    Handle<SharedFunctionInfo> shared(info_->shared_info());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ push(ebx);
    __ mov(ebx, shared);
    __ mov(eax, FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset));
    __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
    __ j(not_zero, &no_deopt);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(Smi::FromInt(FLAG_deopt_every_n_times)));
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);

    __ bind(&no_deopt);
    __ mov(FieldOperand(ebx, SharedFunctionInfo::kDeoptCounterOffset), eax);
    __ pop(ebx);
    __ pop(eax);
    __ popfd();
  }

  if (cc == no_condition) {
    if (FLAG_trap_on_deopt) __ int3();
    __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    if (FLAG_trap_on_deopt) {
      NearLabel done;
      __ j(NegateCondition(cc), &done);
      __ int3();
      __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
      __ bind(&done);
    } else {
      __ j(cc, entry, RelocInfo::RUNTIME_ENTRY, not_taken);
    }
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  ASSERT(FLAG_deopt);
  Handle<DeoptimizationInputData> data =
      Factory::NewDeoptimizationInputData(length, TENURED);

  data->SetTranslationByteArray(*translations_.CreateByteArray());
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      Factory::NewFixedArray(deoptimization_literals_.length(), TENURED);
  for (int i = 0; i < deoptimization_literals_.length(); i++) {
    literals->set(i, *deoptimization_literals_[i]);
  }
  data->SetLiteralArray(*literals);

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, Smi::FromInt(env->ast_id()));
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal);
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
                                                    deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    }
  }
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            int deoptimization_index) {
  const ZoneList<LOperand*>* operands = pointers->operands();
  Safepoint safepoint =
      safepoints_.DefineSafepointWithRegisters(
          masm(), arguments, deoptimization_index);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index());
    } else if (pointer->IsRegister()) {
      safepoint.DefinePointerRegister(ToRegister(pointer));
    }
  }
  // Register esi always contains a pointer to the context.
  safepoint.DefinePointerRegister(esi);
}


void LCodeGen::RecordPosition(int position) {
  if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::DoLabel(LLabel* label) {
  if (label->is_loop_header()) {
    Comment(";;; B%d - LOOP entry", label->block_id());
  } else {
    Comment(";;; B%d", label->block_id());
  }
  __ bind(label->label());
  current_block_ = label->block_id();
  LCodeGen::DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  // xmm0 must always be a scratch register.
  XMMRegister xmm_scratch = xmm0;
  LUnallocated marker_operand(LUnallocated::NONE);

  Register cpu_scratch = esi;
  bool destroys_cpu_scratch = false;

  const ZoneList<LMoveOperands>* moves =
      resolver_.Resolve(move->move_operands(), &marker_operand);
  for (int i = moves->length() - 1; i >= 0; --i) {
    LMoveOperands move = moves->at(i);
    LOperand* from = move.from();
    LOperand* to = move.to();
    ASSERT(!from->IsDoubleRegister() ||
           !ToDoubleRegister(from).is(xmm_scratch));
    ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
    ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
    ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
    if (from->IsConstantOperand()) {
      __ mov(ToOperand(to), ToImmediate(from));
    } else if (from == &marker_operand) {
      if (to->IsRegister() || to->IsStackSlot()) {
        __ mov(ToOperand(to), cpu_scratch);
        ASSERT(destroys_cpu_scratch);
      } else {
        ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
        __ movdbl(ToOperand(to), xmm_scratch);
      }
    } else if (to == &marker_operand) {
      if (from->IsRegister() || from->IsStackSlot()) {
        __ mov(cpu_scratch, ToOperand(from));
        destroys_cpu_scratch = true;
      } else {
        ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
        __ movdbl(xmm_scratch, ToOperand(from));
      }
    } else if (from->IsRegister()) {
      __ mov(ToOperand(to), ToRegister(from));
    } else if (to->IsRegister()) {
      __ mov(ToRegister(to), ToOperand(from));
    } else if (from->IsStackSlot()) {
      ASSERT(to->IsStackSlot());
      __ push(eax);
      __ mov(eax, ToOperand(from));
      __ mov(ToOperand(to), eax);
      __ pop(eax);
    } else if (from->IsDoubleRegister()) {
      __ movdbl(ToOperand(to), ToDoubleRegister(from));
    } else if (to->IsDoubleRegister()) {
      __ movdbl(ToDoubleRegister(to), ToOperand(from));
    } else {
      ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
      __ movdbl(xmm_scratch, ToOperand(from));
      __ movdbl(ToOperand(to), xmm_scratch);
    }
  }

  if (destroys_cpu_scratch) {
    __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
  }
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }

  LInstruction* next = GetNextInstruction();
  if (next != NULL && next->IsLazyBailout()) {
    int pc = masm()->pc_offset();
    safepoints_.SetPcAfterGap(pc);
  }
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCharAt: {
      StringCharAtStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::MathPow: {
      MathPowStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringAdd: {
      StringAddStub stub(NO_STRING_ADD_FLAGS);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Nothing to do.
}


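// Editorial note: the negative-zero check below covers cases like
// JavaScript's -8 % 2, which evaluates to -0; -0 has no int32
// representation, so when the left operand is negative and the remainder
// is 0 the code deoptimizes.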
void LCodeGen::DoModI(LModI* instr) {
  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(edx));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register right_reg = ToRegister(right);

  // Check for x % 0.
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Sign extend to edx.
  __ cdq();

  // Check for (0 % -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel positive_left;
    NearLabel done;
    __ test(eax, Operand(eax));
    __ j(not_sign, &positive_left);
    __ idiv(right_reg);

    // Test the remainder for 0, because then the result would be -0.
    __ test(edx, Operand(edx));
    __ j(not_zero, &done);

    DeoptimizeIf(no_condition, instr->environment());
    __ bind(&positive_left);
    __ idiv(right_reg);
    __ bind(&done);
  } else {
    __ idiv(right_reg);
  }
}


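// Editorial note: DoDivI deoptimizes whenever the result is not a plain
// int32: division by zero, 0 / -5 (which is -0 in JavaScript),
// kMinInt / -1 (the quotient 2147483648 overflows int32), and inexact
// divisions such as 7 / 2 (nonzero remainder).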
void LCodeGen::DoDivI(LDivI* instr) {
  LOperand* right = instr->right();
  ASSERT(ToRegister(instr->result()).is(eax));
  ASSERT(ToRegister(instr->left()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(eax));
  ASSERT(!ToRegister(instr->right()).is(edx));

  Register left_reg = eax;

  // Check for x / 0.
  Register right_reg = ToRegister(right);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(zero, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    NearLabel left_not_zero;
    __ test(left_reg, Operand(left_reg));
    __ j(not_zero, &left_not_zero);
    __ test(right_reg, ToOperand(right));
    DeoptimizeIf(sign, instr->environment());
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    NearLabel left_not_min_int;
    __ cmp(left_reg, kMinInt);
    __ j(not_zero, &left_not_min_int);
    __ cmp(right_reg, -1);
    DeoptimizeIf(zero, instr->environment());
    __ bind(&left_not_min_int);
  }

  // Sign extend to edx.
  __ cdq();
  __ idiv(right_reg);

  // Deoptimize if remainder is not 0.
  __ test(edx, Operand(edx));
  DeoptimizeIf(not_zero, instr->environment());
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    __ imul(left, left, ToInteger32(LConstantOperand::cast(right)));
  } else {
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    NearLabel done;
    __ test(left, Operand(left));
    __ j(not_zero, &done);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        DeoptimizeIf(no_condition, instr->environment());
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());

  if (right->IsConstantOperand()) {
    int right_operand = ToInteger32(LConstantOperand::cast(right));
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), right_operand);
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


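// Editorial note: the SHR deopt below handles JavaScript's unsigned
// shift. x >>> 0 with the sign bit set (e.g. -1 >>> 0 == 4294967295)
// yields a value above kMaxInt that an untagged int32 cannot hold, so the
// code tests bit 31 and deoptimizes; nonzero shift counts always clear
// the top bit and need no check.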
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));
  ASSERT(left->IsRegister());
  if (right->IsRegister()) {
    ASSERT(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count == 0 && instr->can_deopt()) {
          __ test(ToRegister(left), Immediate(0x80000000));
          DeoptimizeIf(not_zero, instr->environment());
        } else {
          __ shr(ToRegister(left), shift_count);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          __ shl(ToRegister(left), shift_count);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left), ToImmediate(right));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


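// Editorial note: in IEEE 754, +0.0 is the all-zero bit pattern while
// -0.0 has only the sign bit set (0x8000000000000000), so the BitCast
// comparison below takes the cheap xorpd path only for +0.0.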
void LCodeGen::DoConstantD(LConstantD* instr) {
  ASSERT(instr->result()->IsDoubleRegister());
  XMMRegister res = ToDoubleRegister(instr->result());
  double v = instr->value();
  // Use xor to produce +0.0 in a fast and compact way, but avoid doing so
  // if the constant is -0.0.
  if (BitCast<uint64_t, double>(v) == 0) {
    __ xorpd(res, res);
  } else {
    int32_t v_int32 = static_cast<int32_t>(v);
    if (static_cast<double>(v_int32) == v) {
      __ push_imm32(v_int32);
      __ cvtsi2sd(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kPointerSize));
    } else {
      uint64_t int_val = BitCast<uint64_t, double>(v);
      int32_t lower = static_cast<int32_t>(int_val);
      int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
      __ push_imm32(upper);
      __ push_imm32(lower);
      __ movdbl(res, Operand(esp, 0));
      __ add(Operand(esp), Immediate(2 * kPointerSize));
    }
  }
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  ASSERT(instr->result()->IsRegister());
  __ Set(ToRegister(instr->result()), Immediate(instr->value()));
}


void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->input());
  __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
}


void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
  Register result = ToRegister(instr->result());
  Register array = ToRegister(instr->input());
  __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
}


void LCodeGen::DoValueOf(LValueOf* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->temporary());
  ASSERT(input.is(result));
  NearLabel done;
  // If the object is a smi return the object.
  __ test(input, Immediate(kSmiTagMask));
  __ j(zero, &done);

  // If the object is not a value type, return the object.
  __ CmpObjectType(input, JS_VALUE_TYPE, map);
  __ j(not_equal, &done);
  __ mov(result, FieldOperand(input, JSValue::kValueOffset));

  __ bind(&done);
}


void LCodeGen::DoBitNotI(LBitNotI* instr) {
  LOperand* input = instr->input();
  ASSERT(input->Equals(instr->result()));
  __ not_(ToRegister(input));
}


void LCodeGen::DoThrow(LThrow* instr) {
  __ push(ToOperand(instr->input()));
  CallRuntime(Runtime::kThrow, 1, instr);

  if (FLAG_debug_code) {
    Comment("Unreachable code.");
    __ int3();
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  ASSERT(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ add(ToOperand(left), ToImmediate(right));
  } else {
    __ add(ToRegister(left), ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr->environment());
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  // Modulo uses a fixed result register.
  ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
  switch (instr->op()) {
    case Token::ADD:
      __ addsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::SUB:
      __ subsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MUL:
      __ mulsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::DIV:
      __ divsd(ToDoubleRegister(left), ToDoubleRegister(right));
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
      __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
      __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);

      // Return value is in st(0) on ia32.
      // Store it into the (fixed) result register.
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movdbl(ToDoubleRegister(instr->result()), Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  ASSERT(ToRegister(instr->left()).is(edx));
  ASSERT(ToRegister(instr->right()).is(eax));
  ASSERT(ToRegister(instr->result()).is(eax));

  TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


int LCodeGen::GetNextEmittedBlock(int block) {
  for (int i = block + 1; i < graph()->blocks()->length(); ++i) {
    LLabel* label = chunk_->GetLabel(i);
    if (!label->HasReplacement()) return i;
  }
  return -1;
}


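// Editorial note: EmitBranch exploits block layout. If the true target is
// the next emitted block, only a jump on the negated condition to the
// false target is needed; if the false target is next, a single
// conditional jump suffices; otherwise a conditional jump plus an
// unconditional jmp are emitted.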
1312void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) {
1313 int next_block = GetNextEmittedBlock(current_block_);
1314 right_block = chunk_->LookupDestination(right_block);
1315 left_block = chunk_->LookupDestination(left_block);
1316
1317 if (right_block == left_block) {
1318 EmitGoto(left_block);
1319 } else if (left_block == next_block) {
1320 __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
1321 } else if (right_block == next_block) {
1322 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1323 } else {
1324 __ j(cc, chunk_->GetAssemblyLabel(left_block));
1325 __ jmp(chunk_->GetAssemblyLabel(right_block));
1326 }
1327}
1328
1329
1330void LCodeGen::DoBranch(LBranch* instr) {
1331 int true_block = chunk_->LookupDestination(instr->true_block_id());
1332 int false_block = chunk_->LookupDestination(instr->false_block_id());
1333
1334 Representation r = instr->hydrogen()->representation();
1335 if (r.IsInteger32()) {
1336 Register reg = ToRegister(instr->input());
1337 __ test(reg, Operand(reg));
1338 EmitBranch(true_block, false_block, not_zero);
1339 } else if (r.IsDouble()) {
1340 XMMRegister reg = ToDoubleRegister(instr->input());
1341 __ xorpd(xmm0, xmm0);
1342 __ ucomisd(reg, xmm0);
1343 EmitBranch(true_block, false_block, not_equal);
1344 } else {
1345 ASSERT(r.IsTagged());
1346 Register reg = ToRegister(instr->input());
1347 if (instr->hydrogen()->type().IsBoolean()) {
1348 __ cmp(reg, Factory::true_value());
1349 EmitBranch(true_block, false_block, equal);
1350 } else {
1351 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1352 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1353
1354 __ cmp(reg, Factory::undefined_value());
1355 __ j(equal, false_label);
1356 __ cmp(reg, Factory::true_value());
1357 __ j(equal, true_label);
1358 __ cmp(reg, Factory::false_value());
1359 __ j(equal, false_label);
1360 __ test(reg, Operand(reg));
1361 __ j(equal, false_label);
1362 __ test(reg, Immediate(kSmiTagMask));
1363 __ j(zero, true_label);
1364
1365 // Test for double values. Zero is false.
1366 NearLabel call_stub;
1367 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
1368 Factory::heap_number_map());
1369 __ j(not_equal, &call_stub);
1370 __ fldz();
1371 __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
1372 __ FCmp();
1373 __ j(zero, false_label);
1374 __ jmp(true_label);
1375
1376 // The conversion stub doesn't cause garbage collections so it's
1377 // safe to not record a safepoint after the call.
1378 __ bind(&call_stub);
1379 ToBooleanStub stub;
1380 __ pushad();
1381 __ push(reg);
1382 __ CallStub(&stub);
1383 __ test(eax, Operand(eax));
1384 __ popad();
1385 EmitBranch(true_block, false_block, not_zero);
1386 }
1387 }
1388}
1389
1390
1391void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
1392 block = chunk_->LookupDestination(block);
1393 int next_block = GetNextEmittedBlock(current_block_);
1394 if (block != next_block) {
1395 // Perform stack overflow check if this goto needs it before jumping.
1396 if (deferred_stack_check != NULL) {
1397 ExternalReference stack_limit =
1398 ExternalReference::address_of_stack_limit();
1399 __ cmp(esp, Operand::StaticVariable(stack_limit));
1400 __ j(above_equal, chunk_->GetAssemblyLabel(block));
1401 __ jmp(deferred_stack_check->entry());
1402 deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
1403 } else {
1404 __ jmp(chunk_->GetAssemblyLabel(block));
1405 }
1406 }
1407}
1408
1409
1410void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
1411 __ pushad();
1412 __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
1413 RecordSafepointWithRegisters(
1414 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
1415 __ popad();
1416}
1417
1418void LCodeGen::DoGoto(LGoto* instr) {
1419 class DeferredStackCheck: public LDeferredCode {
1420 public:
1421 DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
1422 : LDeferredCode(codegen), instr_(instr) { }
1423 virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
1424 private:
1425 LGoto* instr_;
1426 };
1427
1428 DeferredStackCheck* deferred = NULL;
1429 if (instr->include_stack_check()) {
1430 deferred = new DeferredStackCheck(this, instr);
1431 }
1432 EmitGoto(instr->block_id(), deferred);
1433}
1434
1435
1436Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
1437 Condition cond = no_condition;
1438 switch (op) {
1439 case Token::EQ:
1440 case Token::EQ_STRICT:
1441 cond = equal;
1442 break;
1443 case Token::LT:
1444 cond = is_unsigned ? below : less;
1445 break;
1446 case Token::GT:
1447 cond = is_unsigned ? above : greater;
1448 break;
1449 case Token::LTE:
1450 cond = is_unsigned ? below_equal : less_equal;
1451 break;
1452 case Token::GTE:
1453 cond = is_unsigned ? above_equal : greater_equal;
1454 break;
1455 case Token::IN:
1456 case Token::INSTANCEOF:
1457 default:
1458 UNREACHABLE();
1459 }
1460 return cond;
1461}
1462
1463
1464void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
1465 if (right->IsConstantOperand()) {
1466 __ cmp(ToOperand(left), ToImmediate(right));
1467 } else {
1468 __ cmp(ToRegister(left), ToOperand(right));
1469 }
1470}
1471
1472
1473void LCodeGen::DoCmpID(LCmpID* instr) {
1474 LOperand* left = instr->left();
1475 LOperand* right = instr->right();
1476 LOperand* result = instr->result();
1477
1478 NearLabel unordered;
1479 if (instr->is_double()) {
1480 // Don't base result on EFLAGS when a NaN is involved. Instead
1481 // jump to the unordered case, which produces a false value.
1482 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1483 __ j(parity_even, &unordered, not_taken);
1484 } else {
1485 EmitCmpI(left, right);
1486 }
1487
1488 NearLabel done;
1489 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1490 __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
1491 __ j(cc, &done);
1492
1493 __ bind(&unordered);
1494 __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
1495 __ bind(&done);
1496}
1497
1498
1499void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
1500 LOperand* left = instr->left();
1501 LOperand* right = instr->right();
1502 int false_block = chunk_->LookupDestination(instr->false_block_id());
1503 int true_block = chunk_->LookupDestination(instr->true_block_id());
1504
1505 if (instr->is_double()) {
1506 // Don't base result on EFLAGS when a NaN is involved. Instead
1507 // jump to the false block.
1508 __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
1509 __ j(parity_even, chunk_->GetAssemblyLabel(false_block));
1510 } else {
1511 EmitCmpI(left, right);
1512 }
1513
1514 Condition cc = TokenToCondition(instr->op(), instr->is_double());
1515 EmitBranch(true_block, false_block, cc);
1516}
1517
1518
1519void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
1520 Register left = ToRegister(instr->left());
1521 Register right = ToRegister(instr->right());
1522 Register result = ToRegister(instr->result());
1523
1524 __ cmp(left, Operand(right));
1525 __ mov(result, Handle<Object>(Heap::true_value()));
1526 NearLabel done;
1527 __ j(equal, &done);
1528 __ mov(result, Handle<Object>(Heap::false_value()));
1529 __ bind(&done);
1530}
1531
1532
1533void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
1534 Register left = ToRegister(instr->left());
1535 Register right = ToRegister(instr->right());
1536 int false_block = chunk_->LookupDestination(instr->false_block_id());
1537 int true_block = chunk_->LookupDestination(instr->true_block_id());
1538
1539 __ cmp(left, Operand(right));
1540 EmitBranch(true_block, false_block, equal);
1541}
1542
1543
1544void LCodeGen::DoIsNull(LIsNull* instr) {
1545 Register reg = ToRegister(instr->input());
1546 Register result = ToRegister(instr->result());
1547
1548 // TODO(fsc): If the expression is known to be a smi, then it's
1549 // definitely not null. Materialize false.
1550
1551 __ cmp(reg, Factory::null_value());
1552 if (instr->is_strict()) {
1553 __ mov(result, Handle<Object>(Heap::true_value()));
1554 NearLabel done;
1555 __ j(equal, &done);
1556 __ mov(result, Handle<Object>(Heap::false_value()));
1557 __ bind(&done);
1558 } else {
1559 NearLabel true_value, false_value, done;
1560 __ j(equal, &true_value);
1561 __ cmp(reg, Factory::undefined_value());
1562 __ j(equal, &true_value);
1563 __ test(reg, Immediate(kSmiTagMask));
1564 __ j(zero, &false_value);
1565 // Check for undetectable objects by looking in the bit field in
1566 // the map. The object has already been smi checked.
1567 Register scratch = result;
1568 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1569 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1570 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1571 __ j(not_zero, &true_value);
1572 __ bind(&false_value);
1573 __ mov(result, Handle<Object>(Heap::false_value()));
1574 __ jmp(&done);
1575 __ bind(&true_value);
1576 __ mov(result, Handle<Object>(Heap::true_value()));
1577 __ bind(&done);
1578 }
1579}
1580
1581
1582void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
1583 Register reg = ToRegister(instr->input());
1584
1585 // TODO(fsc): If the expression is known to be a smi, then it's
1586 // definitely not null. Jump to the false block.
1587
1588 int true_block = chunk_->LookupDestination(instr->true_block_id());
1589 int false_block = chunk_->LookupDestination(instr->false_block_id());
1590
1591 __ cmp(reg, Factory::null_value());
1592 if (instr->is_strict()) {
1593 EmitBranch(true_block, false_block, equal);
1594 } else {
1595 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1596 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1597 __ j(equal, true_label);
1598 __ cmp(reg, Factory::undefined_value());
1599 __ j(equal, true_label);
1600 __ test(reg, Immediate(kSmiTagMask));
1601 __ j(zero, false_label);
1602 // Check for undetectable objects by looking in the bit field in
1603 // the map. The object has already been smi checked.
1604 Register scratch = ToRegister(instr->temp());
1605 __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
1606 __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
1607 __ test(scratch, Immediate(1 << Map::kIsUndetectable));
1608 EmitBranch(true_block, false_block, not_zero);
1609 }
1610}
1611
1612
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00001613Condition LCodeGen::EmitIsObject(Register input,
1614 Register temp1,
1615 Register temp2,
1616 Label* is_not_object,
1617 Label* is_object) {
1618 ASSERT(!input.is(temp1));
1619 ASSERT(!input.is(temp2));
1620 ASSERT(!temp1.is(temp2));
1621
1622 __ test(input, Immediate(kSmiTagMask));
1623 __ j(equal, is_not_object);
1624
1625 __ cmp(input, Factory::null_value());
1626 __ j(equal, is_object);
1627
1628 __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
1629 // Undetectable objects behave like undefined.
1630 __ movzx_b(temp2, FieldOperand(temp1, Map::kBitFieldOffset));
1631 __ test(temp2, Immediate(1 << Map::kIsUndetectable));
1632 __ j(not_zero, is_not_object);
1633
1634 __ movzx_b(temp2, FieldOperand(temp1, Map::kInstanceTypeOffset));
1635 __ cmp(temp2, FIRST_JS_OBJECT_TYPE);
1636 __ j(below, is_not_object);
1637 __ cmp(temp2, LAST_JS_OBJECT_TYPE);
1638 return below_equal;
1639}
1640
1641
1642void LCodeGen::DoIsObject(LIsObject* instr) {
1643 Register reg = ToRegister(instr->input());
1644 Register result = ToRegister(instr->result());
1645 Register temp = ToRegister(instr->temp());
1646 Label is_false, is_true, done;
1647
1648 Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
1649 __ j(true_cond, &is_true);
1650
1651 __ bind(&is_false);
1652 __ mov(result, Handle<Object>(Heap::false_value()));
1653 __ jmp(&done);
1654
1655 __ bind(&is_true);
1656 __ mov(result, Handle<Object>(Heap::true_value()));
1657
1658 __ bind(&done);
1659}
1660
1661
1662void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
1663 Register reg = ToRegister(instr->input());
1664 Register temp = ToRegister(instr->temp());
1665 Register temp2 = ToRegister(instr->temp2());
1666
1667 int true_block = chunk_->LookupDestination(instr->true_block_id());
1668 int false_block = chunk_->LookupDestination(instr->false_block_id());
1669 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1670 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1671
1672 Condition true_cond = EmitIsObject(reg, temp, temp2, false_label, true_label);
1673
1674 EmitBranch(true_block, false_block, true_cond);
1675}
1676
1677
kasperl@chromium.orga5551262010-12-07 12:49:48 +00001678void LCodeGen::DoIsSmi(LIsSmi* instr) {
1679 Operand input = ToOperand(instr->input());
1680 Register result = ToRegister(instr->result());
1681
1682 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1683 __ test(input, Immediate(kSmiTagMask));
1684 __ mov(result, Handle<Object>(Heap::true_value()));
1685 NearLabel done;
1686 __ j(zero, &done);
1687 __ mov(result, Handle<Object>(Heap::false_value()));
1688 __ bind(&done);
1689}
1690
1691
1692void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
1693 Operand input = ToOperand(instr->input());
1694
1695 int true_block = chunk_->LookupDestination(instr->true_block_id());
1696 int false_block = chunk_->LookupDestination(instr->false_block_id());
1697
1698 __ test(input, Immediate(kSmiTagMask));
1699 EmitBranch(true_block, false_block, zero);
1700}
1701
1702
1703InstanceType LHasInstanceType::TestType() {
1704 InstanceType from = hydrogen()->from();
1705 InstanceType to = hydrogen()->to();
1706 if (from == FIRST_TYPE) return to;
1707 ASSERT(from == to || to == LAST_TYPE);
1708 return from;
1709}
1710
1711
1712
1713Condition LHasInstanceType::BranchCondition() {
1714 InstanceType from = hydrogen()->from();
1715 InstanceType to = hydrogen()->to();
1716 if (from == to) return equal;
1717 if (to == LAST_TYPE) return above_equal;
1718 if (from == FIRST_TYPE) return below_equal;
1719 UNREACHABLE();
1720 return equal;
1721}
1722
1723
1724void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
1725 Register input = ToRegister(instr->input());
1726 Register result = ToRegister(instr->result());
1727
1728 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1729 __ test(input, Immediate(kSmiTagMask));
1730 NearLabel done, is_false;
1731 __ j(zero, &is_false);
1732 __ CmpObjectType(input, instr->TestType(), result);
1733 __ j(NegateCondition(instr->BranchCondition()), &is_false);
1734 __ mov(result, Handle<Object>(Heap::true_value()));
1735 __ jmp(&done);
1736 __ bind(&is_false);
1737 __ mov(result, Handle<Object>(Heap::false_value()));
1738 __ bind(&done);
1739}
1740
1741
1742void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
1743 Register input = ToRegister(instr->input());
1744 Register temp = ToRegister(instr->temp());
1745
1746 int true_block = chunk_->LookupDestination(instr->true_block_id());
1747 int false_block = chunk_->LookupDestination(instr->false_block_id());
1748
1749 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1750
1751 __ test(input, Immediate(kSmiTagMask));
1752 __ j(zero, false_label);
1753
1754 __ CmpObjectType(input, instr->TestType(), temp);
1755 EmitBranch(true_block, false_block, instr->BranchCondition());
1756}
1757
1758
1759void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
1760 Register input = ToRegister(instr->input());
1761 Register result = ToRegister(instr->result());
1762
1763 ASSERT(instr->hydrogen()->value()->representation().IsTagged());
1764 __ mov(result, Handle<Object>(Heap::true_value()));
1765 __ test(FieldOperand(input, String::kHashFieldOffset),
1766 Immediate(String::kContainsCachedArrayIndexMask));
1767 NearLabel done;
1768 __ j(not_zero, &done);
1769 __ mov(result, Handle<Object>(Heap::false_value()));
1770 __ bind(&done);
1771}
1772
1773
1774void LCodeGen::DoHasCachedArrayIndexAndBranch(
1775 LHasCachedArrayIndexAndBranch* instr) {
1776 Register input = ToRegister(instr->input());
1777
1778 int true_block = chunk_->LookupDestination(instr->true_block_id());
1779 int false_block = chunk_->LookupDestination(instr->false_block_id());
1780
1781 __ test(FieldOperand(input, String::kHashFieldOffset),
1782 Immediate(String::kContainsCachedArrayIndexMask));
1783 EmitBranch(true_block, false_block, not_equal);
1784}
1785
1786
1787// Branches to a label or falls through with the answer in the z flag. Trashes
1788// the temp registers, but not the input. Only input and temp2 may alias.
1789void LCodeGen::EmitClassOfTest(Label* is_true,
1790 Label* is_false,
1791                               Handle<String> class_name,
1792 Register input,
1793 Register temp,
1794 Register temp2) {
1795 ASSERT(!input.is(temp));
1796 ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
1797 __ test(input, Immediate(kSmiTagMask));
1798 __ j(zero, is_false);
1799 __ CmpObjectType(input, FIRST_JS_OBJECT_TYPE, temp);
1800 __ j(below, is_false);
1801
1802 // Map is now in temp.
1803 // Functions have class 'Function'.
1804 __ CmpInstanceType(temp, JS_FUNCTION_TYPE);
1805 if (class_name->IsEqualTo(CStrVector("Function"))) {
1806 __ j(equal, is_true);
1807 } else {
1808 __ j(equal, is_false);
1809 }
1810
1811 // Check if the constructor in the map is a function.
1812 __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
1813
1814 // As long as JS_FUNCTION_TYPE is the last instance type and it is
1815 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
1816 // LAST_JS_OBJECT_TYPE.
1817 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
1818 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
1819
1820 // Objects with a non-function constructor have class 'Object'.
1821 __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
1822 if (class_name->IsEqualTo(CStrVector("Object"))) {
1823 __ j(not_equal, is_true);
1824 } else {
1825 __ j(not_equal, is_false);
1826 }
1827
1828 // temp now contains the constructor function. Grab the
1829 // instance class name from there.
1830 __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
1831 __ mov(temp, FieldOperand(temp,
1832 SharedFunctionInfo::kInstanceClassNameOffset));
1833 // The class name we are testing against is a symbol because it's a literal.
1834 // The name in the constructor is a symbol because of the way the context is
1835 // booted. This routine isn't expected to work for random API-created
1836 // classes and it doesn't have to because you can't access it with natives
1837 // syntax. Since both sides are symbols it is sufficient to use an identity
1838 // comparison.
1839 __ cmp(temp, class_name);
1840 // End with the answer in the z flag.
1841}
1842
1843
1844void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
1845 Register input = ToRegister(instr->input());
1846 Register result = ToRegister(instr->result());
1847 ASSERT(input.is(result));
1848 Register temp = ToRegister(instr->temporary());
1849 Handle<String> class_name = instr->hydrogen()->class_name();
1850 NearLabel done;
1851 Label is_true, is_false;
1852
1853 EmitClassOfTest(&is_true, &is_false, class_name, input, temp, input);
1854
1855 __ j(not_equal, &is_false);
1856
1857 __ bind(&is_true);
1858 __ mov(result, Handle<Object>(Heap::true_value()));
1859 __ jmp(&done);
1860
1861 __ bind(&is_false);
1862 __ mov(result, Handle<Object>(Heap::false_value()));
1863 __ bind(&done);
1864}
1865
1866
1867void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
1868 Register input = ToRegister(instr->input());
1869 Register temp = ToRegister(instr->temporary());
1870 Register temp2 = ToRegister(instr->temporary2());
1871 if (input.is(temp)) {
1872 // Swap.
1873 Register swapper = temp;
1874 temp = temp2;
1875 temp2 = swapper;
1876 }
1877 Handle<String> class_name = instr->hydrogen()->class_name();
1878
1879 int true_block = chunk_->LookupDestination(instr->true_block_id());
1880 int false_block = chunk_->LookupDestination(instr->false_block_id());
1881
1882 Label* true_label = chunk_->GetAssemblyLabel(true_block);
1883 Label* false_label = chunk_->GetAssemblyLabel(false_block);
1884
1885 EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
1886
1887 EmitBranch(true_block, false_block, equal);
1888}
1889
1890
1891void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
1892 Register reg = ToRegister(instr->input());
1893 int true_block = instr->true_block_id();
1894 int false_block = instr->false_block_id();
1895
1896 __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
1897 EmitBranch(true_block, false_block, equal);
1898}
1899
1900
1901void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
1902  // Object and function are in fixed registers defined by the stub.
1903  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
1904  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1905
1906 NearLabel true_value, done;
1907 __ test(eax, Operand(eax));
1908 __ j(zero, &true_value);
1909 __ mov(ToRegister(instr->result()), Factory::false_value());
1910 __ jmp(&done);
1911 __ bind(&true_value);
1912 __ mov(ToRegister(instr->result()), Factory::true_value());
1913 __ bind(&done);
1914}
1915
1916
1917void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
1918 int true_block = chunk_->LookupDestination(instr->true_block_id());
1919 int false_block = chunk_->LookupDestination(instr->false_block_id());
1920
1921  InstanceofStub stub(InstanceofStub::kArgsInRegisters);
1922  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
1923 __ test(eax, Operand(eax));
1924 EmitBranch(true_block, false_block, zero);
1925}
1926
1927
1928void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
1929 class DeferredInstanceOfKnownGlobal: public LDeferredCode {
1930 public:
1931 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
1932 LInstanceOfKnownGlobal* instr)
1933 : LDeferredCode(codegen), instr_(instr) { }
1934 virtual void Generate() {
1935 codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
1936 }
1937
1938 Label* map_check() { return &map_check_; }
1939
1940 private:
1941 LInstanceOfKnownGlobal* instr_;
1942 Label map_check_;
1943 };
1944
1945 DeferredInstanceOfKnownGlobal* deferred;
1946 deferred = new DeferredInstanceOfKnownGlobal(this, instr);
1947
1948 Label done, false_result;
1949 Register object = ToRegister(instr->input());
1950 Register temp = ToRegister(instr->temp());
1951
1952  // A Smi is not an instance of anything.
1953 __ test(object, Immediate(kSmiTagMask));
1954 __ j(zero, &false_result, not_taken);
1955
1956  // This is the inlined call site instanceof cache. The two occurrences of the
1957 // hole value will be patched to the last map/result pair generated by the
1958 // instanceof stub.
1959 NearLabel cache_miss;
1960 Register map = ToRegister(instr->temp());
1961 __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
1962 __ bind(deferred->map_check()); // Label for calculating code patching.
1963 __ cmp(map, Factory::the_hole_value()); // Patched to cached map.
1964 __ j(not_equal, &cache_miss, not_taken);
1965 __ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
1966 __ jmp(&done);
1967
1968 // The inlined call site cache did not match. Check null and string before
1969 // calling the deferred code.
1970 __ bind(&cache_miss);
1971  // Null is not an instance of anything.
1972 __ cmp(object, Factory::null_value());
1973 __ j(equal, &false_result);
1974
1975 // String values are not instances of anything.
1976 Condition is_string = masm_->IsObjectStringType(object, temp, temp);
1977 __ j(is_string, &false_result);
1978
1979 // Go to the deferred code.
1980 __ jmp(deferred->entry());
1981
1982 __ bind(&false_result);
1983 __ mov(ToRegister(instr->result()), Factory::false_value());
1984
1985 // Here result has either true or false. Deferred code also produces true or
1986 // false object.
1987 __ bind(deferred->exit());
1988 __ bind(&done);
1989}
1990
1991
1992void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
1993 Label* map_check) {
1994 __ PushSafepointRegisters();
1995
1996 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
1997 flags = static_cast<InstanceofStub::Flags>(
1998 flags | InstanceofStub::kArgsInRegisters);
1999 flags = static_cast<InstanceofStub::Flags>(
2000 flags | InstanceofStub::kCallSiteInlineCheck);
2001 flags = static_cast<InstanceofStub::Flags>(
2002 flags | InstanceofStub::kReturnTrueFalseObject);
2003 InstanceofStub stub(flags);
2004
2005  // Get the temp register reserved by the instruction. This needs to be edi
2006  // because its slot in the pushed safepoint register area is used to
2007  // communicate the offset to the location of the map check.
2008 Register temp = ToRegister(instr->temp());
2009 ASSERT(temp.is(edi));
2010 __ mov(InstanceofStub::right(), Immediate(instr->function()));
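  // delta is the code offset from the inlined map check to the return address
  // of the stub call below: the code generated since map_check plus
  // kAdditionalDelta for the two movs and the call that follow (the ASSERT_EQ
  // below keeps the constant honest). The stub uses this offset to find and
  // patch the cached map and result at the call site.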
2011 static const int kAdditionalDelta = 13;
2012 int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
2013 Label before_push_delta;
2014 __ bind(&before_push_delta);
2015 __ mov(temp, Immediate(delta));
2016 __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
2017 __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
2018 ASSERT_EQ(kAdditionalDelta,
2019 masm_->SizeOfCodeGeneratedSince(&before_push_delta));
2020 RecordSafepointWithRegisters(
2021 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2022 // Put the result value into the eax slot and restore all registers.
2023 __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
2024
2025 __ PopSafepointRegisters();
2026}
2027
2028
2029static Condition ComputeCompareCondition(Token::Value op) {
2030 switch (op) {
2031 case Token::EQ_STRICT:
2032 case Token::EQ:
2033 return equal;
2034 case Token::LT:
2035 return less;
2036 case Token::GT:
2037 return greater;
2038 case Token::LTE:
2039 return less_equal;
2040 case Token::GTE:
2041 return greater_equal;
2042 default:
2043 UNREACHABLE();
2044 return no_condition;
2045 }
2046}
2047
2048
2049void LCodeGen::DoCmpT(LCmpT* instr) {
2050 Token::Value op = instr->op();
2051
2052 Handle<Code> ic = CompareIC::GetUninitialized(op);
2053 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2054
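  // The compare stub expects the condition and the input operands reversed
  // for GT and LTE (see also DoCmpTAndBranch below).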
2055 Condition condition = ComputeCompareCondition(op);
2056 if (op == Token::GT || op == Token::LTE) {
2057 condition = ReverseCondition(condition);
2058 }
2059 NearLabel true_value, done;
2060 __ test(eax, Operand(eax));
2061 __ j(condition, &true_value);
2062 __ mov(ToRegister(instr->result()), Factory::false_value());
2063 __ jmp(&done);
2064 __ bind(&true_value);
2065 __ mov(ToRegister(instr->result()), Factory::true_value());
2066 __ bind(&done);
2067}
2068
2069
2070void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
2071 Token::Value op = instr->op();
2072 int true_block = chunk_->LookupDestination(instr->true_block_id());
2073 int false_block = chunk_->LookupDestination(instr->false_block_id());
2074
2075 Handle<Code> ic = CompareIC::GetUninitialized(op);
2076 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2077
2078 // The compare stub expects compare condition and the input operands
2079 // reversed for GT and LTE.
2080 Condition condition = ComputeCompareCondition(op);
2081 if (op == Token::GT || op == Token::LTE) {
2082 condition = ReverseCondition(condition);
2083 }
2084 __ test(eax, Operand(eax));
2085 EmitBranch(true_block, false_block, condition);
2086}
2087
2088
2089void LCodeGen::DoReturn(LReturn* instr) {
2090 if (FLAG_trace) {
2091 // Preserve the return value on the stack and rely on the runtime
2092 // call to return the value in the same register.
2093 __ push(eax);
2094 __ CallRuntime(Runtime::kTraceExit, 1);
2095 }
2096 __ mov(esp, ebp);
2097 __ pop(ebp);
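  // The + 1 pops the receiver as well as the parameters.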
2098 __ ret((ParameterCount() + 1) * kPointerSize);
2099}
2100
2101
2102void LCodeGen::DoLoadGlobal(LLoadGlobal* instr) {
2103 Register result = ToRegister(instr->result());
2104 __ mov(result, Operand::Cell(instr->hydrogen()->cell()));
2105 if (instr->hydrogen()->check_hole_value()) {
2106 __ cmp(result, Factory::the_hole_value());
2107 DeoptimizeIf(equal, instr->environment());
2108 }
2109}
2110
2111
2112void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
2113 Register value = ToRegister(instr->input());
2114 __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
2115}
2116
2117
2118void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2119 // TODO(antonm): load a context with a separate instruction.
2120 Register result = ToRegister(instr->result());
2121 __ LoadContext(result, instr->context_chain_length());
2122 __ mov(result, ContextOperand(result, instr->slot_index()));
2123}
2124
2125
2126void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2127 Register object = ToRegister(instr->input());
2128 Register result = ToRegister(instr->result());
2129 if (instr->hydrogen()->is_in_object()) {
2130 __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
2131 } else {
2132 __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
2133 __ mov(result, FieldOperand(result, instr->hydrogen()->offset()));
2134 }
2135}
2136
2137
2138void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
2139 ASSERT(ToRegister(instr->object()).is(eax));
2140 ASSERT(ToRegister(instr->result()).is(eax));
2141
2142 __ mov(ecx, instr->name());
2143 Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
2144 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2145}
2146
2147
2148void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
2149 Register function = ToRegister(instr->function());
2150 Register temp = ToRegister(instr->temporary());
2151 Register result = ToRegister(instr->result());
2152
2153 // Check that the function really is a function.
2154 __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
2155 DeoptimizeIf(not_equal, instr->environment());
2156
2157 // Check whether the function has an instance prototype.
2158 NearLabel non_instance;
2159 __ test_b(FieldOperand(result, Map::kBitFieldOffset),
2160 1 << Map::kHasNonInstancePrototype);
2161 __ j(not_zero, &non_instance);
2162
2163 // Get the prototype or initial map from the function.
2164 __ mov(result,
2165 FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2166
2167 // Check that the function has a prototype or an initial map.
2168 __ cmp(Operand(result), Immediate(Factory::the_hole_value()));
2169 DeoptimizeIf(equal, instr->environment());
2170
2171 // If the function does not have an initial map, we're done.
2172 NearLabel done;
2173 __ CmpObjectType(result, MAP_TYPE, temp);
2174 __ j(not_equal, &done);
2175
2176 // Get the prototype from the initial map.
2177 __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
2178 __ jmp(&done);
2179
2180 // Non-instance prototype: Fetch prototype from constructor field
2181 // in the function's map.
2182 __ bind(&non_instance);
2183 __ mov(result, FieldOperand(result, Map::kConstructorOffset));
2184
2185 // All done.
2186 __ bind(&done);
2187}
2188
2189
2190void LCodeGen::DoLoadElements(LLoadElements* instr) {
2191 ASSERT(instr->result()->Equals(instr->input()));
2192 Register reg = ToRegister(instr->input());
2193 __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
2194 if (FLAG_debug_code) {
2195 NearLabel done;
2196 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2197 Immediate(Factory::fixed_array_map()));
2198 __ j(equal, &done);
2199 __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
2200 Immediate(Factory::fixed_cow_array_map()));
2201 __ Check(equal, "Check for fast elements failed.");
2202 __ bind(&done);
2203 }
2204}
2205
2206
2207void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
2208 Register arguments = ToRegister(instr->arguments());
2209 Register length = ToRegister(instr->length());
2210 Operand index = ToOperand(instr->index());
2211 Register result = ToRegister(instr->result());
2212
2213 __ sub(length, index);
2214 DeoptimizeIf(below_equal, instr->environment());
2215
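  // length now holds (length - index), so the operand below addresses
  // argument `index` at fp + (length - index + 1) * kPointerSize.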
2216  // There are two words between the frame pointer and the last argument.
2217  // Subtracting from length accounts for one of them; add one more.
2218  __ mov(result, Operand(arguments, length, times_4, kPointerSize));
2219}
2220
2221
2222void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
2223 Register elements = ToRegister(instr->elements());
2224 Register key = ToRegister(instr->key());
2225  Register result = ToRegister(instr->result());
2226  ASSERT(result.is(elements));
2227
2228 // Load the result.
2229 __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2230
2231  // Check for the hole value.
2232 __ cmp(result, Factory::the_hole_value());
2233 DeoptimizeIf(equal, instr->environment());
2234}
2235
2236
2237void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
2238 ASSERT(ToRegister(instr->object()).is(edx));
2239 ASSERT(ToRegister(instr->key()).is(eax));
2240
2241 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
2242 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2243}
2244
2245
2246void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
2247 Register result = ToRegister(instr->result());
2248
2249 // Check for arguments adapter frame.
2250  NearLabel done, adapted;
2251  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2252 __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
2253 __ cmp(Operand(result),
2254 Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
2255 __ j(equal, &adapted);
2256
2257 // No arguments adaptor frame.
2258 __ mov(result, Operand(ebp));
2259 __ jmp(&done);
2260
2261 // Arguments adaptor frame present.
2262 __ bind(&adapted);
2263 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2264
2265  // Result is the frame pointer for the frame if not adapted and for the real
2266  // frame below the adaptor frame if adapted.
2267  __ bind(&done);
2268}
2269
2270
2271void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
2272 Operand elem = ToOperand(instr->input());
2273 Register result = ToRegister(instr->result());
2274
2275  NearLabel done;
2276
2277  // If there is no arguments adaptor frame, the number of arguments is fixed.
2278  __ cmp(ebp, elem);
2279 __ mov(result, Immediate(scope()->num_parameters()));
2280 __ j(equal, &done);
2281
2282 // Arguments adaptor frame present. Get argument length from there.
2283 __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
2284 __ mov(result, Operand(result,
2285 ArgumentsAdaptorFrameConstants::kLengthOffset));
2286 __ SmiUntag(result);
2287
2288  // Argument length is in result register.
2289  __ bind(&done);
2290}
2291
2292
2293void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
2294 Register receiver = ToRegister(instr->receiver());
2295 ASSERT(ToRegister(instr->function()).is(edi));
2296 ASSERT(ToRegister(instr->result()).is(eax));
2297
2298 // If the receiver is null or undefined, we have to pass the
2299 // global object as a receiver.
2300 NearLabel global_receiver, receiver_ok;
2301 __ cmp(receiver, Factory::null_value());
2302 __ j(equal, &global_receiver);
2303 __ cmp(receiver, Factory::undefined_value());
2304 __ j(not_equal, &receiver_ok);
2305 __ bind(&global_receiver);
2306 __ mov(receiver, GlobalObjectOperand());
2307 __ bind(&receiver_ok);
2308
2309 Register length = ToRegister(instr->length());
2310 Register elements = ToRegister(instr->elements());
2311
2312 Label invoke;
2313
2314 // Copy the arguments to this function possibly from the
2315 // adaptor frame below it.
2316 const uint32_t kArgumentsLimit = 1 * KB;
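  // Deoptimize if there are more than 1024 arguments; pushing them all below
  // could otherwise use an unbounded amount of stack.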
2317 __ cmp(length, kArgumentsLimit);
2318 DeoptimizeIf(above, instr->environment());
2319
2320 __ push(receiver);
2321 __ mov(receiver, length);
2322
2323 // Loop through the arguments pushing them onto the execution
2324 // stack.
2325 Label loop;
2326 // length is a small non-negative integer, due to the test above.
2327 __ test(length, Operand(length));
2328 __ j(zero, &invoke);
2329 __ bind(&loop);
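  // length counts down from the argument count to 1; the operand below,
  // elements + length * kPointerSize + kPointerSize, addresses the first
  // argument on the first iteration and the last argument on the final one,
  // so the arguments are pushed in calling order on top of the receiver.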
2330 __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
2331 __ dec(length);
2332 __ j(not_zero, &loop);
2333
2334 // Invoke the function.
2335 __ bind(&invoke);
2336 ASSERT(receiver.is(eax));
2337 v8::internal::ParameterCount actual(eax);
2338 SafepointGenerator safepoint_generator(this,
2339 instr->pointer_map(),
2340 Safepoint::kNoDeoptimizationIndex);
2341 __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
2342}
2343
2344
2345void LCodeGen::DoPushArgument(LPushArgument* instr) {
2346 LOperand* argument = instr->input();
2347 if (argument->IsConstantOperand()) {
2348 __ push(ToImmediate(argument));
2349 } else {
2350 __ push(ToOperand(argument));
2351 }
2352}
2353
2354
2355void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
2356 Register result = ToRegister(instr->result());
2357 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2358}
2359
2360
2361void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
2362 Register result = ToRegister(instr->result());
2363 __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
2364 __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
2365}
2366
2367
2368void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
2369 int arity,
2370 LInstruction* instr) {
2371 // Change context if needed.
2372 bool change_context =
2373 (graph()->info()->closure()->context() != function->context()) ||
2374 scope()->contains_with() ||
2375 (scope()->num_heap_slots() > 0);
2376 if (change_context) {
2377 __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
2378 }
2379
2380  // Set eax to arguments count if adaptation is not needed. Assumes that eax
2381 // is available to write to at this point.
2382 if (!function->NeedsArgumentsAdaption()) {
2383 __ mov(eax, arity);
2384 }
2385
2386 LPointerMap* pointers = instr->pointer_map();
2387 RecordPosition(pointers->position());
2388
2389 // Invoke function.
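  // A call to the closure being compiled can re-enter this code object
  // directly; other known functions are entered through their code entry
  // field.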
2390 if (*function == *graph()->info()->closure()) {
2391 __ CallSelf();
2392 } else {
2393 __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
2394 }
2395
2396  // Set up deoptimization.
2397 RegisterLazyDeoptimization(instr);
2398
2399 // Restore context.
2400 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2401}
2402
2403
2404void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
2405 ASSERT(ToRegister(instr->result()).is(eax));
2406 __ mov(edi, instr->function());
2407 CallKnownFunction(instr->function(), instr->arity(), instr);
2408}
2409
2410
2411void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
2412 Register input_reg = ToRegister(instr->input());
2413 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2414 Factory::heap_number_map());
2415 DeoptimizeIf(not_equal, instr->environment());
2416
2417 Label done;
2418 Register tmp = input_reg.is(eax) ? ecx : eax;
2419 Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
2420
2421 // Preserve the value of all registers.
2422 __ PushSafepointRegisters();
2423
2424 Label negative;
2425 __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2426 // Check the sign of the argument. If the argument is positive,
2427 // just return it.
2428 __ test(tmp, Immediate(HeapNumber::kSignMask));
2429 __ j(not_zero, &negative);
2430 __ mov(tmp, input_reg);
2431 __ jmp(&done);
2432
2433 __ bind(&negative);
2434
2435 Label allocated, slow;
2436 __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
2437 __ jmp(&allocated);
2438
2439 // Slow case: Call the runtime system to do the number allocation.
2440 __ bind(&slow);
2441
2442 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2443 RecordSafepointWithRegisters(
2444 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2445 // Set the pointer to the new heap number in tmp.
2446 if (!tmp.is(eax)) __ mov(tmp, eax);
2447
2448 // Restore input_reg after call to runtime.
2449 __ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
2450
2451 __ bind(&allocated);
2452 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
2453 __ and_(tmp2, ~HeapNumber::kSignMask);
2454 __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
2455 __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
2456 __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
2457
2458 __ bind(&done);
2459 __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
2460
2461 __ PopSafepointRegisters();
2462}
2463
2464
2465void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
2466 // Class for deferred case.
2467 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
2468 public:
2469 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
2470 LUnaryMathOperation* instr)
2471 : LDeferredCode(codegen), instr_(instr) { }
2472 virtual void Generate() {
2473 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
2474 }
2475 private:
2476 LUnaryMathOperation* instr_;
2477 };
2478
2479 ASSERT(instr->input()->Equals(instr->result()));
2480 Representation r = instr->hydrogen()->value()->representation();
2481
2482 if (r.IsDouble()) {
2483 XMMRegister scratch = xmm0;
2484 XMMRegister input_reg = ToDoubleRegister(instr->input());
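    // 0.0 - input differs from input only in the sign bit, so ANDing the two
    // clears the sign and yields the absolute value without a branch.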
2485 __ pxor(scratch, scratch);
2486 __ subsd(scratch, input_reg);
2487 __ pand(input_reg, scratch);
2488 } else if (r.IsInteger32()) {
2489 Register input_reg = ToRegister(instr->input());
2490 __ test(input_reg, Operand(input_reg));
2491 Label is_positive;
2492 __ j(not_sign, &is_positive);
2493 __ neg(input_reg);
2494 __ test(input_reg, Operand(input_reg));
2495 DeoptimizeIf(negative, instr->environment());
2496 __ bind(&is_positive);
2497 } else { // Tagged case.
2498 DeferredMathAbsTaggedHeapNumber* deferred =
2499 new DeferredMathAbsTaggedHeapNumber(this, instr);
2500 Label not_smi;
2501 Register input_reg = ToRegister(instr->input());
2502 // Smi check.
2503 __ test(input_reg, Immediate(kSmiTagMask));
2504 __ j(not_zero, deferred->entry());
2505 __ test(input_reg, Operand(input_reg));
2506 Label is_positive;
2507 __ j(not_sign, &is_positive);
2508 __ neg(input_reg);
2509
2510 __ test(input_reg, Operand(input_reg));
2511 DeoptimizeIf(negative, instr->environment());
2512
2513 __ bind(&is_positive);
2514 __ bind(deferred->exit());
2515 }
2516}
2517
2518
2519void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2520 XMMRegister xmm_scratch = xmm0;
2521 Register output_reg = ToRegister(instr->result());
2522 XMMRegister input_reg = ToDoubleRegister(instr->input());
2523 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2524 __ ucomisd(input_reg, xmm_scratch);
2525
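  // ucomisd sets CF and ZF for unordered (NaN) inputs, so either variant
  // below also deoptimizes on NaN. When bailing out on -0 the equal case must
  // deoptimize too, since -0 compares equal to +0.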
2526 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2527 DeoptimizeIf(below_equal, instr->environment());
2528 } else {
2529 DeoptimizeIf(below, instr->environment());
2530 }
2531
2532 // Use truncating instruction (OK because input is positive).
2533 __ cvttsd2si(output_reg, Operand(input_reg));
2534
2535  // Overflow is signalled with kMinInt (0x80000000).
2536 __ cmp(output_reg, 0x80000000u);
2537 DeoptimizeIf(equal, instr->environment());
2538}
2539
2540
2541void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
2542 XMMRegister xmm_scratch = xmm0;
2543 Register output_reg = ToRegister(instr->result());
2544 XMMRegister input_reg = ToDoubleRegister(instr->input());
2545
2546 // xmm_scratch = 0.5
2547 ExternalReference one_half = ExternalReference::address_of_one_half();
2548 __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
2549
2550 // input = input + 0.5
2551 __ addsd(input_reg, xmm_scratch);
2552
2553  // We need to return -0 for the input range [-0.5, 0), otherwise
2554 // compute Math.floor(value + 0.5).
2555 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
2556 __ ucomisd(input_reg, xmm_scratch);
2557 DeoptimizeIf(below_equal, instr->environment());
2558 } else {
2559 // If we don't need to bailout on -0, we check only bailout
2560 // on negative inputs.
2561 __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
2562 __ ucomisd(input_reg, xmm_scratch);
2563 DeoptimizeIf(below, instr->environment());
2564 }
2565
2566 // Compute Math.floor(value + 0.5).
2567 // Use truncating instruction (OK because input is positive).
2568 __ cvttsd2si(output_reg, Operand(input_reg));
2569
2570  // Overflow is signalled with kMinInt (0x80000000).
2571 __ cmp(output_reg, 0x80000000u);
2572 DeoptimizeIf(equal, instr->environment());
2573}
2574
2575
2576void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
2577 XMMRegister input_reg = ToDoubleRegister(instr->input());
2578 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2579 __ sqrtsd(input_reg, input_reg);
2580}
2581
2582
2583void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
2584 XMMRegister xmm_scratch = xmm0;
2585 XMMRegister input_reg = ToDoubleRegister(instr->input());
2586 ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
2587 ExternalReference negative_infinity =
2588 ExternalReference::address_of_negative_infinity();
2589 __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
2590 __ ucomisd(xmm_scratch, input_reg);
2591 DeoptimizeIf(equal, instr->environment());
2592 __ sqrtsd(input_reg, input_reg);
2593}
2594
2595
2596void LCodeGen::DoPower(LPower* instr) {
2597 LOperand* left = instr->left();
2598 LOperand* right = instr->right();
2599 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2600 Representation exponent_type = instr->hydrogen()->right()->representation();
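  // PrepareCallCFunction(4, ...) reserves four 32-bit argument slots: enough
  // for two doubles, or for a double in the first two slots and an int in
  // the third.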
2601 if (exponent_type.IsDouble()) {
2602 // It is safe to use ebx directly since the instruction is marked
2603 // as a call.
2604 __ PrepareCallCFunction(4, ebx);
2605 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2606 __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
2607 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2608 } else if (exponent_type.IsInteger32()) {
2609 // It is safe to use ebx directly since the instruction is marked
2610 // as a call.
2611 ASSERT(!ToRegister(right).is(ebx));
2612 __ PrepareCallCFunction(4, ebx);
2613 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2614 __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
2615 __ CallCFunction(ExternalReference::power_double_int_function(), 4);
2616 } else {
2617 ASSERT(exponent_type.IsTagged());
2618 CpuFeatures::Scope scope(SSE2);
2619 Register right_reg = ToRegister(right);
2620
2621 Label non_smi, call;
2622 __ test(right_reg, Immediate(kSmiTagMask));
2623 __ j(not_zero, &non_smi);
2624 __ SmiUntag(right_reg);
2625 __ cvtsi2sd(result_reg, Operand(right_reg));
2626 __ jmp(&call);
2627
2628 __ bind(&non_smi);
2629 // It is safe to use ebx directly since the instruction is marked
2630 // as a call.
2631 ASSERT(!right_reg.is(ebx));
2632    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, ebx);
2633 DeoptimizeIf(not_equal, instr->environment());
2634 __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
2635
2636 __ bind(&call);
2637 __ PrepareCallCFunction(4, ebx);
2638 __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
2639 __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
2640 __ CallCFunction(ExternalReference::power_double_double_function(), 4);
2641 }
2642
2643 // Return value is in st(0) on ia32.
2644 // Store it into the (fixed) result register.
2645 __ sub(Operand(esp), Immediate(kDoubleSize));
2646 __ fstp_d(Operand(esp, 0));
2647 __ movdbl(result_reg, Operand(esp, 0));
2648 __ add(Operand(esp), Immediate(kDoubleSize));
2649}
2650
2651
2652void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
2653 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2654  TranscendentalCacheStub stub(TranscendentalCache::LOG,
2655 TranscendentalCacheStub::UNTAGGED);
2656 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2657}
2658
2659
2660void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
2661 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2662 TranscendentalCacheStub stub(TranscendentalCache::COS,
2663 TranscendentalCacheStub::UNTAGGED);
2664 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2665}
2666
2667
2668void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
2669 ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
2670 TranscendentalCacheStub stub(TranscendentalCache::SIN,
2671 TranscendentalCacheStub::UNTAGGED);
2672  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2673}
2674
2675
2676void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
2677 switch (instr->op()) {
2678 case kMathAbs:
2679 DoMathAbs(instr);
2680 break;
2681 case kMathFloor:
2682 DoMathFloor(instr);
2683 break;
2684 case kMathRound:
2685 DoMathRound(instr);
2686 break;
2687 case kMathSqrt:
2688 DoMathSqrt(instr);
2689 break;
2690    case kMathPowHalf:
2691 DoMathPowHalf(instr);
2692 break;
2693    case kMathCos:
2694 DoMathCos(instr);
2695 break;
2696 case kMathSin:
2697 DoMathSin(instr);
2698 break;
2699    case kMathLog:
2700 DoMathLog(instr);
2701 break;
2702
2703    default:
2704 UNREACHABLE();
2705 }
2706}
2707
2708
2709void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
2710 ASSERT(ToRegister(instr->result()).is(eax));
2711
2712 int arity = instr->arity();
2713 Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
2714 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2715 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2716}
2717
2718
2719void LCodeGen::DoCallNamed(LCallNamed* instr) {
2720 ASSERT(ToRegister(instr->result()).is(eax));
2721
2722 int arity = instr->arity();
2723 Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2724 __ mov(ecx, instr->name());
2725 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2726 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2727}
2728
2729
2730void LCodeGen::DoCallFunction(LCallFunction* instr) {
2731 ASSERT(ToRegister(instr->result()).is(eax));
2732
2733 int arity = instr->arity();
2734 CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
2735 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2736 __ Drop(1);
2737 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2738}
2739
2740
2741void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
2742 ASSERT(ToRegister(instr->result()).is(eax));
2743
2744 int arity = instr->arity();
2745 Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
2746 __ mov(ecx, instr->name());
2747 CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
2748 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
2749}
2750
2751
2752void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
2753 ASSERT(ToRegister(instr->result()).is(eax));
2754 __ mov(edi, instr->target());
2755 CallKnownFunction(instr->target(), instr->arity(), instr);
2756}
2757
2758
2759void LCodeGen::DoCallNew(LCallNew* instr) {
2760 ASSERT(ToRegister(instr->input()).is(edi));
2761 ASSERT(ToRegister(instr->result()).is(eax));
2762
2763 Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
2764 __ Set(eax, Immediate(instr->arity()));
2765 CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
2766}
2767
2768
2769void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
2770 CallRuntime(instr->function(), instr->arity(), instr);
2771}
2772
2773
2774void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
2775 Register object = ToRegister(instr->object());
2776 Register value = ToRegister(instr->value());
2777 int offset = instr->offset();
2778
2779 if (!instr->transition().is_null()) {
2780 __ mov(FieldOperand(object, HeapObject::kMapOffset), instr->transition());
2781 }
2782
2783 // Do the store.
2784 if (instr->is_in_object()) {
2785 __ mov(FieldOperand(object, offset), value);
2786 if (instr->needs_write_barrier()) {
2787 Register temp = ToRegister(instr->temp());
2788 // Update the write barrier for the object for in-object properties.
2789 __ RecordWrite(object, offset, value, temp);
2790 }
2791 } else {
2792 Register temp = ToRegister(instr->temp());
2793 __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
2794 __ mov(FieldOperand(temp, offset), value);
2795 if (instr->needs_write_barrier()) {
2796 // Update the write barrier for the properties array.
2797 // object is used as a scratch register.
2798 __ RecordWrite(temp, offset, value, object);
2799 }
2800 }
2801}
2802
2803
2804void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
2805 ASSERT(ToRegister(instr->object()).is(edx));
2806 ASSERT(ToRegister(instr->value()).is(eax));
2807
2808 __ mov(ecx, instr->name());
2809 Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
2810 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2811}
2812
2813
2814void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
2815 __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
2816 DeoptimizeIf(above_equal, instr->environment());
2817}
2818
2819
2820void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
2821 Register value = ToRegister(instr->value());
2822 Register elements = ToRegister(instr->object());
2823 Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
2824
2825 // Do the store.
2826 if (instr->key()->IsConstantOperand()) {
2827 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
2828 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
2829 int offset =
2830 ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
2831 __ mov(FieldOperand(elements, offset), value);
2832 } else {
2833 __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
2834 value);
2835 }
2836
2837  if (instr->hydrogen()->NeedsWriteBarrier()) {
2838 // Compute address of modified element and store it into key register.
2839 __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
2840 __ RecordWrite(elements, key, value);
2841 }
2842}
2843
2844
2845void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
2846 ASSERT(ToRegister(instr->object()).is(edx));
2847 ASSERT(ToRegister(instr->key()).is(ecx));
2848 ASSERT(ToRegister(instr->value()).is(eax));
2849
2850 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
2851 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2852}
2853
2854
2855void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
2856 LOperand* input = instr->input();
2857 ASSERT(input->IsRegister() || input->IsStackSlot());
2858 LOperand* output = instr->result();
2859 ASSERT(output->IsDoubleRegister());
2860 __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
2861}
2862
2863
2864void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
2865 class DeferredNumberTagI: public LDeferredCode {
2866 public:
2867 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
2868 : LDeferredCode(codegen), instr_(instr) { }
2869 virtual void Generate() { codegen()->DoDeferredNumberTagI(instr_); }
2870 private:
2871 LNumberTagI* instr_;
2872 };
2873
2874 LOperand* input = instr->input();
2875 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2876 Register reg = ToRegister(input);
2877
2878 DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
2879 __ SmiTag(reg);
2880 __ j(overflow, deferred->entry());
2881 __ bind(deferred->exit());
2882}
2883
2884
2885void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
2886 Label slow;
2887 Register reg = ToRegister(instr->input());
2888 Register tmp = reg.is(eax) ? ecx : eax;
2889
2890 // Preserve the value of all registers.
2891 __ PushSafepointRegisters();
2892
2893 // There was overflow, so bits 30 and 31 of the original integer
2894 // disagree. Try to allocate a heap number in new space and store
2895 // the value in there. If that fails, call the runtime system.
2896 NearLabel done;
2897 __ SmiUntag(reg);
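  // SmiUntag shifts arithmetically, so after an overflowing SmiTag the value
  // has bit 30 copied into bit 31. Overflow means bits 30 and 31 of the
  // original disagreed, so flipping bit 31 recovers the original integer.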
2898 __ xor_(reg, 0x80000000);
2899 __ cvtsi2sd(xmm0, Operand(reg));
2900 if (FLAG_inline_new) {
2901 __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
2902 __ jmp(&done);
2903 }
2904
2905 // Slow case: Call the runtime system to do the number allocation.
2906 __ bind(&slow);
2907
2908 // TODO(3095996): Put a valid pointer value in the stack slot where the result
2909 // register is stored, as this register is in the pointer map, but contains an
2910 // integer value.
2911 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
2912
2913 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2914 RecordSafepointWithRegisters(
2915 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2916 if (!reg.is(eax)) __ mov(reg, eax);
2917
2918 // Done. Put the value in xmm0 into the value of the allocated heap
2919 // number.
2920 __ bind(&done);
2921 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
2922 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
2923 __ PopSafepointRegisters();
2924}
2925
2926
2927void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
2928 class DeferredNumberTagD: public LDeferredCode {
2929 public:
2930 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
2931 : LDeferredCode(codegen), instr_(instr) { }
2932 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
2933 private:
2934 LNumberTagD* instr_;
2935 };
2936
2937 XMMRegister input_reg = ToDoubleRegister(instr->input());
2938 Register reg = ToRegister(instr->result());
2939 Register tmp = ToRegister(instr->temp());
2940
2941 DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
2942 if (FLAG_inline_new) {
2943 __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
2944 } else {
2945 __ jmp(deferred->entry());
2946 }
2947 __ bind(deferred->exit());
2948 __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
2949}
2950
2951
2952void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
2953 // TODO(3095996): Get rid of this. For now, we need to make the
2954 // result register contain a valid pointer because it is already
2955 // contained in the register pointer map.
2956 Register reg = ToRegister(instr->result());
2957 __ Set(reg, Immediate(0));
2958
2959 __ PushSafepointRegisters();
2960 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
2961 RecordSafepointWithRegisters(
2962 instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
2963 __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
2964 __ PopSafepointRegisters();
2965}
2966
2967
2968void LCodeGen::DoSmiTag(LSmiTag* instr) {
2969 LOperand* input = instr->input();
2970 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2971 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
2972 __ SmiTag(ToRegister(input));
2973}
2974
2975
2976void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
2977 LOperand* input = instr->input();
2978 ASSERT(input->IsRegister() && input->Equals(instr->result()));
2979 if (instr->needs_check()) {
2980 __ test(ToRegister(input), Immediate(kSmiTagMask));
2981 DeoptimizeIf(not_zero, instr->environment());
2982 }
2983 __ SmiUntag(ToRegister(input));
2984}
2985
2986
2987void LCodeGen::EmitNumberUntagD(Register input_reg,
2988 XMMRegister result_reg,
2989 LEnvironment* env) {
2990 NearLabel load_smi, heap_number, done;
2991
2992 // Smi check.
2993 __ test(input_reg, Immediate(kSmiTagMask));
2994 __ j(zero, &load_smi, not_taken);
2995
2996 // Heap number map check.
2997 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
2998 Factory::heap_number_map());
2999 __ j(equal, &heap_number);
3000
3001 __ cmp(input_reg, Factory::undefined_value());
3002 DeoptimizeIf(not_equal, env);
3003
3004 // Convert undefined to NaN.
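  // input_reg is borrowed for a moment to address the canonical NaN value and
  // is restored right after.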
3005 __ push(input_reg);
3006 __ mov(input_reg, Factory::nan_value());
3007 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
3008 __ pop(input_reg);
3009 __ jmp(&done);
3010
3011 // Heap number to XMM conversion.
3012 __ bind(&heap_number);
3013 __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
3014 __ jmp(&done);
3015
3016  // Smi to XMM conversion.
3017 __ bind(&load_smi);
3018 __ SmiUntag(input_reg); // Untag smi before converting to float.
3019 __ cvtsi2sd(result_reg, Operand(input_reg));
3020 __ SmiTag(input_reg); // Retag smi.
3021 __ bind(&done);
3022}
3023
3024
3025class DeferredTaggedToI: public LDeferredCode {
3026 public:
3027 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
3028 : LDeferredCode(codegen), instr_(instr) { }
3029 virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
3030 private:
3031 LTaggedToI* instr_;
3032};
3033
3034
3035void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
3036 NearLabel done, heap_number;
3037 Register input_reg = ToRegister(instr->input());
3038
3039 // Heap number map check.
3040 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
3041 Factory::heap_number_map());
3042
3043 if (instr->truncating()) {
3044 __ j(equal, &heap_number);
3045 // Check for undefined. Undefined is converted to zero for truncating
3046 // conversions.
3047 __ cmp(input_reg, Factory::undefined_value());
3048 DeoptimizeIf(not_equal, instr->environment());
3049 __ mov(input_reg, 0);
3050 __ jmp(&done);
3051
3052 __ bind(&heap_number);
3053 if (CpuFeatures::IsSupported(SSE3)) {
3054 CpuFeatures::Scope scope(SSE3);
3055 NearLabel convert;
3056 // Use more powerful conversion when sse3 is available.
3057 // Load x87 register with heap number.
3058 __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
3059 // Get exponent alone and check for too-big exponent.
3060 __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
3061 __ and_(input_reg, HeapNumber::kExponentMask);
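      // An unbiased exponent of 63 or more does not fit in a signed 64-bit
      // integer, so fisttp_d could not produce a usable answer.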
3062 const uint32_t kTooBigExponent =
3063 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
3064 __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
3065 __ j(less, &convert);
3066 // Pop FPU stack before deoptimizing.
3067 __ ffree(0);
3068 __ fincstp();
3069 DeoptimizeIf(no_condition, instr->environment());
3070
3071 // Reserve space for 64 bit answer.
3072 __ bind(&convert);
3073 __ sub(Operand(esp), Immediate(kDoubleSize));
3074 // Do conversion, which cannot fail because we checked the exponent.
3075 __ fisttp_d(Operand(esp, 0));
3076 __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
3077 __ add(Operand(esp), Immediate(kDoubleSize));
3078 } else {
3079 NearLabel deopt;
3080 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
3081 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3082 __ cvttsd2si(input_reg, Operand(xmm0));
3083 __ cmp(input_reg, 0x80000000u);
3084 __ j(not_equal, &done);
3085      // Check if the input was 0x80000000 (kMinInt).
3086      // If not, the conversion overflowed and we deoptimize.
3087 ExternalReference min_int = ExternalReference::address_of_min_int();
3088 __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
3089 __ ucomisd(xmm_temp, xmm0);
3090 DeoptimizeIf(not_equal, instr->environment());
3091 DeoptimizeIf(parity_even, instr->environment()); // NaN.
3092 }
3093 } else {
3094 // Deoptimize if we don't have a heap number.
3095 DeoptimizeIf(not_equal, instr->environment());
3096
3097 XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
3098 __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
3099 __ cvttsd2si(input_reg, Operand(xmm0));
3100 __ cvtsi2sd(xmm_temp, Operand(input_reg));
3101 __ ucomisd(xmm0, xmm_temp);
3102 DeoptimizeIf(not_equal, instr->environment());
3103 DeoptimizeIf(parity_even, instr->environment()); // NaN.
3104 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3105 __ test(input_reg, Operand(input_reg));
3106 __ j(not_zero, &done);
3107 __ movmskpd(input_reg, xmm0);
3108 __ and_(input_reg, 1);
3109 DeoptimizeIf(not_zero, instr->environment());
3110 }
3111 }
3112 __ bind(&done);
3113}
3114
3115
3116void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
3117 LOperand* input = instr->input();
3118 ASSERT(input->IsRegister());
3119 ASSERT(input->Equals(instr->result()));
3120
3121 Register input_reg = ToRegister(input);
3122
3123 DeferredTaggedToI* deferred = new DeferredTaggedToI(this, instr);
3124
3125 // Smi check.
3126 __ test(input_reg, Immediate(kSmiTagMask));
3127 __ j(not_zero, deferred->entry());
3128
3129  // Smi to int32 conversion.
3130 __ SmiUntag(input_reg); // Untag smi.
3131
3132 __ bind(deferred->exit());
3133}
3134
3135
3136void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
3137 LOperand* input = instr->input();
3138 ASSERT(input->IsRegister());
3139 LOperand* result = instr->result();
3140 ASSERT(result->IsDoubleRegister());
3141
3142 Register input_reg = ToRegister(input);
3143 XMMRegister result_reg = ToDoubleRegister(result);
3144
3145 EmitNumberUntagD(input_reg, result_reg, instr->environment());
3146}
3147
3148
3149void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
3150 LOperand* input = instr->input();
3151 ASSERT(input->IsDoubleRegister());
3152 LOperand* result = instr->result();
3153 ASSERT(result->IsRegister());
3154
3155 XMMRegister input_reg = ToDoubleRegister(input);
3156 Register result_reg = ToRegister(result);
3157
3158 if (instr->truncating()) {
3159 // Performs a truncating conversion of a floating point number as used by
3160 // the JS bitwise operations.
3161 __ cvttsd2si(result_reg, Operand(input_reg));
3162 __ cmp(result_reg, 0x80000000u);
3163 if (CpuFeatures::IsSupported(SSE3)) {
3164      // This will deoptimize if the exponent of the input is out of range.
3165 CpuFeatures::Scope scope(SSE3);
3166 NearLabel convert, done;
3167 __ j(not_equal, &done);
3168 __ sub(Operand(esp), Immediate(kDoubleSize));
3169 __ movdbl(Operand(esp, 0), input_reg);
3170 // Get exponent alone and check for too-big exponent.
3171 __ mov(result_reg, Operand(esp, sizeof(int32_t)));
3172 __ and_(result_reg, HeapNumber::kExponentMask);
3173 const uint32_t kTooBigExponent =
3174 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
3175 __ cmp(Operand(result_reg), Immediate(kTooBigExponent));
3176 __ j(less, &convert);
3177 __ add(Operand(esp), Immediate(kDoubleSize));
3178 DeoptimizeIf(no_condition, instr->environment());
3179 __ bind(&convert);
3180 // Do conversion, which cannot fail because we checked the exponent.
3181 __ fld_d(Operand(esp, 0));
3182 __ fisttp_d(Operand(esp, 0));
3183 __ mov(result_reg, Operand(esp, 0)); // Low word of answer is the result.
3184 __ add(Operand(esp), Immediate(kDoubleSize));
3185 __ bind(&done);
3186 } else {
3187      NearLabel done;
3188 Register temp_reg = ToRegister(instr->temporary());
3189 XMMRegister xmm_scratch = xmm0;
3190
3191 // If cvttsd2si succeeded, we're done. Otherwise, we attempt
3192 // manual conversion.
3193 __ j(not_equal, &done);
3194
3195 // Get high 32 bits of the input in result_reg and temp_reg.
3196 __ pshufd(xmm_scratch, input_reg, 1);
3197 __ movd(Operand(temp_reg), xmm_scratch);
3198 __ mov(result_reg, temp_reg);
3199
3200 // Prepare negation mask in temp_reg.
3201 __ sar(temp_reg, kBitsPerInt - 1);
3202
3203 // Extract the exponent from result_reg and subtract adjusted
3204 // bias from it. The adjustment is selected in a way such that
3205 // when the difference is zero, the answer is in the low 32 bits
3206 // of the input, otherwise a shift has to be performed.
3207 __ shr(result_reg, HeapNumber::kExponentShift);
3208 __ and_(result_reg,
3209 HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
3210 __ sub(Operand(result_reg),
3211 Immediate(HeapNumber::kExponentBias +
3212 HeapNumber::kExponentBits +
3213 HeapNumber::kMantissaBits));
3214 // Don't handle big (> kMantissaBits + kExponentBits == 63) or
3215 // special exponents.
3216 DeoptimizeIf(greater, instr->environment());
3217
3218 // Zero out the sign and the exponent in the input (by shifting
3219 // it to the left) and restore the implicit mantissa bit,
3220 // i.e. convert the input to unsigned int64 shifted left by
3221 // kExponentBits.
3222 ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
3223 // Minus zero has the most significant bit set and the other
3224 // bits cleared.
3225 __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
3226 __ psllq(input_reg, HeapNumber::kExponentBits);
3227 __ por(input_reg, xmm_scratch);
3228
3229 // Get the amount to shift the input right in xmm_scratch.
3230 __ neg(result_reg);
3231 __ movd(xmm_scratch, Operand(result_reg));
3232
3233 // Shift the input right and extract low 32 bits.
3234 __ psrlq(input_reg, xmm_scratch);
3235 __ movd(Operand(result_reg), input_reg);
3236
3237 // Use the prepared mask in temp_reg to negate the result if necessary.
3238 __ xor_(result_reg, Operand(temp_reg));
3239 __ sub(result_reg, Operand(temp_reg));
3240 __ bind(&done);
3241    }
3242 } else {
3243 NearLabel done;
3244 __ cvttsd2si(result_reg, Operand(input_reg));
3245 __ cvtsi2sd(xmm0, Operand(result_reg));
3246 __ ucomisd(xmm0, input_reg);
3247 DeoptimizeIf(not_equal, instr->environment());
3248 DeoptimizeIf(parity_even, instr->environment()); // NaN.
3249 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3250 // The integer converted back is equal to the original. We
3251 // only have to test if we got -0 as an input.
3252 __ test(result_reg, Operand(result_reg));
3253 __ j(not_zero, &done);
3254 __ movmskpd(result_reg, input_reg);
3255 // Bit 0 contains the sign of the double in input_reg.
3256 // If input was positive, we are ok and return 0, otherwise
3257 // deoptimize.
      __ and_(result_reg, 1);
      DeoptimizeIf(not_zero, instr->environment());
    }
    __ bind(&done);
  }
}


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister());
  __ test(ToRegister(input), Immediate(kSmiTagMask));
  DeoptimizeIf(instr->condition(), instr->environment());
}
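
// Smi tagging on ia32 uses the low bit (kSmiTag == 0, kSmiTagMask == 1):
// the integer 5 is stored as the word 0xA (5 << 1), while heap object
// pointers always have the low bit set. The test above therefore sets the
// zero flag exactly when the value is a smi, and instr->condition() picks
// which outcome deoptimizes.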


void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->input());
  Register temp = ToRegister(instr->temp());
  InstanceType first = instr->hydrogen()->first();
  InstanceType last = instr->hydrogen()->last();

  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
          static_cast<int8_t>(first));

  // If there is only one type in the interval, check for equality.
  if (first == last) {
    DeoptimizeIf(not_equal, instr->environment());
  } else {
    DeoptimizeIf(below, instr->environment());
    // Omit the upper-bound check when last is the last possible type.
    if (last != LAST_TYPE) {
      __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
              static_cast<int8_t>(last));
      DeoptimizeIf(above, instr->environment());
    }
  }
}
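
// A minimal C++ sketch of the interval check emitted above (the helper
// name is illustrative, not part of this file):
//
//   static bool InstanceTypeInRange(uint8_t type, uint8_t first,
//                                   uint8_t last) {
//     if (first == last) return type == first;   // single-type interval
//     if (type < first) return false;            // deopt-if-below
//     return last == LAST_TYPE || type <= last;  // deopt-if-above
//   }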


void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
  ASSERT(instr->input()->IsRegister());
  Register reg = ToRegister(instr->input());
  __ cmp(reg, instr->hydrogen()->target());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::DoCheckMap(LCheckMap* instr) {
  LOperand* input = instr->input();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         instr->hydrogen()->map());
  DeoptimizeIf(not_equal, instr->environment());
}


void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
  if (Heap::InNewSpace(*object)) {
    Handle<JSGlobalPropertyCell> cell =
        Factory::NewJSGlobalPropertyCell(object);
    __ mov(result, Operand::Cell(cell));
  } else {
    __ mov(result, object);
  }
}
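
// Pointers into new space cannot be embedded in generated code directly,
// since a scavenge may move the object; routing the load through a
// JSGlobalPropertyCell gives the GC a single, stable location to update.
// Objects outside new space are embedded as immediates and handled by the
// usual relocation information.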


void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
  Register reg = ToRegister(instr->temp());

  Handle<JSObject> holder = instr->holder();
  Handle<JSObject> current_prototype = instr->prototype();

  // Load prototype object.
  LoadHeapObject(reg, current_prototype);

  // Check prototype maps up to the holder.
  while (!current_prototype.is_identical_to(holder)) {
    __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
           Handle<Map>(current_prototype->map()));
    DeoptimizeIf(not_equal, instr->environment());
    current_prototype =
        Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
    // Load next prototype object.
    LoadHeapObject(reg, current_prototype);
  }

  // Check the holder map.
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
         Handle<Map>(current_prototype->map()));
  DeoptimizeIf(not_equal, instr->environment());
}
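
// The while loop above is unrolled at code-generation time: each prototype
// before the holder costs one LoadHeapObject plus one map compare/deopt,
// and the holder itself gets a final map compare/deopt.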


void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_elements()));

  // Pick the right runtime function or stub to call.
  int length = instr->hydrogen()->length();
  if (instr->hydrogen()->IsCopyOnWrite()) {
    ASSERT(instr->hydrogen()->depth() == 1);
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
  } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
  } else {
    FastCloneShallowArrayStub::Mode mode =
        FastCloneShallowArrayStub::CLONE_ELEMENTS;
    FastCloneShallowArrayStub stub(mode, length);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
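
// The dispatch above, as a decision table:
//
//   copy-on-write elements       -> FastCloneShallowArrayStub (COW mode)
//   nested literals (depth > 1)  -> Runtime::kCreateArrayLiteral
//   too long for the stub        -> Runtime::kCreateArrayLiteralShallow
//   otherwise                    -> FastCloneShallowArrayStub (clone mode)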


void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
  // Set up the parameters to the stub/runtime call.
  __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->constant_properties()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));

  // Pick the right runtime function to call.
  if (instr->hydrogen()->depth() > 1) {
    CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
  } else {
    CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
  }
}


void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
  NearLabel materialized;
  // Registers will be used as follows:
  // edi = JS function.
  // ecx = literals array.
  // ebx = regexp literal.
  // eax = regexp literal clone.
  __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(ecx, FieldOperand(edi, JSFunction::kLiteralsOffset));
  int literal_offset = FixedArray::kHeaderSize +
      instr->hydrogen()->literal_index() * kPointerSize;
  __ mov(ebx, FieldOperand(ecx, literal_offset));
  __ cmp(ebx, Factory::undefined_value());
  __ j(not_equal, &materialized);

  // Create the regexp literal using the runtime function.
  // The result will be in eax.
  __ push(ecx);
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
  __ push(Immediate(instr->hydrogen()->pattern()));
  __ push(Immediate(instr->hydrogen()->flags()));
  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
  __ mov(ebx, eax);

  __ bind(&materialized);
  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
  Label allocated, runtime_allocate;
  __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
  __ jmp(&allocated);

  __ bind(&runtime_allocate);
  __ push(ebx);
  __ push(Immediate(Smi::FromInt(size)));
  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
  __ pop(ebx);

  __ bind(&allocated);
  // Copy the content into the newly allocated memory.
  // (Unroll the copy loop once for better throughput.)
  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
    __ mov(edx, FieldOperand(ebx, i));
    __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
    __ mov(FieldOperand(eax, i), edx);
    __ mov(FieldOperand(eax, i + kPointerSize), ecx);
  }
  if ((size % (2 * kPointerSize)) != 0) {
    __ mov(edx, FieldOperand(ebx, size - kPointerSize));
    __ mov(FieldOperand(eax, size - kPointerSize), edx);
  }
}
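
// The copy loop moves two pointer-sized words per iteration; the trailing
// `if` picks up the odd word when size is not a multiple of 2 * kPointerSize.
// With a hypothetical size of 20 bytes on ia32 (kPointerSize == 4), the loop
// copies offsets 0/4 and 8/12, and the tail copies offset 16.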


void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
  // Use the fast case closure allocation code that allocates in new
  // space for nested functions that don't need literals cloning.
  Handle<SharedFunctionInfo> shared_info = instr->shared_info();
  bool pretenure = instr->hydrogen()->pretenure();
  if (shared_info->num_literals() == 0 && !pretenure) {
    FastNewClosureStub stub;
    __ push(Immediate(shared_info));
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else {
    __ push(esi);
    __ push(Immediate(shared_info));
    __ push(Immediate(pretenure
                      ? Factory::true_value()
                      : Factory::false_value()));
    CallRuntime(Runtime::kNewClosure, 3, instr);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  LOperand* input = instr->input();
  if (input->IsConstantOperand()) {
    __ push(ToImmediate(input));
  } else {
    __ push(ToOperand(input));
  }
  CallRuntime(Runtime::kTypeof, 1, instr);
}


void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
  Register input = ToRegister(instr->input());
  Register result = ToRegister(instr->result());
  Label true_label;
  Label false_label;
  NearLabel done;

  Condition final_branch_condition = EmitTypeofIs(&true_label,
                                                  &false_label,
                                                  input,
                                                  instr->type_literal());
  __ j(final_branch_condition, &true_label);
  __ bind(&false_label);
  __ mov(result, Handle<Object>(Heap::false_value()));
  __ jmp(&done);

  __ bind(&true_label);
  __ mov(result, Handle<Object>(Heap::true_value()));

  __ bind(&done);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->input());
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());
  Label* true_label = chunk_->GetAssemblyLabel(true_block);
  Label* false_label = chunk_->GetAssemblyLabel(false_block);

  Condition final_branch_condition = EmitTypeofIs(true_label,
                                                  false_label,
                                                  input,
                                                  instr->type_literal());

  EmitBranch(true_block, false_block, final_branch_condition);
}


Condition LCodeGen::EmitTypeofIs(Label* true_label,
                                 Label* false_label,
                                 Register input,
                                 Handle<String> type_name) {
  Condition final_branch_condition = no_condition;
  if (type_name->Equals(Heap::number_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, true_label);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           Factory::heap_number_map());
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::string_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    __ CmpInstanceType(input, FIRST_NONSTRING_TYPE);
    final_branch_condition = below;

  } else if (type_name->Equals(Heap::boolean_symbol())) {
    __ cmp(input, Handle<Object>(Heap::true_value()));
    __ j(equal, true_label);
    __ cmp(input, Handle<Object>(Heap::false_value()));
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::undefined_symbol())) {
    __ cmp(input, Factory::undefined_value());
    __ j(equal, true_label);
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (type_name->Equals(Heap::function_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
    __ j(equal, true_label);
    // Regular expressions => 'function' (they are callable).
    __ CmpInstanceType(input, JS_REGEXP_TYPE);
    final_branch_condition = equal;

  } else if (type_name->Equals(Heap::object_symbol())) {
    __ test(input, Immediate(kSmiTagMask));
    __ j(zero, false_label);
    __ cmp(input, Factory::null_value());
    __ j(equal, true_label);
    // Regular expressions => 'function', not 'object'.
    __ CmpObjectType(input, JS_REGEXP_TYPE, input);
    __ j(equal, false_label);
    // Check for undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    __ j(not_zero, false_label);
    // Check for JS objects => true.
    __ CmpInstanceType(input, FIRST_JS_OBJECT_TYPE);
    __ j(below, false_label);
    __ CmpInstanceType(input, LAST_JS_OBJECT_TYPE);
    final_branch_condition = below_equal;

  } else {
    final_branch_condition = not_equal;
    __ jmp(false_label);
    // A dead branch instruction will be generated after this point.
  }

  return final_branch_condition;
}
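
// Summary of the dispatch above:
//
//   "number"    -> smi, or map == heap_number_map
//   "string"    -> non-smi, not undetectable, type < FIRST_NONSTRING_TYPE
//   "boolean"   -> identical to true or false
//   "undefined" -> undefined, or a non-smi undetectable object
//   "function"  -> JS_FUNCTION_TYPE or JS_REGEXP_TYPE (regexps are callable)
//   "object"    -> null, or a detectable JS object that is not a regexp
//
// Any other literal compares unequal and falls through to false_label.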


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code is generated for a lazy bailout instruction. It is only used
  // to capture the environment after a call, for populating the safepoint
  // data with deoptimization data.
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  DeoptimizeIf(no_condition, instr->environment());
}


void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  LOperand* obj = instr->object();
  LOperand* key = instr->key();
  __ push(ToOperand(obj));
  if (key->IsConstantOperand()) {
    __ push(ToImmediate(key));
  } else {
    __ push(ToOperand(key));
  }
  RecordPosition(instr->pointer_map()->position());
  SafepointGenerator safepoint_generator(this,
                                         instr->pointer_map(),
                                         Safepoint::kNoDeoptimizationIndex);
  __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
}


void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Perform stack overflow check.
  NearLabel done;
  ExternalReference stack_limit = ExternalReference::address_of_stack_limit();
  __ cmp(esp, Operand::StaticVariable(stack_limit));
  __ j(above_equal, &done);

  StackCheckStub stub;
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&done);
}
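
// The stack grows downward on ia32, so esp >= stack_limit means there is
// headroom and the stub call is skipped. The limit also doubles as V8's
// interrupt-request mechanism: lowering it forces this slow path to be
// taken at the next stack check.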


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();
  environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
                                   instr->SpilledDoubleRegisterArray());

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  ASSERT(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment);
  ASSERT(osr_pc_offset_ == -1);
  osr_pc_offset_ = masm()->pc_offset();
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32