// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "assembler-x64.h"
#include "macro-assembler-x64.h"
#include "serialize.h"
#include "debug.h"

namespace v8 {
namespace internal {

MacroAssembler::MacroAssembler(void* buffer, int size)
    : Assembler(buffer, size),
      unresolved_(0),
      generating_stub_(false),
      allow_stub_calls_(true),
      code_object_(Heap::undefined_value()) {
}


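// The root list is addressed relative to r13, which the code generated here
// assumes holds the address of the roots array for the lifetime of generated
// code on x64.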
void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  movq(destination, Operand(r13, index << kPointerSizeLog2));
}


void MacroAssembler::PushRoot(Heap::RootListIndex index) {
  push(Operand(r13, index << kPointerSizeLog2));
}


void MacroAssembler::CompareRoot(Register with,
                                 Heap::RootListIndex index) {
  cmpq(with, Operand(r13, index << kPointerSizeLog2));
}


static void RecordWriteHelper(MacroAssembler* masm,
                              Register object,
                              Register addr,
                              Register scratch) {
  Label fast;

  // Compute the page start address from the heap object pointer, and reuse
  // the 'object' register for it.
  ASSERT(is_int32(~Page::kPageAlignmentMask));
  masm->and_(object,
             Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
  Register page_start = object;

  // Compute the bit address in the remembered set, i.e. the index of the
  // pointer within the page. Reuse 'addr' as pointer_offset.
  masm->subq(addr, page_start);
  masm->shr(addr, Immediate(kPointerSizeLog2));
  Register pointer_offset = addr;

  // If the bit offset lies beyond the normal remembered set range, it is in
  // the extra remembered set area of a large object.
  masm->cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
  masm->j(less, &fast);

  // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
  // extra remembered set after the large object.

  // Load the array length into 'scratch'.
  masm->movl(scratch,
             Operand(page_start,
                     Page::kObjectStartOffset + FixedArray::kLengthOffset));
  Register array_length = scratch;

  // The extra remembered set starts right after the large object (a
  // FixedArray), at
  //   page_start + kObjectStartOffset + objectSize
  // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
  // Add the delta between the end of the normal RSet and the start of the
  // extra RSet to 'page_start', so that addressing the bit using
  // 'pointer_offset' hits the extra RSet words.
  masm->lea(page_start,
            Operand(page_start, array_length, times_pointer_size,
                    Page::kObjectStartOffset + FixedArray::kHeaderSize
                        - Page::kRSetEndOffset));

  // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
  // to limit code size. We should probably evaluate this decision by
  // measuring the performance of an equivalent implementation using
  // "simpler" instructions.
  masm->bind(&fast);
  masm->bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
}


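// A stub wrapping RecordWriteHelper, so that call sites can share one
// out-of-line copy of the remembered set update instead of inlining it at
// every store site.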
class RecordWriteStub : public CodeStub {
 public:
  RecordWriteStub(Register object, Register addr, Register scratch)
      : object_(object), addr_(addr), scratch_(scratch) { }

  void Generate(MacroAssembler* masm);

 private:
  Register object_;
  Register addr_;
  Register scratch_;

#ifdef DEBUG
  void Print() {
    PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
           object_.code(), addr_.code(), scratch_.code());
  }
#endif

  // Minor key encoding in 12 bits of three registers (object, address and
  // scratch) OOOOAAAASSSS.
  class ScratchBits: public BitField<uint32_t, 0, 4> {};
  class AddressBits: public BitField<uint32_t, 4, 4> {};
  class ObjectBits: public BitField<uint32_t, 8, 4> {};

  Major MajorKey() { return RecordWrite; }

  int MinorKey() {
    // Encode the registers.
    return ObjectBits::encode(object_.code()) |
           AddressBits::encode(addr_.code()) |
           ScratchBits::encode(scratch_.code());
  }
};


void RecordWriteStub::Generate(MacroAssembler* masm) {
  RecordWriteHelper(masm, object_, addr_, scratch_);
  masm->ret(0);
}


// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
// All registers are clobbered by the operation.
void MacroAssembler::RecordWrite(Register object,
                                 int offset,
                                 Register value,
                                 Register scratch) {
  // First, check if a remembered set write is even needed. The tests below
  // catch stores of Smis and stores into the young generation (which does
  // not have space for the remembered set bits).
  Label done;

  // Test that the object address is not in the new space. We cannot
  // set remembered set bits in the new space.
  movq(value, object);
  ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
  and_(value, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
  movq(kScratchRegister, ExternalReference::new_space_start());
  cmpq(value, kScratchRegister);
  j(equal, &done);

  if ((offset > 0) && (offset < Page::kMaxHeapObjectSize)) {
    // Compute the bit offset in the remembered set, leave it in 'value'.
    lea(value, Operand(object, offset));
    ASSERT(is_int32(Page::kPageAlignmentMask));
    and_(value, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
    shr(value, Immediate(kObjectAlignmentBits));

    // Compute the page address from the heap object pointer, leave it in
    // 'object' (immediate value is sign extended).
    and_(object, Immediate(~Page::kPageAlignmentMask));

    // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
    // to limit code size. We should probably evaluate this decision by
    // measuring the performance of an equivalent implementation using
    // "simpler" instructions.
    bts(Operand(object, Page::kRSetOffset), value);
  } else {
    Register dst = scratch;
    if (offset != 0) {
      lea(dst, Operand(object, offset));
    } else {
      // Array access: calculate the destination address in the same manner
      // as KeyedStoreIC::GenerateGeneric. Multiply a smi by 4 to get an
      // offset into an array of pointers.
      lea(dst, Operand(object, dst, times_half_pointer_size,
                       FixedArray::kHeaderSize - kHeapObjectTag));
    }
    // If we are already generating a shared stub, not inlining the
    // record write code isn't going to save us any memory.
    if (generating_stub()) {
      RecordWriteHelper(this, object, dst, value);
    } else {
      RecordWriteStub stub(object, dst, value);
      CallStub(&stub);
    }
  }

  bind(&done);
}
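// Illustrative use only (offset and registers are hypothetical): after
// storing the value in rax into the field at 'kSomeOffset' of the object in
// rdx, the write barrier would be
//   RecordWrite(rdx, kSomeOffset, rax, rbx);
// after which rdx, rax and rbx must all be treated as clobbered.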


void MacroAssembler::Assert(Condition cc, const char* msg) {
  if (FLAG_debug_code) Check(cc, msg);
}


void MacroAssembler::Check(Condition cc, const char* msg) {
  Label L;
  j(cc, &L);
  Abort(msg);
  // will not return here
  bind(&L);
}


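// Jumps to 'then_label' if 'result' is zero while 'op' is negative, i.e.
// when an integer operation has produced a zero that would need to be
// represented as the floating point value -0.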
void MacroAssembler::NegativeZeroTest(Register result,
                                      Register op,
                                      Label* then_label) {
  Label ok;
  testl(result, result);
  j(not_zero, &ok);
  testl(op, op);
  j(sign, then_label);
  bind(&ok);
}


void MacroAssembler::Abort(const char* msg) {
  // We want to pass the msg string like a smi to avoid GC
  // problems, however msg is not guaranteed to be aligned
  // properly. Instead, we pass an aligned pointer that is
  // a proper v8 smi, but also pass the alignment difference
  // from the real pointer as a smi.
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  // Note: p0 might not be a valid Smi *value*, but it has a valid Smi tag.
  ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
#ifdef DEBUG
  if (msg != NULL) {
    RecordComment("Abort message: ");
    RecordComment(msg);
  }
#endif
  push(rax);
  movq(kScratchRegister, p0, RelocInfo::NONE);
  push(kScratchRegister);
  movq(kScratchRegister,
       reinterpret_cast<intptr_t>(Smi::FromInt(p1 - p0)),
       RelocInfo::NONE);
  push(kScratchRegister);
  CallRuntime(Runtime::kAbort, 2);
  // will not return here
}


void MacroAssembler::CallStub(CodeStub* stub) {
  ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
}


void MacroAssembler::StubReturn(int argc) {
  ASSERT(argc >= 1 && generating_stub());
  ret((argc - 1) * kPointerSize);
}


void MacroAssembler::IllegalOperation(int num_arguments) {
  if (num_arguments > 0) {
    addq(rsp, Immediate(num_arguments * kPointerSize));
  }
  LoadRoot(rax, Heap::kUndefinedValueRootIndex);
}


void MacroAssembler::CallRuntime(Runtime::FunctionId id, int num_arguments) {
  CallRuntime(Runtime::FunctionForId(id), num_arguments);
}


void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments matches the
  // expectation.
  if (f->nargs >= 0 && f->nargs != num_arguments) {
    IllegalOperation(num_arguments);
    return;
  }

  Runtime::FunctionId function_id =
      static_cast<Runtime::FunctionId>(f->stub_id);
  RuntimeStub stub(function_id, num_arguments);
  CallStub(&stub);
}


void MacroAssembler::TailCallRuntime(ExternalReference const& ext,
                                     int num_arguments,
                                     int result_size) {
  // ----------- S t a t e -------------
  //  -- rsp[0] : return address
  //  -- rsp[8] : argument num_arguments - 1
  //  ...
  //  -- rsp[8 * num_arguments] : argument 0 (receiver)
  // -----------------------------------

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  movq(rax, Immediate(num_arguments));
  JumpToBuiltin(ext, result_size);
}


void MacroAssembler::JumpToBuiltin(const ExternalReference& ext,
                                   int result_size) {
  // Set the entry point and jump to the C entry runtime stub.
  movq(rbx, ext);
  CEntryStub ces(result_size);
  movq(kScratchRegister, ces.GetCode(), RelocInfo::CODE_TARGET);
  jmp(kScratchRegister);
}


void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
  bool resolved;
  Handle<Code> code = ResolveBuiltin(id, &resolved);

  const char* name = Builtins::GetName(id);
  int argc = Builtins::GetArgumentsCount(id);

  movq(target, code, RelocInfo::EMBEDDED_OBJECT);
  if (!resolved) {
    uint32_t flags =
        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
        Bootstrapper::FixupFlagsUseCodeObject::encode(true);
    Unresolved entry = { pc_offset() - sizeof(intptr_t), flags, name };
    unresolved_.Add(entry);
  }
  addq(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
}


Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id,
                                            bool* resolved) {
  // Move the builtin function into the temporary function slot by
  // reading it from the builtins object. NOTE: We should be able to
  // reduce this to two instructions by putting the function table in
  // the global object instead of the "builtins" object and by using a
  // real register for the function.
  movq(rdx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  movq(rdx, FieldOperand(rdx, GlobalObject::kBuiltinsOffset));
  int builtins_offset =
      JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
  movq(rdi, FieldOperand(rdx, builtins_offset));

  return Builtins::GetCode(id, resolved);
}


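// Set picks the cheapest encoding for a 64-bit immediate: an xor for zero,
// a sign-extended 32-bit immediate when the value fits in an int32, a
// zero-extending 32-bit move when it fits in a uint32, and a full 64-bit
// move otherwise.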
void MacroAssembler::Set(Register dst, int64_t x) {
  if (x == 0) {
    xor_(dst, dst);
  } else if (is_int32(x)) {
    movq(dst, Immediate(x));
  } else if (is_uint32(x)) {
    movl(dst, Immediate(x));
  } else {
    movq(dst, x, RelocInfo::NONE);
  }
}


void MacroAssembler::Set(const Operand& dst, int64_t x) {
  if (x == 0) {
    xor_(kScratchRegister, kScratchRegister);
    movq(dst, kScratchRegister);
  } else if (is_int32(x)) {
    movq(dst, Immediate(x));
  } else if (is_uint32(x)) {
    movl(dst, Immediate(x));
  } else {
    movq(kScratchRegister, x, RelocInfo::NONE);
    movq(dst, kScratchRegister);
  }
}


// ----------------------------------------------------------------------------
// Smi tagging, untagging and tag detection.


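// A smi is here a 32-bit integer shifted left by kSmiTagSize (one bit) with
// a zero tag bit: tagging 5 yields 10, and untagging is an arithmetic shift
// right by one.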
void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
#ifdef DEBUG
  cmpq(src, Immediate(0xC0000000u));
  Check(positive, "Smi conversion overflow");
#endif
  if (dst.is(src)) {
    addl(dst, src);
  } else {
    lea(dst, Operand(src, src, times_1, 0));
  }
}


void MacroAssembler::Integer32ToSmi(Register dst,
                                    Register src,
                                    Label* on_overflow) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  addl(dst, src);
  j(overflow, on_overflow);
}


void MacroAssembler::Integer64AddToSmi(Register dst,
                                       Register src,
                                       int constant) {
#ifdef DEBUG
  movl(kScratchRegister, src);
  addl(kScratchRegister, Immediate(constant));
  Check(no_overflow, "Add-and-smi-convert overflow");
  Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
  Check(valid, "Add-and-smi-convert overflow");
#endif
  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
}


void MacroAssembler::SmiToInteger32(Register dst, Register src) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
  if (!dst.is(src)) {
    movl(dst, src);
  }
  sarl(dst, Immediate(kSmiTagSize));
}


void MacroAssembler::SmiToInteger64(Register dst, Register src) {
  ASSERT_EQ(1, kSmiTagSize);
  ASSERT_EQ(0, kSmiTag);
  movsxlq(dst, src);
  sar(dst, Immediate(kSmiTagSize));
}


void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                           Register src,
                                                           int power) {
  ASSERT(power >= 0);
  ASSERT(power < 64);
  if (power == 0) {
    SmiToInteger64(dst, src);
    return;
  }
  movsxlq(dst, src);
  // The tagged smi is already the value shifted left by one (kSmiTagSize),
  // so shifting by power - 1 yields value << power.
  shl(dst, Immediate(power - 1));
}


void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
  ASSERT_EQ(0, kSmiTag);
  testl(src, Immediate(kSmiTagMask));
  j(zero, on_smi);
}


void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi) {
  Condition not_smi = CheckNotSmi(src);
  j(not_smi, on_not_smi);
}


void MacroAssembler::JumpIfNotPositiveSmi(Register src,
                                          Label* on_not_positive_smi) {
  Condition not_positive_smi = CheckNotPositiveSmi(src);
  j(not_positive_smi, on_not_positive_smi);
}


void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
                                             int constant,
                                             Label* on_equals) {
  if (Smi::IsValid(constant)) {
    Condition are_equal = CheckSmiEqualsConstant(src, constant);
    j(are_equal, on_equals);
  }
}


void MacroAssembler::JumpIfNotValidSmiValue(Register src, Label* on_invalid) {
  Condition is_valid = CheckInteger32ValidSmiValue(src);
  j(ReverseCondition(is_valid), on_invalid);
}


void MacroAssembler::JumpIfNotBothSmi(Register src1,
                                      Register src2,
                                      Label* on_not_both_smi) {
  Condition not_both_smi = CheckNotBothSmi(src1, src2);
  j(not_both_smi, on_not_both_smi);
}

Condition MacroAssembler::CheckSmi(Register src) {
  testb(src, Immediate(kSmiTagMask));
  return zero;
}


Condition MacroAssembler::CheckNotSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testb(src, Immediate(kSmiTagMask));
  return not_zero;
}


Condition MacroAssembler::CheckPositiveSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
  return zero;
}


Condition MacroAssembler::CheckNotPositiveSmi(Register src) {
  ASSERT_EQ(0, kSmiTag);
  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
  return not_zero;
}


Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
  if (first.is(second)) {
    return CheckSmi(first);
  }
  movl(kScratchRegister, first);
  orl(kScratchRegister, second);
  return CheckSmi(kScratchRegister);
}


Condition MacroAssembler::CheckNotBothSmi(Register first, Register second) {
  ASSERT_EQ(0, kSmiTag);
  if (first.is(second)) {
    return CheckNotSmi(first);
  }
  movl(kScratchRegister, first);
  or_(kScratchRegister, second);
  return CheckNotSmi(kScratchRegister);
}


Condition MacroAssembler::CheckIsMinSmi(Register src) {
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  cmpl(src, Immediate(0x40000000));
  return equal;
}


Condition MacroAssembler::CheckSmiEqualsConstant(Register src, int constant) {
  if (constant == 0) {
    testl(src, src);
    return zero;
  }
  if (Smi::IsValid(constant)) {
    cmpl(src, Immediate(Smi::FromInt(constant)));
    return zero;
  }
  // Can't be equal.
  UNREACHABLE();
  return no_condition;
}


Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
  // A 32-bit integer value can be converted to a smi if it is in the
  // range [-2^30 .. 2^30-1]. That is equivalent to having its 32-bit
  // representation have bits 30 and 31 be equal.
  cmpl(src, Immediate(0xC0000000u));
  return positive;
}


void MacroAssembler::SmiNeg(Register dst,
                            Register src,
                            Label* on_not_smi_result) {
  if (!dst.is(src)) {
    movl(dst, src);
  }
  negl(dst);
  testl(dst, Immediate(0x7fffffff));
  // If the result is zero or 0x80000000, negation failed to create a smi.
  j(equal, on_not_smi_result);
}


void MacroAssembler::SmiAdd(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  addl(dst, src2);
  if (!dst.is(src1)) {
    j(overflow, on_not_smi_result);
  } else {
    Label smi_result;
    j(no_overflow, &smi_result);
    // Restore src1.
    subl(src1, src2);
    jmp(on_not_smi_result);
    bind(&smi_result);
  }
}


void MacroAssembler::SmiSub(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  subl(dst, src2);
  if (!dst.is(src1)) {
    j(overflow, on_not_smi_result);
  } else {
    Label smi_result;
    j(no_overflow, &smi_result);
    // Restore src1.
    addl(src1, src2);
    jmp(on_not_smi_result);
    bind(&smi_result);
  }
}


void MacroAssembler::SmiMul(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(src2));

  // Save src1 for the negative-zero check below; dst may alias src1 and is
  // clobbered by the untagging and multiply.
  movq(kScratchRegister, src1);
  SmiToInteger32(dst, src1);

  imull(dst, src2);
  j(overflow, on_not_smi_result);

  // Check for negative zero result. If product is zero, and one
  // argument is negative, go to slow case. The frame is unchanged
  // in this block, so local control flow can use a Label rather
  // than a JumpTarget.
  Label non_zero_result;
  testl(dst, dst);
  j(not_zero, &non_zero_result);

  // Test whether either operand is negative (the other must be zero).
  orl(kScratchRegister, src2);
  j(negative, on_not_smi_result);
  bind(&non_zero_result);
}


void MacroAssembler::SmiTryAddConstant(Register dst,
                                       Register src,
                                       int32_t constant,
                                       Label* on_not_smi_result) {
  // Does not assume that src is a smi.
  ASSERT_EQ(1, kSmiTagMask);
  ASSERT_EQ(0, kSmiTag);
  ASSERT(Smi::IsValid(constant));

  Register tmp = (src.is(dst) ? kScratchRegister : dst);
  movl(tmp, src);
  addl(tmp, Immediate(Smi::FromInt(constant)));
  if (tmp.is(kScratchRegister)) {
    j(overflow, on_not_smi_result);
    testl(tmp, Immediate(kSmiTagMask));
    j(not_zero, on_not_smi_result);
    movl(dst, tmp);
  } else {
    movl(kScratchRegister, Immediate(kSmiTagMask));
    cmovl(overflow, dst, kScratchRegister);
    testl(dst, kScratchRegister);
    j(not_zero, on_not_smi_result);
  }
}


void MacroAssembler::SmiAddConstant(Register dst,
                                    Register src,
                                    int32_t constant,
                                    Label* on_not_smi_result) {
  ASSERT(Smi::IsValid(constant));
  if (on_not_smi_result == NULL) {
    // The caller guarantees the addition cannot overflow.
    if (dst.is(src)) {
      addl(dst, Immediate(Smi::FromInt(constant)));
    } else {
      lea(dst, Operand(src, constant << kSmiTagSize));
    }
  } else {
    if (!dst.is(src)) {
      movl(dst, src);
    }
    addl(dst, Immediate(Smi::FromInt(constant)));
    if (!dst.is(src)) {
      j(overflow, on_not_smi_result);
    } else {
      Label result_ok;
      j(no_overflow, &result_ok);
      subl(dst, Immediate(Smi::FromInt(constant)));
      jmp(on_not_smi_result);
      bind(&result_ok);
    }
  }
}


void MacroAssembler::SmiSubConstant(Register dst,
                                    Register src,
                                    int32_t constant,
                                    Label* on_not_smi_result) {
  ASSERT(Smi::IsValid(constant));
  Smi* smi_value = Smi::FromInt(constant);
  if (dst.is(src)) {
    // Optimistic subtract - may change value of dst register,
    // if it has garbage bits in the higher half, but will not change
    // the value as a tagged smi.
    subl(dst, Immediate(smi_value));
    if (on_not_smi_result != NULL) {
      Label add_success;
      j(no_overflow, &add_success);
      addl(dst, Immediate(smi_value));
      jmp(on_not_smi_result);
      bind(&add_success);
    }
  } else {
    UNIMPLEMENTED();  // Not used yet.
  }
}


void MacroAssembler::SmiDiv(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  // Check for 0 divisor (result is +/-Infinity).
  Label positive_divisor;
  testl(src2, src2);
  j(zero, on_not_smi_result);
  j(positive, &positive_divisor);
  // Check for negative zero result. If the dividend is zero, and the
  // divisor is negative, return a floating point negative zero.
  testl(src1, src1);
  j(zero, on_not_smi_result);
  bind(&positive_divisor);

  // Sign extend src1 into edx:eax.
  if (!src1.is(rax)) {
    movl(rax, src1);
  }
  cdq();

  idivl(src2);
  // Check for the corner case of dividing the most negative smi by
  // -1. We cannot use the overflow flag, since it is not set by
  // the idiv instruction.
  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  cmpl(rax, Immediate(0x40000000));
  j(equal, on_not_smi_result);
  // Check that the remainder is zero.
  testl(rdx, rdx);
  j(not_zero, on_not_smi_result);
  // Tag the result and store it in the destination register.
  Integer32ToSmi(dst, rax);
}


void MacroAssembler::SmiMod(Register dst,
                            Register src1,
                            Register src2,
                            Label* on_not_smi_result) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!src2.is(rax));
  ASSERT(!src2.is(rdx));
  ASSERT(!src1.is(rdx));

  testl(src2, src2);
  j(zero, on_not_smi_result);

  if (src1.is(rax)) {
    // Must remember the value to see if a zero result should
    // be a negative zero.
    movl(kScratchRegister, rax);
  } else {
    movl(rax, src1);
  }
  // Sign extend eax into edx:eax.
  cdq();
  idivl(src2);
  // Check for a negative zero result. If the result is zero, and the
  // dividend is negative, return a floating point negative zero.
  Label non_zero_result;
  testl(rdx, rdx);
  j(not_zero, &non_zero_result);
  if (src1.is(rax)) {
    testl(kScratchRegister, kScratchRegister);
  } else {
    testl(src1, src1);
  }
  j(negative, on_not_smi_result);
  bind(&non_zero_result);
  if (!dst.is(rdx)) {
    movl(dst, rdx);
  }
}


void MacroAssembler::SmiNot(Register dst, Register src) {
  if (dst.is(src)) {
    not_(dst);
    // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
    xor_(src, Immediate(kSmiTagMask));
  } else {
    ASSERT_EQ(0, kSmiTag);
    lea(dst, Operand(src, kSmiTagMask));
    not_(dst);
  }
}


void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  and_(dst, src2);
}


void MacroAssembler::SmiAndConstant(Register dst, Register src, int constant) {
  ASSERT(Smi::IsValid(constant));
  if (!dst.is(src)) {
    movl(dst, src);
  }
  and_(dst, Immediate(Smi::FromInt(constant)));
}


void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  or_(dst, src2);
}


void MacroAssembler::SmiOrConstant(Register dst, Register src, int constant) {
  ASSERT(Smi::IsValid(constant));
  if (!dst.is(src)) {
    movl(dst, src);
  }
  or_(dst, Immediate(Smi::FromInt(constant)));
}


void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1)) {
    movl(dst, src1);
  }
  xor_(dst, src2);
}


void MacroAssembler::SmiXorConstant(Register dst, Register src, int constant) {
  ASSERT(Smi::IsValid(constant));
  if (!dst.is(src)) {
    movl(dst, src);
  }
  xor_(dst, Immediate(Smi::FromInt(constant)));
}


void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  if (shift_value > 0) {
    if (dst.is(src)) {
      sarl(dst, Immediate(shift_value));
      and_(dst, Immediate(~kSmiTagMask));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}


void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
                                                  Register src,
                                                  int shift_value,
                                                  Label* on_not_smi_result) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movl(dst, src);
    // Untag the smi.
    sarl(dst, Immediate(kSmiTagSize));
    if (shift_value < 2) {
      // A negative Smi shifted right two is in the positive Smi range,
      // but if shifted only by zero or one, it never is.
      j(negative, on_not_smi_result);
    }
    if (shift_value > 0) {
      // Do the right shift on the integer value.
      shrl(dst, Immediate(shift_value));
    }
    // Re-tag the result.
    addl(dst, dst);
  }
}


void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                          Register src,
                                          int shift_value,
                                          Label* on_not_smi_result) {
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movl(dst, src);
    if (shift_value > 0) {
      // Treat dst as an untagged integer value equal to two times the
      // smi value of src, i.e., already shifted left by one.
      if (shift_value > 1) {
        shll(dst, Immediate(shift_value - 1));
      }
      // Convert int result to Smi, checking that it is in smi range.
      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
      Integer32ToSmi(dst, dst, on_not_smi_result);
    }
  }
}


void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result) {
  ASSERT(!dst.is(rcx));
  Label result_ok;
  // Untag both operands.
  SmiToInteger32(dst, src1);
  SmiToInteger32(rcx, src2);
  shll(dst);
  // Check that the *signed* result fits in a smi.
  Condition is_valid = CheckInteger32ValidSmiValue(dst);
  j(is_valid, &result_ok);
  // Restore the relevant bits of the source registers
  // and call the slow version.
  if (dst.is(src1)) {
    shrl(dst);
    Integer32ToSmi(dst, dst);
  }
  Integer32ToSmi(rcx, rcx);
  jmp(on_not_smi_result);
  bind(&result_ok);
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result) {
  ASSERT(!dst.is(rcx));
  Label result_ok;
  // Untag both operands.
  SmiToInteger32(dst, src1);
  SmiToInteger32(rcx, src2);

  shrl(dst);
  // Check that the *unsigned* result fits in a smi.
  // I.e., that it is a valid positive smi value. The positive smi
  // values are 0..0x3fffffff, i.e., neither of the top-most two
  // bits can be set.
  //
  // These two cases can only happen with shifts by 0 or 1 when
  // handed a valid smi. If the answer cannot be represented by a
  // smi, restore the left and right arguments, and jump to slow
  // case. The low bit of the left argument may be lost, but only
  // in a case where it is dropped anyway.
  testl(dst, Immediate(0xc0000000));
  j(zero, &result_ok);
  if (dst.is(src1)) {
    shll(dst);
    Integer32ToSmi(dst, dst);
  }
  Integer32ToSmi(rcx, rcx);
  jmp(on_not_smi_result);
  bind(&result_ok);
  // Smi-tag the result in dst.
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  ASSERT(!dst.is(rcx));
  // Untag both operands.
  SmiToInteger32(dst, src1);
  SmiToInteger32(rcx, src2);
  // Shift as integer.
  sarl(dst);
  // Retag result.
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smis) {
  ASSERT(!dst.is(src1));
  ASSERT(!dst.is(src2));
  // Both operands must not be smis.
#ifdef DEBUG
  Condition not_both_smis = CheckNotBothSmi(src1, src2);
  Check(not_both_smis, "Both registers were smis.");
#endif
  ASSERT_EQ(0, kSmiTag);
  ASSERT_EQ(0, Smi::FromInt(0));
  movq(kScratchRegister, Immediate(kSmiTagMask));
  and_(kScratchRegister, src1);
  testl(kScratchRegister, src2);
  j(not_zero, on_not_smis);
  // One operand is a smi.

  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
  // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
  subq(kScratchRegister, Immediate(1));
  // If src1 is a smi, then the scratch register is all 1s, else it is all 0s.
  movq(dst, src1);
  xor_(dst, src2);
  and_(dst, kScratchRegister);
  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
  xor_(dst, src1);
  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
}


SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
  ASSERT(is_uint6(shift));
  if (shift == 0) {  // times_1.
    SmiToInteger32(dst, src);
    return SmiIndex(dst, times_1);
  }
  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
    // We expect that all smis are actually zero-padded. If this holds after
    // checking, this line can be omitted.
    movl(dst, src);  // Ensure that the smi is zero-padded.
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
  }
  // Shift by shift-kSmiTagSize.
  movl(dst, src);  // Ensure that the smi is zero-padded.
  shl(dst, Immediate(shift - kSmiTagSize));
  return SmiIndex(dst, times_1);
}
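// Illustrative use (hypothetical, assuming SmiIndex exposes 'reg' and
// 'scale'): to address element i (a smi in rbx) of a FixedArray in rdx:
//   SmiIndex index = SmiToIndex(rbx, rbx, kPointerSizeLog2);
//   movq(rax, FieldOperand(rdx, index.reg, index.scale,
//                          FixedArray::kHeaderSize));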


SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  // Register src holds a positive smi.
  ASSERT(is_uint6(shift));
  if (shift == 0) {  // times_1.
    SmiToInteger32(dst, src);
    neg(dst);
    return SmiIndex(dst, times_1);
  }
  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
    movl(dst, src);
    neg(dst);
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
  }
  // Shift by shift-kSmiTagSize.
  movl(dst, src);
  neg(dst);
  shl(dst, Immediate(shift - kSmiTagSize));
  return SmiIndex(dst, times_1);
}


bool MacroAssembler::IsUnsafeSmi(Smi* value) {
  return false;
}


void MacroAssembler::LoadUnsafeSmi(Register dst, Smi* source) {
  UNIMPLEMENTED();
}


void MacroAssembler::Move(Register dst, Handle<Object> source) {
  ASSERT(!source->IsFailure());
  if (source->IsSmi()) {
    if (IsUnsafeSmi(source)) {
      LoadUnsafeSmi(dst, source);
    } else {
      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
      movq(dst, Immediate(smi));
    }
  } else {
    movq(dst, source, RelocInfo::EMBEDDED_OBJECT);
  }
}


void MacroAssembler::Move(const Operand& dst, Handle<Object> source) {
  if (source->IsSmi()) {
    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
    movq(dst, Immediate(smi));
  } else {
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    movq(dst, kScratchRegister);
  }
}


void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
  Move(kScratchRegister, source);
  cmpq(dst, kScratchRegister);
}


void MacroAssembler::Cmp(const Operand& dst, Handle<Object> source) {
  if (source->IsSmi()) {
    if (IsUnsafeSmi(source)) {
      LoadUnsafeSmi(kScratchRegister, source);
      cmpl(dst, kScratchRegister);
    } else {
      // For smi-comparison, it suffices to compare the low 32 bits.
      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
      cmpl(dst, Immediate(smi));
    }
  } else {
    ASSERT(source->IsHeapObject());
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    cmpq(dst, kScratchRegister);
  }
}


void MacroAssembler::Push(Handle<Object> source) {
  if (source->IsSmi()) {
    if (IsUnsafeSmi(source)) {
      LoadUnsafeSmi(kScratchRegister, source);
      push(kScratchRegister);
    } else {
      int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(*source));
      push(Immediate(smi));
    }
  } else {
    ASSERT(source->IsHeapObject());
    movq(kScratchRegister, source, RelocInfo::EMBEDDED_OBJECT);
    push(kScratchRegister);
  }
}


void MacroAssembler::Push(Smi* source) {
  if (IsUnsafeSmi(source)) {
    LoadUnsafeSmi(kScratchRegister, source);
    push(kScratchRegister);
  } else {
    int32_t smi = static_cast<int32_t>(reinterpret_cast<intptr_t>(source));
    push(Immediate(smi));
  }
}


void MacroAssembler::Jump(ExternalReference ext) {
  movq(kScratchRegister, ext);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  movq(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  movq(kScratchRegister, code_object, rmode);
#ifdef DEBUG
  Label target;
  bind(&target);
#endif
  jmp(kScratchRegister);
#ifdef DEBUG
  ASSERT_EQ(kCallTargetAddressOffset,
            SizeOfCodeGeneratedSince(&target) + kPointerSize);
#endif
}


void MacroAssembler::Call(ExternalReference ext) {
  movq(kScratchRegister, ext);
  call(kScratchRegister);
}


void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
  movq(kScratchRegister, destination, rmode);
  call(kScratchRegister);
}


void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
  ASSERT(RelocInfo::IsCodeTarget(rmode));
  WriteRecordedPositions();
  movq(kScratchRegister, code_object, rmode);
#ifdef DEBUG
  // Patch target is kPointerSize bytes *before* target label.
  Label target;
  bind(&target);
#endif
  call(kScratchRegister);
#ifdef DEBUG
  ASSERT_EQ(kCallTargetAddressOffset,
            SizeOfCodeGeneratedSince(&target) + kPointerSize);
#endif
}


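// After PushTryHandler the handler frame is, from the stack top: the next
// handler link at rsp[0], then the saved frame pointer (NULL for entry
// frames), the handler state, and finally the return address.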
void MacroAssembler::PushTryHandler(CodeLocation try_location,
                                    HandlerType type) {
  // Adjust this code if not the case.
  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);

  // The pc (return address) is already on TOS. This code pushes state,
  // frame pointer and current handler. Check that they are expected
  // next on the stack, in that order.
  ASSERT_EQ(StackHandlerConstants::kStateOffset,
            StackHandlerConstants::kPCOffset - kPointerSize);
  ASSERT_EQ(StackHandlerConstants::kFPOffset,
            StackHandlerConstants::kStateOffset - kPointerSize);
  ASSERT_EQ(StackHandlerConstants::kNextOffset,
            StackHandlerConstants::kFPOffset - kPointerSize);

  if (try_location == IN_JAVASCRIPT) {
    if (type == TRY_CATCH_HANDLER) {
      push(Immediate(StackHandler::TRY_CATCH));
    } else {
      push(Immediate(StackHandler::TRY_FINALLY));
    }
    push(rbp);
  } else {
    ASSERT(try_location == IN_JS_ENTRY);
    // The frame pointer does not point to a JS frame so we save NULL
    // for rbp. We expect the code throwing an exception to check rbp
    // before dereferencing it to restore the context.
    push(Immediate(StackHandler::ENTRY));
    push(Immediate(0));  // NULL frame pointer.
  }
  // Save the current handler.
  movq(kScratchRegister, ExternalReference(Top::k_handler_address));
  push(Operand(kScratchRegister, 0));
  // Link this handler.
  movq(Operand(kScratchRegister, 0), rsp);
}


void MacroAssembler::Ret() {
  ret(0);
}


void MacroAssembler::FCmp() {
  fucompp();
  push(rax);
  fnstsw_ax();
  if (CpuFeatures::IsSupported(CpuFeatures::SAHF)) {
    sahf();
  } else {
    shrl(rax, Immediate(8));
    and_(rax, Immediate(0xFF));
    push(rax);
    popfq();
  }
  pop(rax);
}


void MacroAssembler::CmpObjectType(Register heap_object,
                                   InstanceType type,
                                   Register map) {
  movq(map, FieldOperand(heap_object, HeapObject::kMapOffset));
  CmpInstanceType(map, type);
}
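// Typical (hypothetical) use of CmpObjectType: check that rax holds a
// JSFunction, leaving its map in rbx:
//   CmpObjectType(rax, JS_FUNCTION_TYPE, rbx);
//   j(not_equal, &miss);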

void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
  cmpb(FieldOperand(map, Map::kInstanceTypeOffset),
       Immediate(static_cast<int8_t>(type)));
}


void MacroAssembler::TryGetFunctionPrototype(Register function,
                                             Register result,
                                             Label* miss) {
  // Check that the receiver isn't a smi.
  testl(function, Immediate(kSmiTagMask));
  j(zero, miss);

  // Check that the function really is a function.
  CmpObjectType(function, JS_FUNCTION_TYPE, result);
  j(not_equal, miss);

  // Make sure that the function has an instance prototype.
  Label non_instance;
  testb(FieldOperand(result, Map::kBitFieldOffset),
        Immediate(1 << Map::kHasNonInstancePrototype));
  j(not_zero, &non_instance);

  // Get the prototype or initial map from the function.
  movq(result,
       FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // If the prototype or initial map is the hole, don't return it and
  // simply miss the cache instead. This will allow us to allocate a
  // prototype object on-demand in the runtime system.
  CompareRoot(result, Heap::kTheHoleValueRootIndex);
  j(equal, miss);

  // If the function does not have an initial map, we're done.
  Label done;
  CmpObjectType(result, MAP_TYPE, kScratchRegister);
  j(not_equal, &done);

  // Get the prototype from the initial map.
  movq(result, FieldOperand(result, Map::kPrototypeOffset));
  jmp(&done);

  // Non-instance prototype: Fetch prototype from constructor field
  // in initial map.
  bind(&non_instance);
  movq(result, FieldOperand(result, Map::kConstructorOffset));

  // All done.
  bind(&done);
}


void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    movq(kScratchRegister, ExternalReference(counter));
    movl(Operand(kScratchRegister, 0), Immediate(value));
  }
}


void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    movq(kScratchRegister, ExternalReference(counter));
    Operand operand(kScratchRegister, 0);
    if (value == 1) {
      incl(operand);
    } else {
      addl(operand, Immediate(value));
    }
  }
}


void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
  ASSERT(value > 0);
  if (FLAG_native_code_counters && counter->Enabled()) {
    movq(kScratchRegister, ExternalReference(counter));
    Operand operand(kScratchRegister, 0);
    if (value == 1) {
      decl(operand);
    } else {
      subl(operand, Immediate(value));
    }
  }
}


#ifdef ENABLE_DEBUGGER_SUPPORT

void MacroAssembler::PushRegistersFromMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Push the content of the memory location to the stack.
  for (int i = 0; i < kNumJSCallerSaved; i++) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      ExternalReference reg_addr =
          ExternalReference(Debug_Address::Register(i));
      movq(kScratchRegister, reg_addr);
      push(Operand(kScratchRegister, 0));
    }
  }
}


void MacroAssembler::SaveRegistersToMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the content of registers to memory location.
  for (int i = 0; i < kNumJSCallerSaved; i++) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      Register reg = { r };
      ExternalReference reg_addr =
          ExternalReference(Debug_Address::Register(i));
      movq(kScratchRegister, reg_addr);
      movq(Operand(kScratchRegister, 0), reg);
    }
  }
}


void MacroAssembler::RestoreRegistersFromMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the content of memory location to registers.
  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      Register reg = { r };
      ExternalReference reg_addr =
          ExternalReference(Debug_Address::Register(i));
      movq(kScratchRegister, reg_addr);
      movq(reg, Operand(kScratchRegister, 0));
    }
  }
}


void MacroAssembler::PopRegistersToMemory(RegList regs) {
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Pop the content from the stack to the memory location.
  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      ExternalReference reg_addr =
          ExternalReference(Debug_Address::Register(i));
      movq(kScratchRegister, reg_addr);
      pop(Operand(kScratchRegister, 0));
    }
  }
}


void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
                                                    Register scratch,
                                                    RegList regs) {
  ASSERT(!scratch.is(kScratchRegister));
  ASSERT(!base.is(kScratchRegister));
  ASSERT(!base.is(scratch));
  ASSERT((regs & ~kJSCallerSaved) == 0);
  // Copy the content of the stack to the memory location and adjust base.
  for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
    int r = JSCallerSavedCode(i);
    if ((regs & (1 << r)) != 0) {
      movq(scratch, Operand(base, 0));
      ExternalReference reg_addr =
          ExternalReference(Debug_Address::Register(i));
      movq(kScratchRegister, reg_addr);
      movq(Operand(kScratchRegister, 0), scratch);
      lea(base, Operand(base, kPointerSize));
    }
  }
}

#endif  // ENABLE_DEBUGGER_SUPPORT


void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
  bool resolved;
  Handle<Code> code = ResolveBuiltin(id, &resolved);

  // Calls are not allowed in some stubs.
  ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());

  // Rely on the assertion to check that the number of provided
  // arguments matches the expected number of arguments. Fake a
  // parameter count to avoid emitting code to do the check.
  ParameterCount expected(0);
  InvokeCode(Handle<Code>(code), expected, expected,
             RelocInfo::CODE_TARGET, flag);

  const char* name = Builtins::GetName(id);
  int argc = Builtins::GetArgumentsCount(id);
  // The target address for the jump is stored as an immediate at offset
  // kCallTargetAddressOffset.
  if (!resolved) {
    uint32_t flags =
        Bootstrapper::FixupFlagsArgumentsCount::encode(argc) |
        Bootstrapper::FixupFlagsIsPCRelative::encode(false) |
        Bootstrapper::FixupFlagsUseCodeObject::encode(false);
    Unresolved entry =
        { pc_offset() - kCallTargetAddressOffset, flags, name };
    unresolved_.Add(entry);
  }
}


void MacroAssembler::InvokePrologue(const ParameterCount& expected,
                                    const ParameterCount& actual,
                                    Handle<Code> code_constant,
                                    Register code_register,
                                    Label* done,
                                    InvokeFlag flag) {
  bool definitely_matches = false;
  Label invoke;
  if (expected.is_immediate()) {
    ASSERT(actual.is_immediate());
    if (expected.immediate() == actual.immediate()) {
      definitely_matches = true;
    } else {
      movq(rax, Immediate(actual.immediate()));
      if (expected.immediate() ==
          SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
        // Don't worry about adapting arguments for built-ins that
        // don't want that done. Skip adaptation code by making it look
        // like we have a match between expected and actual number of
        // arguments.
        definitely_matches = true;
      } else {
        movq(rbx, Immediate(expected.immediate()));
      }
    }
  } else {
    if (actual.is_immediate()) {
      // Expected is in register, actual is immediate. This is the
      // case when we invoke function values without going through the
      // IC mechanism.
      cmpq(expected.reg(), Immediate(actual.immediate()));
      j(equal, &invoke);
      ASSERT(expected.reg().is(rbx));
      movq(rax, Immediate(actual.immediate()));
    } else if (!expected.reg().is(actual.reg())) {
      // Both expected and actual are in (different) registers. This
      // is the case when we invoke functions using call and apply.
      cmpq(expected.reg(), actual.reg());
      j(equal, &invoke);
      ASSERT(actual.reg().is(rax));
      ASSERT(expected.reg().is(rbx));
    }
  }

  if (!definitely_matches) {
    Handle<Code> adaptor =
        Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
    if (!code_constant.is_null()) {
      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
    } else if (!code_register.is(rdx)) {
      movq(rdx, code_register);
    }

    if (flag == CALL_FUNCTION) {
      Call(adaptor, RelocInfo::CODE_TARGET);
      jmp(done);
    } else {
      Jump(adaptor, RelocInfo::CODE_TARGET);
    }
    bind(&invoke);
  }
}


void MacroAssembler::InvokeCode(Register code,
                                const ParameterCount& expected,
                                const ParameterCount& actual,
                                InvokeFlag flag) {
  Label done;
  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
  if (flag == CALL_FUNCTION) {
    call(code);
  } else {
    ASSERT(flag == JUMP_FUNCTION);
    jmp(code);
  }
  bind(&done);
}


1634void MacroAssembler::InvokeCode(Handle<Code> code,
1635 const ParameterCount& expected,
1636 const ParameterCount& actual,
1637 RelocInfo::Mode rmode,
1638 InvokeFlag flag) {
1639 Label done;
1640 Register dummy = rax;
1641 InvokePrologue(expected, actual, code, dummy, &done, flag);
ager@chromium.orgeadaf222009-06-16 09:43:10 +00001642 if (flag == CALL_FUNCTION) {
ager@chromium.org3e875802009-06-29 08:26:34 +00001643 Call(code, rmode);
ager@chromium.orgeadaf222009-06-16 09:43:10 +00001644 } else {
1645 ASSERT(flag == JUMP_FUNCTION);
ager@chromium.org3e875802009-06-29 08:26:34 +00001646 Jump(code, rmode);
ager@chromium.orgeadaf222009-06-16 09:43:10 +00001647 }
1648 bind(&done);
1649}


void MacroAssembler::InvokeFunction(Register function,
                                    const ParameterCount& actual,
                                    InvokeFlag flag) {
  ASSERT(function.is(rdi));
  movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
  movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
  movsxlq(rbx,
          FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
  movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
  // Advances rdx to the end of the Code object header, to the start of
  // the executable code.
  lea(rdx, FieldOperand(rdx, Code::kHeaderSize));

  ParameterCount expected(rbx);
  InvokeCode(rdx, expected, actual, flag);
}
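

// Sketch (added, hypothetical): the call-site shape InvokeFunction expects.
// The JSFunction must already be in rdi; the routine loads the context into
// rsi, the formal parameter count into rbx, and the code entry into rdx.
//
//   masm->movq(rdi, Operand(rsp, 2 * kPointerSize));  // assumed stack slot
//   ParameterCount actual(3);                         // three args pushed
//   masm->InvokeFunction(rdi, actual, CALL_FUNCTION);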


void MacroAssembler::EnterFrame(StackFrame::Type type) {
  push(rbp);
  movq(rbp, rsp);
  push(rsi);  // Context.
  push(Immediate(Smi::FromInt(type)));
  movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
  push(kScratchRegister);
  if (FLAG_debug_code) {
    movq(kScratchRegister,
         Factory::undefined_value(),
         RelocInfo::EMBEDDED_OBJECT);
    cmpq(Operand(rsp, 0), kScratchRegister);
    Check(not_equal, "code object not properly patched");
  }
}
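

// Added commentary (not in the original source): stack layout after
// EnterFrame, relative to the new rbp:
//
//   rbp + 8   return address of the caller
//   rbp + 0   saved rbp (caller's frame pointer)
//   rbp - 8   context (rsi)
//   rbp - 16  frame type marker, as a smi
//   rbp - 24  code object slot (debug code checks it has been patched
//             from undefined to the real code object)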


void MacroAssembler::LeaveFrame(StackFrame::Type type) {
  if (FLAG_debug_code) {
    movq(kScratchRegister, Immediate(Smi::FromInt(type)));
    cmpq(Operand(rbp, StandardFrameConstants::kMarkerOffset), kScratchRegister);
    Check(equal, "stack frame types must match");
  }
  movq(rsp, rbp);
  pop(rbp);
}


void MacroAssembler::EnterExitFrame(StackFrame::Type type, int result_size) {
  ASSERT(type == StackFrame::EXIT || type == StackFrame::EXIT_DEBUG);

  // Set up the frame structure on the stack.
  // All constants are relative to the frame pointer of the exit frame.
  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
  push(rbp);
  movq(rbp, rsp);

  // Reserve room for the entry stack pointer and push the debug marker.
  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
  push(Immediate(0));  // Saved entry sp, patched before the call.
  push(Immediate(type == StackFrame::EXIT_DEBUG ? 1 : 0));

  // Save the frame pointer and the context in top.
  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
  ExternalReference context_address(Top::k_context_address);
  movq(r14, rax);  // Back up rax before we use it.

  movq(rax, rbp);
  store_rax(c_entry_fp_address);
  movq(rax, rsi);
  store_rax(context_address);

  // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
  // so it must be retained across the C call.
  int offset = StandardFrameConstants::kCallerSPOffset - kPointerSize;
  lea(r15, Operand(rbp, r14, times_pointer_size, offset));

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Save the state of all registers to the stack from the memory
  // location. This is needed to allow nested break points.
  if (type == StackFrame::EXIT_DEBUG) {
    // TODO(1243899): This should be symmetric to
    // CopyRegistersFromStackToMemory() but it isn't! esp is assumed
    // correct here, but computed for the other call. Very error
    // prone! FIX THIS. Actually there are deeper problems with
    // register saving than this asymmetry (see the bug report
    // associated with this issue).
    PushRegistersFromMemory(kJSCallerSaved);
  }
#endif

#ifdef _WIN64
  // Reserve space on the stack for result and argument structures, if needed.
  int result_stack_space = (result_size < 2) ? 0 : result_size * kPointerSize;
  // Reserve space for the Arguments object. The Windows 64-bit ABI
  // requires us to pass this structure as a pointer to its location on
  // the stack. The structure contains 2 values.
  int argument_stack_space = 2 * kPointerSize;
  // We also need backing space for 4 parameters, even though
  // we only pass one or two parameters, and they are passed in registers.
  int argument_mirror_space = 4 * kPointerSize;
  int total_stack_space =
      argument_mirror_space + argument_stack_space + result_stack_space;
  subq(rsp, Immediate(total_stack_space));
#endif

  // Get the required frame alignment for the OS.
  static const int kFrameAlignment = OS::ActivationFrameAlignment();
  if (kFrameAlignment > 0) {
    ASSERT(IsPowerOf2(kFrameAlignment));
    movq(kScratchRegister, Immediate(-kFrameAlignment));
    and_(rsp, kScratchRegister);
  }

  // Patch the saved entry sp.
  movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
}
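

// Added commentary (not in the original source): layout of the exit frame
// built above, relative to the new rbp (matching the ASSERTed constants):
//
//   rbp + 16  caller SP displacement (kCallerSPDisplacement)
//   rbp + 8   caller's return address (kCallerPCOffset)
//   rbp + 0   saved rbp (kCallerFPOffset)
//   rbp - 8   entry sp slot (kSPOffset), patched at the end of EnterExitFrame
//   rbp - 16  debug marker: 1 for EXIT_DEBUG frames, 0 otherwise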


void MacroAssembler::LeaveExitFrame(StackFrame::Type type, int result_size) {
  // Registers:
  // r15 : argv
#ifdef ENABLE_DEBUGGER_SUPPORT
  // Restore the memory copy of the registers by digging them out from
  // the stack. This is needed to allow nested break points.
  if (type == StackFrame::EXIT_DEBUG) {
    // It's okay to clobber register rbx below because we don't need
    // the function pointer after this.
    const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize;
    int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize;
    lea(rbx, Operand(rbp, kOffset));
    CopyRegistersFromStackToMemory(rbx, rcx, kJSCallerSaved);
  }
#endif

  // Get the return address from the stack and restore the frame pointer.
  movq(rcx, Operand(rbp, 1 * kPointerSize));
  movq(rbp, Operand(rbp, 0 * kPointerSize));

#ifdef _WIN64
  // If the return value is on the stack, pop it into registers.
  if (result_size > 1) {
    ASSERT_EQ(2, result_size);
    // Position above 4 argument mirrors and the arguments object.
    movq(rax, Operand(rsp, 6 * kPointerSize));
    movq(rdx, Operand(rsp, 7 * kPointerSize));
  }
#endif

  // Pop everything up to and including the arguments and the receiver
  // from the caller stack.
  lea(rsp, Operand(r15, 1 * kPointerSize));

  // Restore the current context from top and clear it in debug mode.
  ExternalReference context_address(Top::k_context_address);
  movq(kScratchRegister, context_address);
  movq(rsi, Operand(kScratchRegister, 0));
#ifdef DEBUG
  movq(Operand(kScratchRegister, 0), Immediate(0));
#endif

  // Push the return address to get ready to return.
  push(rcx);

  // Clear the top frame.
  ExternalReference c_entry_fp_address(Top::k_c_entry_fp_address);
  movq(kScratchRegister, c_entry_fp_address);
  movq(Operand(kScratchRegister, 0), Immediate(0));
}
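

// Sketch (added, hypothetical): how EnterExitFrame/LeaveExitFrame bracket a
// call out to C++. On entry rax is assumed to hold the JS argument count, as
// EnterExitFrame uses it (via r14) to compute argv in r15.
//
//   masm->EnterExitFrame(StackFrame::EXIT, 1);  // result_size 1 => rax only
//   // ... put C arguments in place and call the runtime function ...
//   masm->LeaveExitFrame(StackFrame::EXIT, 1);  // unwinds to the JS caller
//   masm->ret(0);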


Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg,
                                   JSObject* holder, Register holder_reg,
                                   Register scratch,
                                   Label* miss) {
  // Make sure there's no overlap between scratch and the other
  // registers.
  ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg));

  // Keep track of the current object in register reg. On the first
  // iteration, reg is an alias for object_reg, on later iterations,
  // it is an alias for holder_reg.
  Register reg = object_reg;
  int depth = 1;

  // Check the maps in the prototype chain.
  // Traverse the prototype chain from the object and do map checks.
  while (object != holder) {
    depth++;

    // Only global objects and objects that do not require access
    // checks are allowed in stubs.
    ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());

    JSObject* prototype = JSObject::cast(object->GetPrototype());
    if (Heap::InNewSpace(prototype)) {
      // Get the map of the current object.
      movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
      Cmp(scratch, Handle<Map>(object->map()));
      // Branch on the result of the map check.
      j(not_equal, miss);
      // Check access rights to the global object. This has to happen
      // after the map check so that we know that the object is
      // actually a global object.
      if (object->IsJSGlobalProxy()) {
        CheckAccessGlobalProxy(reg, scratch, miss);

        // Restore scratch register to be the map of the object.
        // We load the prototype from the map in the scratch register.
        movq(scratch, FieldOperand(reg, HeapObject::kMapOffset));
      }
      // The prototype is in new space; we cannot store a reference
      // to it in the code. Load it from the map.
      reg = holder_reg;  // From now on the object is in holder_reg.
      movq(reg, FieldOperand(scratch, Map::kPrototypeOffset));

    } else {
      // Check the map of the current object.
      Cmp(FieldOperand(reg, HeapObject::kMapOffset),
          Handle<Map>(object->map()));
      // Branch on the result of the map check.
      j(not_equal, miss);
      // Check access rights to the global object. This has to happen
      // after the map check so that we know that the object is
      // actually a global object.
      if (object->IsJSGlobalProxy()) {
        CheckAccessGlobalProxy(reg, scratch, miss);
      }
      // The prototype is in old space; load it directly.
      reg = holder_reg;  // From now on the object is in holder_reg.
      Move(reg, Handle<JSObject>(prototype));
    }

    // Go to the next object in the prototype chain.
    object = prototype;
  }

  // Check the holder map.
  Cmp(FieldOperand(reg, HeapObject::kMapOffset),
      Handle<Map>(holder->map()));
  j(not_equal, miss);

  // Log the check depth.
  LOG(IntEvent("check-maps-depth", depth));

  // Perform the security check for access to the global object and return
  // the holder register.
  ASSERT(object == holder);
  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
  if (object->IsJSGlobalProxy()) {
    CheckAccessGlobalProxy(reg, scratch, miss);
  }
  return reg;
}
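

// Sketch (added, hypothetical): how a load stub might use CheckMaps to
// validate the receiver's prototype chain before loading a property from the
// holder. The object names and register choices here are illustrative.
//
//   Label miss;
//   Register holder_reg =
//       masm->CheckMaps(receiver_obj, rax,  // receiver and its register
//                       holder_obj, rbx,    // holder and its register
//                       rcx,                // scratch
//                       &miss);
//   // ... load the property from holder_reg ...
//   masm->bind(&miss);
//   // ... jump to the generic miss handler ...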


void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
                                            Register scratch,
                                            Label* miss) {
  Label same_contexts;

  ASSERT(!holder_reg.is(scratch));
  ASSERT(!scratch.is(kScratchRegister));
  // Load the current lexical context from the stack frame.
  movq(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));

  // When generating debug code, make sure the lexical context is set.
  if (FLAG_debug_code) {
    cmpq(scratch, Immediate(0));
    Check(not_equal, "we should not have an empty lexical context");
  }
  // Load the global context of the current context.
  int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, offset));
  movq(scratch, FieldOperand(scratch, GlobalObject::kGlobalContextOffset));

  // Check that the context is a global context.
  if (FLAG_debug_code) {
    Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
        Factory::global_context_map());
    Check(equal, "JSGlobalObject::global_context should be a global context.");
  }

  // Check if both contexts are the same.
  cmpq(scratch, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  j(equal, &same_contexts);

  // Compare security tokens.
  // Check that the security token in the calling global object is
  // compatible with the security token in the receiving global
  // object.

  // Check that the context is a global context.
  if (FLAG_debug_code) {
    // Preserve the original value of holder_reg.
    push(holder_reg);
    movq(holder_reg, FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
    CompareRoot(holder_reg, Heap::kNullValueRootIndex);
    Check(not_equal, "JSGlobalProxy::context() should not be null.");

    // Read the first word and compare to global_context_map().
    movq(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
    CompareRoot(holder_reg, Heap::kGlobalContextMapRootIndex);
    Check(equal, "JSGlobalObject::global_context should be a global context.");
    pop(holder_reg);
  }

  movq(kScratchRegister,
       FieldOperand(holder_reg, JSGlobalProxy::kContextOffset));
  int token_offset = Context::kHeaderSize +
                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
  movq(scratch, FieldOperand(scratch, token_offset));
  cmpq(scratch, FieldOperand(kScratchRegister, token_offset));
  j(not_equal, miss);

  bind(&same_contexts);
}


void MacroAssembler::LoadAllocationTopHelper(Register result,
                                             Register result_end,
                                             Register scratch,
                                             AllocationFlags flags) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();

  // Just return if allocation top is already known.
  if ((flags & RESULT_CONTAINS_TOP) != 0) {
    // No use of scratch if allocation top is provided.
    ASSERT(scratch.is(no_reg));
#ifdef DEBUG
    // Assert that result actually contains top on entry.
    movq(kScratchRegister, new_space_allocation_top);
    cmpq(result, Operand(kScratchRegister, 0));
    Check(equal, "Unexpected allocation top");
#endif
    return;
  }

  // Move address of new object to result. Use scratch register if available.
  if (scratch.is(no_reg)) {
    movq(kScratchRegister, new_space_allocation_top);
    movq(result, Operand(kScratchRegister, 0));
  } else {
    ASSERT(!scratch.is(result_end));
    movq(scratch, new_space_allocation_top);
    movq(result, Operand(scratch, 0));
  }
}


void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
                                               Register scratch) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();

  // Update new top.
  if (result_end.is(rax)) {
    // rax can be stored directly to a memory location.
    store_rax(new_space_allocation_top);
  } else {
    // Register required - use scratch provided if available.
    if (scratch.is(no_reg)) {
      movq(kScratchRegister, new_space_allocation_top);
      movq(Operand(kScratchRegister, 0), result_end);
    } else {
      movq(Operand(scratch, 0), result_end);
    }
  }
}


void MacroAssembler::AllocateObjectInNewSpace(int object_size,
                                              Register result,
                                              Register result_end,
                                              Register scratch,
                                              Label* gc_required,
                                              AllocationFlags flags) {
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, result_end, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  lea(result_end, Operand(result, object_size));
  movq(kScratchRegister, new_space_allocation_limit);
  cmpq(result_end, Operand(kScratchRegister, 0));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addq(result, Immediate(kHeapObjectTag));
  }
}
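

// Sketch (added, hypothetical): bump-pointer allocation of a fixed-size
// object, e.g. a HeapNumber, with fallback to a GC path. Register choices
// below are illustrative.
//
//   Label gc_required;
//   masm->AllocateObjectInNewSpace(HeapNumber::kSize,
//                                  rax,          // result
//                                  rbx,          // result_end
//                                  rcx,          // scratch
//                                  &gc_required,
//                                  TAG_OBJECT);  // leave a tagged pointer
//   // rax now holds a tagged pointer to kSize bytes of new-space memory;
//   // the map still has to be installed before the object is valid.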


void MacroAssembler::AllocateObjectInNewSpace(int header_size,
                                              ScaleFactor element_size,
                                              Register element_count,
                                              Register result,
                                              Register result_end,
                                              Register scratch,
                                              Label* gc_required,
                                              AllocationFlags flags) {
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, result_end, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  lea(result_end, Operand(result, element_count, element_size, header_size));
  movq(kScratchRegister, new_space_allocation_limit);
  cmpq(result_end, Operand(kScratchRegister, 0));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addq(result, Immediate(kHeapObjectTag));
  }
}
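

// Sketch (added, hypothetical): the scaled variant computes the object size
// as header_size plus element_count scaled by element_size in a single lea,
// which suits variable-length objects such as arrays. rdi is assumed to hold
// the element count.
//
//   Label gc_required;
//   masm->AllocateObjectInNewSpace(FixedArray::kHeaderSize,
//                                  times_pointer_size,
//                                  rdi,            // element count
//                                  rax, rbx, rcx,  // result, end, scratch
//                                  &gc_required,
//                                  TAG_OBJECT);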


void MacroAssembler::AllocateObjectInNewSpace(Register object_size,
                                              Register result,
                                              Register result_end,
                                              Register scratch,
                                              Label* gc_required,
                                              AllocationFlags flags) {
  // Load address of new object into result.
  LoadAllocationTopHelper(result, result_end, scratch, flags);

  // Calculate new top and bail out if new space is exhausted.
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address();
  if (!object_size.is(result_end)) {
    movq(result_end, object_size);
  }
  addq(result_end, result);
  movq(kScratchRegister, new_space_allocation_limit);
  cmpq(result_end, Operand(kScratchRegister, 0));
  j(above, gc_required);

  // Update allocation top.
  UpdateAllocationTopHelper(result_end, scratch);

  // Tag the result if requested.
  if ((flags & TAG_OBJECT) != 0) {
    addq(result, Immediate(kHeapObjectTag));
  }
}


void MacroAssembler::UndoAllocationInNewSpace(Register object) {
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address();

  // Make sure the object has no tag before resetting top.
  and_(object, Immediate(~kHeapObjectTagMask));
  movq(kScratchRegister, new_space_allocation_top);
#ifdef DEBUG
  cmpq(object, Operand(kScratchRegister, 0));
  Check(below, "Undo allocation of non allocated memory");
#endif
  movq(Operand(kScratchRegister, 0), object);
}
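

// Added commentary (not in the original source): UndoAllocationInNewSpace
// only works when the object being released is the most recent allocation,
// since it simply rewinds the new-space top pointer to the object's start.
//
//   // Hypothetical bailout path after a failed multi-step initialization:
//   masm->UndoAllocationInNewSpace(rax);  // rax: the just-allocated object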


CodePatcher::CodePatcher(byte* address, int size)
    : address_(address), size_(size), masm_(address, size + Assembler::kGap) {
  // Create a new macro assembler pointing to the address of the code to
  // patch. The size is adjusted with kGap in order for the assembler to
  // generate size bytes of instructions without failing with buffer size
  // constraints.
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}


CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  CPU::FlushICache(address_, size_);

  // Check that the code was patched as expected.
  ASSERT(masm_.pc_ == address_ + size_);
  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
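

// Sketch (added, hypothetical): RAII-style use of CodePatcher to overwrite
// existing instructions in place. An accessor for the internal assembler is
// assumed here; the destructor flushes the instruction cache and asserts that
// exactly `size` bytes were emitted.
//
//   {
//     CodePatcher patcher(address, length);  // address, length: caller's
//     patcher.masm()->int3();                // emit replacement bytes
//   }  // ~CodePatcher flushes the icache and checks the length here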


} } // namespace v8::internal