// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_IA32)

#include "code-stubs.h"
#include "bootstrapper.h"
#include "jsregexp.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
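
// Note: the "__" shorthand used below forwards each assembler directive to
// the MacroAssembler argument of each stub generator (ACCESS_MASM(masm)
// expands to masm->).
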
void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in esi.
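  //
  // Calling convention (derived from the code below): the stub takes a
  // single argument on the stack, the shared function info, at
  // [esp + kPointerSize]; the final ret pops it again.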
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, Operand(ecx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ mov(ebx, Immediate(Factory::empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(Factory::the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(Factory::undefined_value()));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(ecx);  // Temporarily remove return address.
  __ pop(edx);
  __ push(esi);
  __ push(edx);
  __ push(Immediate(Factory::false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Set up the object header.
  __ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // Set up the fixed slots.
  __ xor_(ebx, Operand(ebx));  // Set to NULL.
  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
  __ mov(Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)), eax);
  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), ebx);
  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

  // Copy the global object from the surrounding context. We go through the
  // context in the function (ecx) to match the allocation behavior we have
  // in the runtime system (see Heap::AllocateFunctionContext).
  __ mov(ebx, FieldOperand(ecx, JSFunction::kContextOffset));
  __ mov(ebx, Operand(ebx, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);

  // Initialize the rest of the slots to undefined.
  __ mov(ebx, Factory::undefined_value());
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ mov(Operand(eax, Context::SlotOffset(i)), ebx);
  }

  // Return and remove the on-stack parameter.
  __ mov(esi, Operand(eax));
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [esp + kPointerSize]: constant elements.
  // [esp + (2 * kPointerSize)]: literal index.
  // [esp + (3 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into ecx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ mov(ecx, Operand(esp, 3 * kPointerSize));
  __ mov(eax, Operand(esp, 2 * kPointerSize));
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
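  // The literal index in eax is a smi, i.e. the index already shifted left by
  // one (see the asserts above), so scaling it by times_half_pointer_size (x2)
  // addresses FixedArray elements of kPointerSize (4) bytes each.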
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  __ cmp(ecx, Factory::undefined_value());
  __ j(equal, &slow_case);

  if (FLAG_debug_code) {
    const char* message;
    Handle<Map> expected_map;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map = Factory::fixed_array_map();
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map = Factory::fixed_cow_array_map();
    }
    __ push(ecx);
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
    __ Assert(equal, message);
    __ pop(ecx);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(eax, i), ebx);
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ lea(edx, Operand(eax, JSArray::kSize));
    __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);

    // Copy the elements array.
    for (int i = 0; i < elements_size; i += kPointerSize) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(edx, i), ebx);
    }
  }

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
void ToBooleanStub::Generate(MacroAssembler* masm) {
  NearLabel false_result, true_result, not_string;
  __ mov(eax, Operand(esp, 1 * kPointerSize));

  // 'null' => false.
  __ cmp(eax, Factory::null_value());
  __ j(equal, &false_result);

  // Get the map and type of the heap object.
  __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
  __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));

  // Undetectable => false.
  __ test_b(FieldOperand(edx, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  __ j(not_zero, &false_result);

  // JavaScript object => true.
  __ CmpInstanceType(edx, FIRST_JS_OBJECT_TYPE);
  __ j(above_equal, &true_result);

  // String value => false iff empty.
  __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
  __ j(above_equal, &not_string);
  STATIC_ASSERT(kSmiTag == 0);
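  // The string length is stored as a smi; since the smi encoding of zero is
  // the machine word 0 (kSmiTag == 0, asserted above), an empty string can be
  // detected by comparing the length field directly against the immediate 0.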
  __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
  __ j(zero, &false_result);
  __ jmp(&true_result);

  __ bind(&not_string);
  // HeapNumber => false iff +0, -0, or NaN.
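  // The value is compared against 0.0 on the FPU stack; FCmp leaves the zero
  // flag set both when the operands compare equal (+0 and -0 both equal 0.0)
  // and when the comparison is unordered (NaN), so the single j(zero) below
  // covers all three falsy heap-number cases.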
  __ cmp(edx, Factory::heap_number_map());
  __ j(not_equal, &true_result);
  __ fldz();
  __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
  __ FCmp();
  __ j(zero, &false_result);
  // Fall through to |true_result|.

  // Return 1/0 for true/false in eax.
  __ bind(&true_result);
  __ mov(eax, 1);
  __ ret(1 * kPointerSize);
  __ bind(&false_result);
  __ mov(eax, 0);
  __ ret(1 * kPointerSize);
}


const char* GenericBinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
               op_name,
               overwrite_name,
               (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
               args_in_registers_ ? "RegArgs" : "StackArgs",
               args_reversed_ ? "_R" : "",
               static_operands_type_.ToString(),
               BinaryOpIC::GetName(runtime_operands_type_));
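  // The name encodes every input to the stub's minor key, so each
  // specialization shows up separately in profiles; an ADD stub taking its
  // arguments in registers would, for example, be named something like
  // "GenericBinaryOpStub_ADD_Alloc_RegArgs_<operand types>".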
  return name_;
}


void GenericBinaryOpStub::GenerateCall(
    MacroAssembler* masm,
    Register left,
    Register right) {
  if (!ArgsInRegistersSupported()) {
    // Pass arguments on the stack.
    __ push(left);
    __ push(right);
  } else {
    // The calling convention with registers is left in edx and right in eax.
    Register left_arg = edx;
    Register right_arg = eax;
    if (!(left.is(left_arg) && right.is(right_arg))) {
      if (left.is(right_arg) && right.is(left_arg)) {
        if (IsOperationCommutative()) {
          SetArgsReversed();
        } else {
          __ xchg(left, right);
        }
      } else if (left.is(left_arg)) {
        __ mov(right_arg, right);
      } else if (right.is(right_arg)) {
        __ mov(left_arg, left);
      } else if (left.is(right_arg)) {
        if (IsOperationCommutative()) {
          __ mov(left_arg, right);
          SetArgsReversed();
        } else {
          // Order of moves important to avoid destroying left argument.
          __ mov(left_arg, left);
          __ mov(right_arg, right);
        }
      } else if (right.is(left_arg)) {
        if (IsOperationCommutative()) {
          __ mov(right_arg, left);
          SetArgsReversed();
        } else {
          // Order of moves important to avoid destroying right argument.
          __ mov(right_arg, right);
          __ mov(left_arg, left);
        }
      } else {
        // Order of moves is not important.
        __ mov(left_arg, left);
        __ mov(right_arg, right);
      }
    }

    // Update flags to indicate that arguments are in registers.
    SetArgsInRegisters();
    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
  }

  // Call the stub.
  __ CallStub(this);
}


void GenericBinaryOpStub::GenerateCall(
    MacroAssembler* masm,
    Register left,
    Smi* right) {
  if (!ArgsInRegistersSupported()) {
    // Pass arguments on the stack.
    __ push(left);
    __ push(Immediate(right));
  } else {
    // The calling convention with registers is left in edx and right in eax.
    Register left_arg = edx;
    Register right_arg = eax;
    if (left.is(left_arg)) {
      __ mov(right_arg, Immediate(right));
    } else if (left.is(right_arg) && IsOperationCommutative()) {
      __ mov(left_arg, Immediate(right));
      SetArgsReversed();
    } else {
      // For non-commutative operations, left and right_arg might be
      // the same register. Therefore, the order of the moves is
      // important here in order to not overwrite left before moving
      // it to left_arg.
      __ mov(left_arg, left);
      __ mov(right_arg, Immediate(right));
    }

    // Update flags to indicate that arguments are in registers.
    SetArgsInRegisters();
    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
  }

  // Call the stub.
  __ CallStub(this);
}


void GenericBinaryOpStub::GenerateCall(
    MacroAssembler* masm,
    Smi* left,
    Register right) {
  if (!ArgsInRegistersSupported()) {
    // Pass arguments on the stack.
    __ push(Immediate(left));
    __ push(right);
  } else {
    // The calling convention with registers is left in edx and right in eax.
    Register left_arg = edx;
    Register right_arg = eax;
    if (right.is(right_arg)) {
      __ mov(left_arg, Immediate(left));
    } else if (right.is(left_arg) && IsOperationCommutative()) {
      __ mov(right_arg, Immediate(left));
      SetArgsReversed();
    } else {
      // For non-commutative operations, right and left_arg might be
      // the same register. Therefore, the order of the moves is
      // important here in order to not overwrite right before moving
      // it to right_arg.
      __ mov(right_arg, right);
      __ mov(left_arg, Immediate(left));
    }
    // Update flags to indicate that arguments are in registers.
    SetArgsInRegisters();
    __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
  }

  // Call the stub.
  __ CallStub(this);
}


class FloatingPointHelper : public AllStatic {
 public:

  enum ArgLocation {
    ARGS_ON_STACK,
    ARGS_IN_REGISTERS
  };

  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in register number. Returns operand as floating point number
  // on FPU stack.
  static void LoadFloatOperand(MacroAssembler* masm, Register number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
  // Returns operands as floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                Register scratch,
                                ArgLocation arg_location = ARGS_ON_STACK);

  // Similar to LoadFloatOperand but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,
                                 Label* non_float,
                                 Register scratch);

  // Checks that the two floating point numbers on top of the FPU stack
  // have int32 values.
  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,
                                         Label* non_int32);

  // Takes the operands in edx and eax and loads them as integers in eax
  // and ecx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             TypeInfo type_info,
                             bool use_sse3,
                             Label* operand_conversion_failure);
  static void LoadNumbersAsIntegers(MacroAssembler* masm,
                                    TypeInfo type_info,
                                    bool use_sse3,
                                    Label* operand_conversion_failure);
  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                     bool use_sse3,
                                     Label* operand_conversion_failure);

  // Must only be called after LoadUnknownsAsIntegers. Assumes that the
  // operands are pushed on the stack, and that their conversions to int32
  // are in eax and ecx. Checks that the original numbers were in the int32
  // range.
  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,
                                           bool use_sse3,
                                           Label* not_int32);

  // Assumes that operands are smis or heap numbers and loads them
  // into xmm0 and xmm1. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm);

  // Test if operands are numbers (smi or HeapNumber objects), and load
  // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
  // either operand is not a number. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);

  // Similar to LoadSSE2Operands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);

  // Checks that the two floating point numbers loaded into xmm0 and xmm1
  // have int32 values.
  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
                                        Label* non_int32,
                                        Register scratch);
};


void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
  // 1. Move arguments into edx, eax except for DIV and MOD, which need the
  // dividend in eax and edx free for the division. Use eax, ebx for those.
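  // (ia32 idiv takes its dividend in edx:eax and writes the quotient to eax
  // and the remainder to edx, so for DIV/MOD the left operand has to live in
  // eax and the right operand moves to ebx.)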
  Comment load_comment(masm, "-- Load arguments");
  Register left = edx;
  Register right = eax;
  if (op_ == Token::DIV || op_ == Token::MOD) {
    left = eax;
    right = ebx;
    if (HasArgsInRegisters()) {
      __ mov(ebx, eax);
      __ mov(eax, edx);
    }
  }
  if (!HasArgsInRegisters()) {
    __ mov(right, Operand(esp, 1 * kPointerSize));
    __ mov(left, Operand(esp, 2 * kPointerSize));
  }

  if (static_operands_type_.IsSmi()) {
    if (FLAG_debug_code) {
      __ AbortIfNotSmi(left);
      __ AbortIfNotSmi(right);
    }
    if (op_ == Token::BIT_OR) {
      __ or_(right, Operand(left));
      GenerateReturn(masm);
      return;
    } else if (op_ == Token::BIT_AND) {
      __ and_(right, Operand(left));
      GenerateReturn(masm);
      return;
    } else if (op_ == Token::BIT_XOR) {
      __ xor_(right, Operand(left));
      GenerateReturn(masm);
      return;
    }
  }

  // 2. Prepare the smi check of both operands by oring them together.
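  // A smi stores its 31-bit payload shifted left by one with the tag bit (0)
  // in the least significant bit, so the OR of two values has its low bit
  // clear exactly when both values are smis; a single test of the combined
  // register therefore checks both operands at once.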
  Comment smi_check_comment(masm, "-- Smi check arguments");
  Label not_smis;
  Register combined = ecx;
  ASSERT(!left.is(combined) && !right.is(combined));
  switch (op_) {
    case Token::BIT_OR:
      // Perform the operation into eax and smi check the result. Preserve
      // eax in case the result is not a smi.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));  // Bitwise or is commutative.
      combined = right;
      break;

    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD:
      __ mov(combined, right);
      __ or_(combined, Operand(left));
      break;

    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Move the right operand into ecx for the shift operation, use eax
      // for the smi check register.
      ASSERT(!left.is(ecx) && !right.is(ecx));
      __ mov(ecx, right);
      __ or_(right, Operand(left));
      combined = right;
      break;

    default:
      break;
  }

  // 3. Perform the smi check of the operands.
  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
  __ test(combined, Immediate(kSmiTagMask));
  __ j(not_zero, &not_smis, not_taken);

  // 4. Operands are both smis, perform the operation leaving the result in
  // eax and check the result if necessary.
  Comment perform_smi(masm, "-- Perform smi operation");
  Label use_fp_on_smis;
  switch (op_) {
    case Token::BIT_OR:
      // Nothing to do.
      break;

    case Token::BIT_XOR:
      ASSERT(right.is(eax));
      __ xor_(right, Operand(left));  // Bitwise xor is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(eax));
      __ and_(right, Operand(left));  // Bitwise and is commutative.
      break;

    case Token::SHL:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shl_cl(left);
      // Check that the *signed* result fits in a smi.
      __ cmp(left, 0xc0000000);
      __ j(sign, &use_fp_on_smis, not_taken);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SAR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ sar_cl(left);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::SHR:
      // Remove tags from operands (but keep sign).
      __ SmiUntag(left);
      __ SmiUntag(ecx);
      // Perform the operation.
      __ shr_cl(left);
      // Check that the *unsigned* result fits in a smi.
      // Neither of the two high-order bits can be set:
      // - 0x80000000: high bit would be lost when smi tagging.
      // - 0x40000000: this number would convert to negative when
      //   Smi tagging.
      // These two cases can only happen with shifts by 0 or 1 when
      // handed a valid smi.
      __ test(left, Immediate(0xc0000000));
      __ j(not_zero, slow, not_taken);
      // Tag the result and store it in register eax.
      __ SmiTag(left);
      __ mov(eax, left);
      break;

    case Token::ADD:
      ASSERT(right.is(eax));
      __ add(right, Operand(left));  // Addition is commutative.
      __ j(overflow, &use_fp_on_smis, not_taken);
      break;

    case Token::SUB:
      __ sub(left, Operand(right));
      __ j(overflow, &use_fp_on_smis, not_taken);
      __ mov(eax, left);
      break;

    case Token::MUL:
      // If the smi tag is 0 we can just leave the tag on one operand.
      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
      // We can't revert the multiplication if the result is not a smi
      // so save the right operand.
      __ mov(ebx, right);
      // Remove tag from one of the operands (but keep sign).
      __ SmiUntag(right);
      // Do multiplication.
      __ imul(right, Operand(left));  // Multiplication is commutative.
      __ j(overflow, &use_fp_on_smis, not_taken);
      // Check for negative zero result. Use combined = left | right.
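      // (A zero product with a negative operand, e.g. -2 * 0, must produce
      // -0, which cannot be represented as a smi, so that case also falls
      // through to the heap-number code.)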
      __ NegativeZeroTest(right, combined, &use_fp_on_smis);
      break;

    case Token::DIV:
      // We can't revert the division if the result is not a smi so
      // save the left operand.
      __ mov(edi, left);
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &use_fp_on_smis, not_taken);
      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for the corner case of dividing the most negative smi by
      // -1. We cannot use the overflow flag, since it is not set by idiv
      // instruction.
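      // (The most negative smi is -0x40000000; dividing it by -1 yields
      // +0x40000000, one above the largest smi 0x3fffffff, which is why the
      // quotient is compared against that exact value below.)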
      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
      __ cmp(eax, 0x40000000);
      __ j(equal, &use_fp_on_smis);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
      // Check that the remainder is zero.
      __ test(edx, Operand(edx));
      __ j(not_zero, &use_fp_on_smis);
      // Tag the result and store it in register eax.
      __ SmiTag(eax);
      break;

    case Token::MOD:
      // Check for 0 divisor.
      __ test(right, Operand(right));
      __ j(zero, &not_smis, not_taken);

      // Sign extend left into edx:eax.
      ASSERT(left.is(eax));
      __ cdq();
      // Divide edx:eax by right.
      __ idiv(right);
      // Check for negative zero result. Use combined = left | right.
      __ NegativeZeroTest(edx, combined, slow);
      // Move remainder to register eax.
      __ mov(eax, edx);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in eax.
  GenerateReturn(masm);

  // 6. For some operations emit inline code to perform floating point
  // operations on known smis (e.g., if the result of the operation
  // overflowed the smi range).
  switch (op_) {
    case Token::SHL: {
      Comment perform_float(masm, "-- Perform float operation on smis");
      __ bind(&use_fp_on_smis);
      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
        // Result we want is in left == edx, so we can put the allocated heap
        // number in eax.
        __ AllocateHeapNumber(eax, ecx, ebx, slow);
        // Store the result in the HeapNumber and return.
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          __ cvtsi2sd(xmm0, Operand(left));
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
        } else {
          // It's OK to overwrite the right argument on the stack because we
          // are about to return.
          __ mov(Operand(esp, 1 * kPointerSize), left);
          __ fild_s(Operand(esp, 1 * kPointerSize));
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
        }
        GenerateReturn(masm);
      } else {
        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
        __ jmp(slow);
      }
      break;
    }

    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      Comment perform_float(masm, "-- Perform float operation on smis");
      __ bind(&use_fp_on_smis);
      // Restore arguments to edx, eax.
      switch (op_) {
        case Token::ADD:
          // Revert right = right + left.
          __ sub(right, Operand(left));
          break;
        case Token::SUB:
          // Revert left = left - right.
          __ add(left, Operand(right));
          break;
        case Token::MUL:
          // Right was clobbered but a copy is in ebx.
          __ mov(right, ebx);
          break;
        case Token::DIV:
          // Left was clobbered but a copy is in edi. Right is in ebx for
          // division.
          __ mov(edx, edi);
          __ mov(eax, right);
          break;
        default: UNREACHABLE();
          break;
      }
      if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
        __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          FloatingPointHelper::LoadSSE2Smis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
        } else {  // SSE2 not available, use FPU.
          FloatingPointHelper::LoadFloatSmis(masm, ebx);
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
        }
        __ mov(eax, ecx);
        GenerateReturn(masm);
      } else {
        ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
        __ jmp(slow);
      }
      break;
    }

    default:
      break;
  }

  // 7. Non-smi operands, fall out to the non-smi code with the operands in
  // edx and eax.
  Comment done_comment(masm, "-- Enter non-smi code");
  __ bind(&not_smis);
  switch (op_) {
    case Token::BIT_OR:
    case Token::SHL:
    case Token::SAR:
    case Token::SHR:
      // Right operand is saved in ecx and eax was destroyed by the smi
      // check.
      __ mov(eax, ecx);
      break;

    case Token::DIV:
    case Token::MOD:
      // Operands are in eax, ebx at this point.
      __ mov(edx, eax);
      __ mov(eax, ebx);
      break;

    default:
      break;
  }
}


void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
  Label call_runtime;

  __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);

  if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
    Label slow;
    if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
    __ bind(&slow);
    GenerateTypeTransition(masm);
  }

  // Generate fast case smi code if requested. This flag is set when the fast
  // case smi code is not generated by the caller. Generating it here will
  // speed up common operations.
  if (ShouldGenerateSmiCode()) {
    GenerateSmiCode(masm, &call_runtime);
  } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
    if (!HasArgsInRegisters()) {
      GenerateLoadArguments(masm);
    }
  }

  // Floating point case.
  if (ShouldGenerateFPCode()) {
    switch (op_) {
      case Token::ADD:
      case Token::SUB:
      case Token::MUL:
      case Token::DIV: {
        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
            HasSmiCodeInStub()) {
          // Execution reaches this point when the first non-smi argument
          // occurs (and only if smi code is generated). This is the right
          // moment to patch to HEAP_NUMBERS state. The transition is
          // attempted only for the four basic operations. The stub stays in
          // the DEFAULT state forever for all other operations (also if smi
          // code is skipped).
          GenerateTypeTransition(masm);
          break;
        }

        Label not_floats;
        if (CpuFeatures::IsSupported(SSE2)) {
          CpuFeatures::Scope use_sse2(SSE2);
          if (static_operands_type_.IsNumber()) {
            if (FLAG_debug_code) {
              // Assert at runtime that inputs are only numbers.
              __ AbortIfNotNumber(edx);
              __ AbortIfNotNumber(eax);
            }
            if (static_operands_type_.IsSmi()) {
              if (FLAG_debug_code) {
                __ AbortIfNotSmi(edx);
                __ AbortIfNotSmi(eax);
              }
              FloatingPointHelper::LoadSSE2Smis(masm, ecx);
            } else {
              FloatingPointHelper::LoadSSE2Operands(masm);
            }
          } else {
            FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
          }

          switch (op_) {
            case Token::ADD: __ addsd(xmm0, xmm1); break;
            case Token::SUB: __ subsd(xmm0, xmm1); break;
            case Token::MUL: __ mulsd(xmm0, xmm1); break;
            case Token::DIV: __ divsd(xmm0, xmm1); break;
            default: UNREACHABLE();
          }
          GenerateHeapResultAllocation(masm, &call_runtime);
          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          GenerateReturn(masm);
        } else {  // SSE2 not available, use FPU.
          if (static_operands_type_.IsNumber()) {
            if (FLAG_debug_code) {
              // Assert at runtime that inputs are only numbers.
              __ AbortIfNotNumber(edx);
              __ AbortIfNotNumber(eax);
            }
          } else {
            FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
          }
          FloatingPointHelper::LoadFloatOperands(
              masm,
              ecx,
              FloatingPointHelper::ARGS_IN_REGISTERS);
          switch (op_) {
            case Token::ADD: __ faddp(1); break;
            case Token::SUB: __ fsubp(1); break;
            case Token::MUL: __ fmulp(1); break;
            case Token::DIV: __ fdivp(1); break;
            default: UNREACHABLE();
          }
          Label after_alloc_failure;
          GenerateHeapResultAllocation(masm, &after_alloc_failure);
          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          GenerateReturn(masm);
          __ bind(&after_alloc_failure);
          __ ffree();
          __ jmp(&call_runtime);
        }
        __ bind(&not_floats);
        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
            !HasSmiCodeInStub()) {
          // Execution reaches this point when the first non-number argument
          // occurs (and only if smi code is skipped from the stub, otherwise
          // the patching has already been done earlier in this case branch).
          // Try patching to STRINGS for ADD operation.
          if (op_ == Token::ADD) {
            GenerateTypeTransition(masm);
          }
        }
        break;
      }
      case Token::MOD: {
        // For MOD we go directly to runtime in the non-smi case.
        break;
      }
      case Token::BIT_OR:
      case Token::BIT_AND:
      case Token::BIT_XOR:
      case Token::SAR:
      case Token::SHL:
      case Token::SHR: {
        Label non_smi_result;
        FloatingPointHelper::LoadAsIntegers(masm,
                                            static_operands_type_,
                                            use_sse3_,
                                            &call_runtime);
        switch (op_) {
          case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
          case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
          case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
          case Token::SAR: __ sar_cl(eax); break;
          case Token::SHL: __ shl_cl(eax); break;
          case Token::SHR: __ shr_cl(eax); break;
          default: UNREACHABLE();
        }
        if (op_ == Token::SHR) {
          // Check if result is non-negative and fits in a smi.
          __ test(eax, Immediate(0xc0000000));
          __ j(not_zero, &call_runtime);
        } else {
          // Check if result fits in a smi.
          __ cmp(eax, 0xc0000000);
          __ j(negative, &non_smi_result);
        }
        // Tag smi result and return.
        __ SmiTag(eax);
        GenerateReturn(masm);

        // All ops except SHR return a signed int32 that we load in
        // a HeapNumber.
        if (op_ != Token::SHR) {
          __ bind(&non_smi_result);
          // Allocate a heap number if needed.
          __ mov(ebx, Operand(eax));  // ebx: result
          NearLabel skip_allocation;
          switch (mode_) {
            case OVERWRITE_LEFT:
            case OVERWRITE_RIGHT:
              // If the operand was an object, we skip the
              // allocation of a heap number.
              __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
                                  1 * kPointerSize : 2 * kPointerSize));
              __ test(eax, Immediate(kSmiTagMask));
              __ j(not_zero, &skip_allocation, not_taken);
              // Fall through!
            case NO_OVERWRITE:
              __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
              __ bind(&skip_allocation);
              break;
            default: UNREACHABLE();
          }
          // Store the result in the HeapNumber and return.
          if (CpuFeatures::IsSupported(SSE2)) {
            CpuFeatures::Scope use_sse2(SSE2);
            __ cvtsi2sd(xmm0, Operand(ebx));
            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
          } else {
            __ mov(Operand(esp, 1 * kPointerSize), ebx);
            __ fild_s(Operand(esp, 1 * kPointerSize));
            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
          }
          GenerateReturn(masm);
        }
        break;
      }
      default: UNREACHABLE(); break;
    }
  }

  // If all else fails, use the runtime system to get the correct
  // result. If the arguments were passed in registers, place them on the
  // stack in the correct order below the return address.

  // Avoid hitting the string ADD code below when allocation fails in
  // the floating point code above.
  if (op_ != Token::ADD) {
    __ bind(&call_runtime);
  }

  if (HasArgsInRegisters()) {
    GenerateRegisterArgsPush(masm);
  }

  switch (op_) {
    case Token::ADD: {
      // Test for string arguments before calling runtime.

      // If this stub has already generated FP-specific code then the
      // arguments are already in edx, eax.
      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
        GenerateLoadArguments(masm);
      }

      // Registers containing left and right operands respectively.
      Register lhs, rhs;
      if (HasArgsReversed()) {
        lhs = eax;
        rhs = edx;
      } else {
        lhs = edx;
        rhs = eax;
      }

      // Test if left operand is a string.
      NearLabel lhs_not_string;
      __ test(lhs, Immediate(kSmiTagMask));
      __ j(zero, &lhs_not_string);
      __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
      __ j(above_equal, &lhs_not_string);

      StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
      __ TailCallStub(&string_add_left_stub);

      NearLabel call_runtime_with_args;
      // Left operand is not a string, test right.
      __ bind(&lhs_not_string);
      __ test(rhs, Immediate(kSmiTagMask));
      __ j(zero, &call_runtime_with_args);
      __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
      __ j(above_equal, &call_runtime_with_args);

      StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
      __ TailCallStub(&string_add_right_stub);

      // Neither argument is a string.
      __ bind(&call_runtime);
      if (HasArgsInRegisters()) {
        GenerateRegisterArgsPush(masm);
      }
      __ bind(&call_runtime_with_args);
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    }
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}


void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
                                                       Label* alloc_failure) {
  Label skip_allocation;
  OverwriteMode mode = mode_;
  if (HasArgsReversed()) {
    if (mode == OVERWRITE_RIGHT) {
      mode = OVERWRITE_LEFT;
    } else if (mode == OVERWRITE_LEFT) {
      mode = OVERWRITE_RIGHT;
    }
  }
  switch (mode) {
    case OVERWRITE_LEFT: {
      // If the argument in edx is already an object, we skip the
      // allocation of a heap number.
      __ test(edx, Immediate(kSmiTagMask));
      __ j(not_zero, &skip_allocation, not_taken);
      // Allocate a heap number for the result. Keep eax and edx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now edx can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ mov(edx, Operand(ebx));
      __ bind(&skip_allocation);
      // Use object in edx as a result holder
      __ mov(eax, Operand(edx));
      break;
    }
    case OVERWRITE_RIGHT:
      // If the argument in eax is already an object, we skip the
      // allocation of a heap number.
      __ test(eax, Immediate(kSmiTagMask));
      __ j(not_zero, &skip_allocation, not_taken);
      // Fall through!
    case NO_OVERWRITE:
      // Allocate a heap number for the result. Keep eax and edx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
      // Now eax can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ mov(eax, ebx);
      __ bind(&skip_allocation);
      break;
    default: UNREACHABLE();
  }
}


void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
  // If arguments are not passed in registers read them from the stack.
  ASSERT(!HasArgsInRegisters());
  __ mov(eax, Operand(esp, 1 * kPointerSize));
  __ mov(edx, Operand(esp, 2 * kPointerSize));
}


void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
  // If arguments are not passed in registers remove them from the stack before
  // returning.
  if (!HasArgsInRegisters()) {
    __ ret(2 * kPointerSize);  // Remove both operands
  } else {
    __ ret(0);
  }
}


void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  ASSERT(HasArgsInRegisters());
  __ pop(ecx);
  if (HasArgsReversed()) {
    __ push(eax);
    __ push(edx);
  } else {
    __ push(edx);
    __ push(eax);
  }
  __ push(ecx);
}


void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  // Ensure the operands are on the stack.
  if (HasArgsInRegisters()) {
    GenerateRegisterArgsPush(masm);
  }

  __ pop(ecx);  // Save return address.

  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
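  // Together with the two operands already on the stack this gives the five
  // arguments expected by the IC_Utility call below (hence the 5 passed to
  // TailCallExternalReference).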

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
      5,
      1);
}


Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
  GenericBinaryOpStub stub(key, type_info);
  return stub.GetCode();
}


Handle<Code> GetTypeRecordingBinaryOpStub(int key,
    TRBinaryOpIC::TypeInfo type_info,
    TRBinaryOpIC::TypeInfo result_type_info) {
  TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
  return stub.GetCode();
}


void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  __ push(edx);
  __ push(eax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
      5,
      1);
}


// Prepare for a type transition runtime call when the args are already on
// the stack, under the return address.
void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
    MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.
  // Left and right arguments are already on top of the stack.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ push(Immediate(Smi::FromInt(MinorKey())));
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(operands_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
      5,
      1);
}


void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
  switch (operands_type_) {
    case TRBinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case TRBinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case TRBinaryOpIC::INT32:
      GenerateInt32Stub(masm);
      break;
    case TRBinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case TRBinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case TRBinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}


const char* TypeRecordingBinaryOpStub::GetName() {
  if (name_ != NULL) return name_;
  const int kMaxNameLength = 100;
  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
  if (name_ == NULL) return "OOM";
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }

  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
               "TypeRecordingBinaryOpStub_%s_%s_%s",
               op_name,
               overwrite_name,
               TRBinaryOpIC::GetName(operands_type_));
  return name_;
}


1358void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
1359 Label* slow,
1360 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1361 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
1362 // dividend in eax and edx free for the division. Use eax, ebx for those.
1363 Comment load_comment(masm, "-- Load arguments");
1364 Register left = edx;
1365 Register right = eax;
1366 if (op_ == Token::DIV || op_ == Token::MOD) {
1367 left = eax;
1368 right = ebx;
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00001369 __ mov(ebx, eax);
1370 __ mov(eax, edx);
kasperl@chromium.orga5551262010-12-07 12:49:48 +00001371 }
1372
1373
1374 // 2. Prepare the smi check of both operands by oring them together.
1375 Comment smi_check_comment(masm, "-- Smi check arguments");
1376 Label not_smis;
1377 Register combined = ecx;
1378 ASSERT(!left.is(combined) && !right.is(combined));
1379 switch (op_) {
1380 case Token::BIT_OR:
1381 // Perform the operation into eax and smi check the result. Preserve
1382 // eax in case the result is not a smi.
1383 ASSERT(!left.is(ecx) && !right.is(ecx));
1384 __ mov(ecx, right);
1385 __ or_(right, Operand(left)); // Bitwise or is commutative.
1386 combined = right;
1387 break;
1388
1389 case Token::BIT_XOR:
1390 case Token::BIT_AND:
1391 case Token::ADD:
1392 case Token::SUB:
1393 case Token::MUL:
1394 case Token::DIV:
1395 case Token::MOD:
1396 __ mov(combined, right);
1397 __ or_(combined, Operand(left));
1398 break;
1399
1400 case Token::SHL:
1401 case Token::SAR:
1402 case Token::SHR:
1403 // Move the right operand into ecx for the shift operation, use eax
1404 // for the smi check register.
1405 ASSERT(!left.is(ecx) && !right.is(ecx));
1406 __ mov(ecx, right);
1407 __ or_(right, Operand(left));
1408 combined = right;
1409 break;
1410
1411 default:
1412 break;
1413 }
1414
1415 // 3. Perform the smi check of the operands.
1416 STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
1417 __ test(combined, Immediate(kSmiTagMask));
1418 __ j(not_zero, &not_smis, not_taken);
1419
1420 // 4. Operands are both smis, perform the operation leaving the result in
1421 // eax and check the result if necessary.
1422 Comment perform_smi(masm, "-- Perform smi operation");
1423 Label use_fp_on_smis;
1424 switch (op_) {
1425 case Token::BIT_OR:
1426 // Nothing to do.
1427 break;
1428
1429 case Token::BIT_XOR:
1430 ASSERT(right.is(eax));
1431 __ xor_(right, Operand(left)); // Bitwise xor is commutative.
1432 break;
1433
1434 case Token::BIT_AND:
1435 ASSERT(right.is(eax));
1436 __ and_(right, Operand(left)); // Bitwise and is commutative.
1437 break;
1438
1439 case Token::SHL:
1440 // Remove tags from operands (but keep sign).
1441 __ SmiUntag(left);
1442 __ SmiUntag(ecx);
1443 // Perform the operation.
1444 __ shl_cl(left);
1445 // Check that the *signed* result fits in a smi.
1446 __ cmp(left, 0xc0000000);
1447 __ j(sign, &use_fp_on_smis, not_taken);
1448 // Tag the result and store it in register eax.
1449 __ SmiTag(left);
1450 __ mov(eax, left);
1451 break;
1452
1453 case Token::SAR:
1454 // Remove tags from operands (but keep sign).
1455 __ SmiUntag(left);
1456 __ SmiUntag(ecx);
1457 // Perform the operation.
1458 __ sar_cl(left);
1459 // Tag the result and store it in register eax.
1460 __ SmiTag(left);
1461 __ mov(eax, left);
1462 break;
1463
1464 case Token::SHR:
1465 // Remove tags from operands (but keep sign).
1466 __ SmiUntag(left);
1467 __ SmiUntag(ecx);
1468 // Perform the operation.
1469 __ shr_cl(left);
1470 // Check that the *unsigned* result fits in a smi.
1471 // Neither of the two high-order bits can be set:
1472 // - 0x80000000: high bit would be lost when smi tagging.
1473 // - 0x40000000: this number would convert to negative when
1474 // Smi tagging these two cases can only happen with shifts
1475 // by 0 or 1 when handed a valid smi.
1476 __ test(left, Immediate(0xc0000000));
1477 __ j(not_zero, slow, not_taken);
1478 // Tag the result and store it in register eax.
1479 __ SmiTag(left);
1480 __ mov(eax, left);
1481 break;
1482
1483 case Token::ADD:
1484 ASSERT(right.is(eax));
1485 __ add(right, Operand(left)); // Addition is commutative.
1486 __ j(overflow, &use_fp_on_smis, not_taken);
1487 break;
1488
1489 case Token::SUB:
1490 __ sub(left, Operand(right));
1491 __ j(overflow, &use_fp_on_smis, not_taken);
1492 __ mov(eax, left);
1493 break;
1494
1495 case Token::MUL:
1496 // If the smi tag is 0 we can just leave the tag on one operand.
1497 STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1498 // We can't revert the multiplication if the result is not a smi
1499 // so save the right operand.
1500 __ mov(ebx, right);
1501 // Remove tag from one of the operands (but keep sign).
1502 __ SmiUntag(right);
1503 // Do multiplication.
1504 __ imul(right, Operand(left)); // Multiplication is commutative.
1505 __ j(overflow, &use_fp_on_smis, not_taken);
1506 // Check for negative zero result. Use combined = left | right.
1507 __ NegativeZeroTest(right, combined, &use_fp_on_smis);
1508 break;
1509
1510 case Token::DIV:
1511 // We can't revert the division if the result is not a smi so
1512 // save the left operand.
1513 __ mov(edi, left);
1514 // Check for 0 divisor.
1515 __ test(right, Operand(right));
1516 __ j(zero, &use_fp_on_smis, not_taken);
1517 // Sign extend left into edx:eax.
1518 ASSERT(left.is(eax));
1519 __ cdq();
1520 // Divide edx:eax by right.
1521 __ idiv(right);
1522 // Check for the corner case of dividing the most negative smi by
1523 // -1. We cannot use the overflow flag, since it is not set by idiv
1524 // instruction.
1525 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1526 __ cmp(eax, 0x40000000);
1527 __ j(equal, &use_fp_on_smis);
1528 // Check for negative zero result. Use combined = left | right.
1529 __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
1530 // Check that the remainder is zero.
1531 __ test(edx, Operand(edx));
1532 __ j(not_zero, &use_fp_on_smis);
1533 // Tag the result and store it in register eax.
1534 __ SmiTag(eax);
1535 break;
1536
1537 case Token::MOD:
1538 // Check for 0 divisor.
1539 __ test(right, Operand(right));
1540 __ j(zero, &not_smis, not_taken);
1541
1542 // Sign extend left into edx:eax.
1543 ASSERT(left.is(eax));
1544 __ cdq();
1545 // Divide edx:eax by right.
1546 __ idiv(right);
1547 // Check for negative zero result. Use combined = left | right.
1548 __ NegativeZeroTest(edx, combined, slow);
1549 // Move remainder to register eax.
1550 __ mov(eax, edx);
1551 break;
1552
1553 default:
1554 UNREACHABLE();
1555 }
1556
1557 // 5. Emit return of result in eax. Some operations have registers pushed.
1558 switch (op_) {
1559 case Token::ADD:
1560 case Token::SUB:
1561 case Token::MUL:
1562 case Token::DIV:
1563 __ ret(0);
1564 break;
1565 case Token::MOD:
1566 case Token::BIT_OR:
1567 case Token::BIT_AND:
1568 case Token::BIT_XOR:
1569 case Token::SAR:
1570 case Token::SHL:
1571 case Token::SHR:
1572 __ ret(2 * kPointerSize);
1573 break;
1574 default:
1575 UNREACHABLE();
1576 }
1577
1578 // 6. For some operations emit inline code to perform floating point
1579 // operations on known smis (e.g., if the result of the operation
1580 // overflowed the smi range).
1581 if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
1582 __ bind(&use_fp_on_smis);
1583 switch (op_) {
1584 // Undo the effects of some operations, and some register moves.
1585 case Token::SHL:
1586 // The arguments are saved on the stack, and only used from there.
1587 break;
1588 case Token::ADD:
1589 // Revert right = right + left.
1590 __ sub(right, Operand(left));
1591 break;
1592 case Token::SUB:
1593 // Revert left = left - right.
1594 __ add(left, Operand(right));
1595 break;
1596 case Token::MUL:
1597 // Right was clobbered but a copy is in ebx.
1598 __ mov(right, ebx);
1599 break;
1600 case Token::DIV:
1601 // Left was clobbered but a copy is in edi. Right is in ebx for
1602        // division. They should be in eax and ebx for the jump to not_smis.
1603 __ mov(eax, edi);
1604 break;
1605 default:
1606 // No other operators jump to use_fp_on_smis.
1607 break;
1608 }
1609 __ jmp(&not_smis);
1610 } else {
1611 ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
1612 switch (op_) {
1613 case Token::SHL: {
1614 Comment perform_float(masm, "-- Perform float operation on smis");
1615 __ bind(&use_fp_on_smis);
1616        // The result we want is in left == edx, so we can put the allocated heap
1617 // number in eax.
1618 __ AllocateHeapNumber(eax, ecx, ebx, slow);
1619 // Store the result in the HeapNumber and return.
1620 if (CpuFeatures::IsSupported(SSE2)) {
1621 CpuFeatures::Scope use_sse2(SSE2);
1622 __ cvtsi2sd(xmm0, Operand(left));
1623 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1624 } else {
1625 // It's OK to overwrite the right argument on the stack because we
1626 // are about to return.
1627 __ mov(Operand(esp, 1 * kPointerSize), left);
1628 __ fild_s(Operand(esp, 1 * kPointerSize));
1629 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1630 }
1631 __ ret(2 * kPointerSize);
1632 break;
1633 }
1634
1635 case Token::ADD:
1636 case Token::SUB:
1637 case Token::MUL:
1638 case Token::DIV: {
1639 Comment perform_float(masm, "-- Perform float operation on smis");
1640 __ bind(&use_fp_on_smis);
1641 // Restore arguments to edx, eax.
1642 switch (op_) {
1643 case Token::ADD:
1644 // Revert right = right + left.
1645 __ sub(right, Operand(left));
1646 break;
1647 case Token::SUB:
1648 // Revert left = left - right.
1649 __ add(left, Operand(right));
1650 break;
1651 case Token::MUL:
1652 // Right was clobbered but a copy is in ebx.
1653 __ mov(right, ebx);
1654 break;
1655 case Token::DIV:
1656 // Left was clobbered but a copy is in edi. Right is in ebx for
1657 // division.
1658 __ mov(edx, edi);
1659 __ mov(eax, right);
1660 break;
1661 default: UNREACHABLE();
1662 break;
1663 }
1664 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1665 if (CpuFeatures::IsSupported(SSE2)) {
1666 CpuFeatures::Scope use_sse2(SSE2);
1667 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1668 switch (op_) {
1669 case Token::ADD: __ addsd(xmm0, xmm1); break;
1670 case Token::SUB: __ subsd(xmm0, xmm1); break;
1671 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1672 case Token::DIV: __ divsd(xmm0, xmm1); break;
1673 default: UNREACHABLE();
1674 }
1675 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1676 } else { // SSE2 not available, use FPU.
1677 FloatingPointHelper::LoadFloatSmis(masm, ebx);
1678 switch (op_) {
1679 case Token::ADD: __ faddp(1); break;
1680 case Token::SUB: __ fsubp(1); break;
1681 case Token::MUL: __ fmulp(1); break;
1682 case Token::DIV: __ fdivp(1); break;
1683 default: UNREACHABLE();
1684 }
1685 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1686 }
1687 __ mov(eax, ecx);
1688 __ ret(0);
1689 break;
1690 }
1691
1692 default:
1693 break;
1694 }
1695 }
1696
1697 // 7. Non-smi operands, fall out to the non-smi code with the operands in
1698 // edx and eax.
1699 Comment done_comment(masm, "-- Enter non-smi code");
1700 __ bind(&not_smis);
1701 switch (op_) {
1702 case Token::BIT_OR:
1703 case Token::SHL:
1704 case Token::SAR:
1705 case Token::SHR:
1706 // Right operand is saved in ecx and eax was destroyed by the smi
1707 // check.
1708 __ mov(eax, ecx);
1709 break;
1710
1711 case Token::DIV:
1712 case Token::MOD:
1713 // Operands are in eax, ebx at this point.
1714 __ mov(edx, eax);
1715 __ mov(eax, ebx);
1716 break;
1717
1718 default:
1719 break;
1720 }
1721}
1722
1723
1724void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1725 Label call_runtime;
1726
1727 switch (op_) {
1728 case Token::ADD:
1729 case Token::SUB:
1730 case Token::MUL:
1731 case Token::DIV:
1732 break;
1733 case Token::MOD:
1734 case Token::BIT_OR:
1735 case Token::BIT_AND:
1736 case Token::BIT_XOR:
1737 case Token::SAR:
1738 case Token::SHL:
1739 case Token::SHR:
1740 GenerateRegisterArgsPush(masm);
1741 break;
1742 default:
1743 UNREACHABLE();
1744 }
1745
1746 if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
1747 result_type_ == TRBinaryOpIC::SMI) {
1748 GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
1749 } else {
1750 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1751 }
1752 __ bind(&call_runtime);
1753 switch (op_) {
1754 case Token::ADD:
1755 case Token::SUB:
1756 case Token::MUL:
1757 case Token::DIV:
1758 GenerateTypeTransition(masm);
1759 break;
1760 case Token::MOD:
1761 case Token::BIT_OR:
1762 case Token::BIT_AND:
1763 case Token::BIT_XOR:
1764 case Token::SAR:
1765 case Token::SHL:
1766 case Token::SHR:
1767 GenerateTypeTransitionWithSavedArgs(masm);
1768 break;
1769 default:
1770 UNREACHABLE();
1771 }
1772}
1773
1774
1775
1776void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1777 Label call_runtime;
1778 ASSERT(operands_type_ == TRBinaryOpIC::STRING);
1779 ASSERT(op_ == Token::ADD);
1780 // If one of the arguments is a string, call the string add stub.
1781 // Otherwise, transition to the generic TRBinaryOpIC type.
1782
1783 // Registers containing left and right operands respectively.
1784 Register left = edx;
1785 Register right = eax;
1786
1787 // Test if left operand is a string.
1788 NearLabel left_not_string;
1789 __ test(left, Immediate(kSmiTagMask));
1790 __ j(zero, &left_not_string);
1791 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1792 __ j(above_equal, &left_not_string);
1793
1794 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
1795 GenerateRegisterArgsPush(masm);
1796 __ TailCallStub(&string_add_left_stub);
1797
1798 // Left operand is not a string, test right.
1799 __ bind(&left_not_string);
1800 __ test(right, Immediate(kSmiTagMask));
1801 __ j(zero, &call_runtime);
1802 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1803 __ j(above_equal, &call_runtime);
1804
1805 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1806 GenerateRegisterArgsPush(masm);
1807 __ TailCallStub(&string_add_right_stub);
1808
1809 // Neither argument is a string.
1810 __ bind(&call_runtime);
1811 GenerateTypeTransition(masm);
1812}
1813
1814
1815void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1816 Label call_runtime;
1817 ASSERT(operands_type_ == TRBinaryOpIC::INT32);
1818
1819 // Floating point case.
1820 switch (op_) {
1821 case Token::ADD:
1822 case Token::SUB:
1823 case Token::MUL:
1824 case Token::DIV: {
1825 Label not_floats;
1826 Label not_int32;
1827 if (CpuFeatures::IsSupported(SSE2)) {
1828 CpuFeatures::Scope use_sse2(SSE2);
1829 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1830 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1831 switch (op_) {
1832 case Token::ADD: __ addsd(xmm0, xmm1); break;
1833 case Token::SUB: __ subsd(xmm0, xmm1); break;
1834 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1835 case Token::DIV: __ divsd(xmm0, xmm1); break;
1836 default: UNREACHABLE();
1837 }
1838          // If the recorded result type is Int32 or narrower, check that the
              // value really is an int32.
1839 if (result_type_ <= TRBinaryOpIC::INT32) {
1840 __ cvttsd2si(ecx, Operand(xmm0));
1841 __ cvtsi2sd(xmm2, Operand(ecx));
1842 __ ucomisd(xmm0, xmm2);
1843 __ j(not_zero, &not_int32);
1844 __ j(carry, &not_int32);
1845 }
1846 GenerateHeapResultAllocation(masm, &call_runtime);
1847 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1848 __ ret(0);
1849 } else { // SSE2 not available, use FPU.
1850 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1851 FloatingPointHelper::LoadFloatOperands(
1852 masm,
1853 ecx,
1854 FloatingPointHelper::ARGS_IN_REGISTERS);
1855 FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
1856 switch (op_) {
1857 case Token::ADD: __ faddp(1); break;
1858 case Token::SUB: __ fsubp(1); break;
1859 case Token::MUL: __ fmulp(1); break;
1860 case Token::DIV: __ fdivp(1); break;
1861 default: UNREACHABLE();
1862 }
1863 Label after_alloc_failure;
1864 GenerateHeapResultAllocation(masm, &after_alloc_failure);
1865 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1866 __ ret(0);
1867 __ bind(&after_alloc_failure);
1868 __ ffree();
1869 __ jmp(&call_runtime);
1870 }
1871
1872 __ bind(&not_floats);
1873 __ bind(&not_int32);
1874 GenerateTypeTransition(masm);
1875 break;
1876 }
1877
1878 case Token::MOD: {
1879 // For MOD we go directly to runtime in the non-smi case.
1880 break;
1881 }
1882 case Token::BIT_OR:
1883 case Token::BIT_AND:
1884 case Token::BIT_XOR:
1885 case Token::SAR:
1886 case Token::SHL:
1887 case Token::SHR: {
1888 GenerateRegisterArgsPush(masm);
1889 Label not_floats;
1890 Label not_int32;
1891 Label non_smi_result;
1892 /* {
1893 CpuFeatures::Scope use_sse2(SSE2);
1894 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1895 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1896 }*/
1897 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1898 use_sse3_,
1899 &not_floats);
1900 FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
1901 &not_int32);
1902 switch (op_) {
1903 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
1904 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
1905 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
1906 case Token::SAR: __ sar_cl(eax); break;
1907 case Token::SHL: __ shl_cl(eax); break;
1908 case Token::SHR: __ shr_cl(eax); break;
1909 default: UNREACHABLE();
1910 }
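          // A 32-bit result only fits in a smi if it lies in [-2^30, 2^30 - 1].
          // For the unsigned SHR result this means the top two bits must be
          // clear; for the signed results, eax - 0xc0000000 sets the sign flag
          // exactly when eax is outside that range.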
1911 if (op_ == Token::SHR) {
1912 // Check if result is non-negative and fits in a smi.
1913 __ test(eax, Immediate(0xc0000000));
1914 __ j(not_zero, &call_runtime);
1915 } else {
1916 // Check if result fits in a smi.
1917 __ cmp(eax, 0xc0000000);
1918 __ j(negative, &non_smi_result);
1919 }
1920 // Tag smi result and return.
1921 __ SmiTag(eax);
1922 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1923
1924 // All ops except SHR return a signed int32 that we load in
1925 // a HeapNumber.
1926 if (op_ != Token::SHR) {
1927 __ bind(&non_smi_result);
1928 // Allocate a heap number if needed.
1929 __ mov(ebx, Operand(eax)); // ebx: result
1930 NearLabel skip_allocation;
1931 switch (mode_) {
1932 case OVERWRITE_LEFT:
1933 case OVERWRITE_RIGHT:
1934 // If the operand was an object, we skip the
1935 // allocation of a heap number.
1936 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1937 1 * kPointerSize : 2 * kPointerSize));
1938 __ test(eax, Immediate(kSmiTagMask));
1939 __ j(not_zero, &skip_allocation, not_taken);
1940 // Fall through!
1941 case NO_OVERWRITE:
1942 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1943 __ bind(&skip_allocation);
1944 break;
1945 default: UNREACHABLE();
1946 }
1947 // Store the result in the HeapNumber and return.
1948 if (CpuFeatures::IsSupported(SSE2)) {
1949 CpuFeatures::Scope use_sse2(SSE2);
1950 __ cvtsi2sd(xmm0, Operand(ebx));
1951 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1952 } else {
1953 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1954 __ fild_s(Operand(esp, 1 * kPointerSize));
1955 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1956 }
1957 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1958 }
1959
1960 __ bind(&not_floats);
1961 __ bind(&not_int32);
1962 GenerateTypeTransitionWithSavedArgs(masm);
1963 break;
1964 }
1965 default: UNREACHABLE(); break;
1966 }
1967
1968 // If an allocation fails, or SHR or MOD hit a hard case,
1969 // use the runtime system to get the correct result.
1970 __ bind(&call_runtime);
1971
1972 switch (op_) {
1973 case Token::ADD:
1974 GenerateRegisterArgsPush(masm);
1975 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1976 break;
1977 case Token::SUB:
1978 GenerateRegisterArgsPush(masm);
1979 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1980 break;
1981 case Token::MUL:
1982 GenerateRegisterArgsPush(masm);
1983 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1984 break;
1985 case Token::DIV:
1986 GenerateRegisterArgsPush(masm);
1987 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1988 break;
1989 case Token::MOD:
1990 GenerateRegisterArgsPush(masm);
1991 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1992 break;
1993 case Token::BIT_OR:
1994 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1995 break;
1996 case Token::BIT_AND:
1997 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1998 break;
1999 case Token::BIT_XOR:
2000 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2001 break;
2002 case Token::SAR:
2003 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2004 break;
2005 case Token::SHL:
2006 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2007 break;
2008 case Token::SHR:
2009 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2010 break;
2011 default:
2012 UNREACHABLE();
2013 }
2014}
2015
2016
2017void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2018 Label call_runtime;
2019 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER ||
2020 operands_type_ == TRBinaryOpIC::INT32);
2021
2022 // Floating point case.
2023 switch (op_) {
2024 case Token::ADD:
2025 case Token::SUB:
2026 case Token::MUL:
2027 case Token::DIV: {
2028 Label not_floats;
2029 if (CpuFeatures::IsSupported(SSE2)) {
2030 CpuFeatures::Scope use_sse2(SSE2);
2031 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2032
2033 switch (op_) {
2034 case Token::ADD: __ addsd(xmm0, xmm1); break;
2035 case Token::SUB: __ subsd(xmm0, xmm1); break;
2036 case Token::MUL: __ mulsd(xmm0, xmm1); break;
2037 case Token::DIV: __ divsd(xmm0, xmm1); break;
2038 default: UNREACHABLE();
2039 }
2040 GenerateHeapResultAllocation(masm, &call_runtime);
2041 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2042 __ ret(0);
2043 } else { // SSE2 not available, use FPU.
2044 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2045 FloatingPointHelper::LoadFloatOperands(
2046 masm,
2047 ecx,
2048 FloatingPointHelper::ARGS_IN_REGISTERS);
2049 switch (op_) {
2050 case Token::ADD: __ faddp(1); break;
2051 case Token::SUB: __ fsubp(1); break;
2052 case Token::MUL: __ fmulp(1); break;
2053 case Token::DIV: __ fdivp(1); break;
2054 default: UNREACHABLE();
2055 }
2056 Label after_alloc_failure;
2057 GenerateHeapResultAllocation(masm, &after_alloc_failure);
2058 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2059 __ ret(0);
2060 __ bind(&after_alloc_failure);
2061 __ ffree();
2062 __ jmp(&call_runtime);
2063 }
2064
2065 __ bind(&not_floats);
2066 GenerateTypeTransition(masm);
2067 break;
2068 }
2069
2070 case Token::MOD: {
2071 // For MOD we go directly to runtime in the non-smi case.
2072 break;
2073 }
2074 case Token::BIT_OR:
2075 case Token::BIT_AND:
2076 case Token::BIT_XOR:
2077 case Token::SAR:
2078 case Token::SHL:
2079 case Token::SHR: {
2080 GenerateRegisterArgsPush(masm);
2081 Label not_floats;
2082 Label non_smi_result;
2083 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2084 use_sse3_,
2085 &not_floats);
2086 switch (op_) {
2087 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
2088 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
2089 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
2090 case Token::SAR: __ sar_cl(eax); break;
2091 case Token::SHL: __ shl_cl(eax); break;
2092 case Token::SHR: __ shr_cl(eax); break;
2093 default: UNREACHABLE();
2094 }
2095 if (op_ == Token::SHR) {
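      // As in GenerateInt32Stub above, the result fits in a smi only if it
      // lies in [-2^30, 2^30 - 1].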
2096 // Check if result is non-negative and fits in a smi.
2097 __ test(eax, Immediate(0xc0000000));
2098 __ j(not_zero, &call_runtime);
2099 } else {
2100 // Check if result fits in a smi.
2101 __ cmp(eax, 0xc0000000);
2102 __ j(negative, &non_smi_result);
2103 }
2104 // Tag smi result and return.
2105 __ SmiTag(eax);
2106 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2107
2108 // All ops except SHR return a signed int32 that we load in
2109 // a HeapNumber.
2110 if (op_ != Token::SHR) {
2111 __ bind(&non_smi_result);
2112 // Allocate a heap number if needed.
2113 __ mov(ebx, Operand(eax)); // ebx: result
2114 NearLabel skip_allocation;
2115 switch (mode_) {
2116 case OVERWRITE_LEFT:
2117 case OVERWRITE_RIGHT:
2118 // If the operand was an object, we skip the
2119 // allocation of a heap number.
2120 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2121 1 * kPointerSize : 2 * kPointerSize));
2122 __ test(eax, Immediate(kSmiTagMask));
2123 __ j(not_zero, &skip_allocation, not_taken);
2124 // Fall through!
2125 case NO_OVERWRITE:
2126 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2127 __ bind(&skip_allocation);
2128 break;
2129 default: UNREACHABLE();
2130 }
2131 // Store the result in the HeapNumber and return.
2132 if (CpuFeatures::IsSupported(SSE2)) {
2133 CpuFeatures::Scope use_sse2(SSE2);
2134 __ cvtsi2sd(xmm0, Operand(ebx));
2135 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2136 } else {
2137 __ mov(Operand(esp, 1 * kPointerSize), ebx);
2138 __ fild_s(Operand(esp, 1 * kPointerSize));
2139 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2140 }
2141 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2142 }
2143
2144 __ bind(&not_floats);
2145 GenerateTypeTransitionWithSavedArgs(masm);
2146 break;
2147 }
2148 default: UNREACHABLE(); break;
2149 }
2150
2151 // If an allocation fails, or SHR or MOD hit a hard case,
2152 // use the runtime system to get the correct result.
2153 __ bind(&call_runtime);
2154
2155 switch (op_) {
2156 case Token::ADD:
2157 GenerateRegisterArgsPush(masm);
2158 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2159 break;
2160 case Token::SUB:
2161 GenerateRegisterArgsPush(masm);
2162 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2163 break;
2164 case Token::MUL:
2165 GenerateRegisterArgsPush(masm);
2166 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2167 break;
2168 case Token::DIV:
2169 GenerateRegisterArgsPush(masm);
2170 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2171 break;
2172 case Token::MOD:
2173 GenerateRegisterArgsPush(masm);
2174 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2175 break;
2176 case Token::BIT_OR:
2177 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2178 break;
2179 case Token::BIT_AND:
2180 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2181 break;
2182 case Token::BIT_XOR:
2183 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2184 break;
2185 case Token::SAR:
2186 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2187 break;
2188 case Token::SHL:
2189 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2190 break;
2191 case Token::SHR:
2192 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2193 break;
2194 default:
2195 UNREACHABLE();
2196 }
2197}
2198
2199
2200void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2201 Label call_runtime;
2202
2203 __ IncrementCounter(&Counters::generic_binary_stub_calls, 1);
2204
2205 switch (op_) {
2206 case Token::ADD:
2207 case Token::SUB:
2208 case Token::MUL:
2209 case Token::DIV:
2210 break;
2211 case Token::MOD:
2212 case Token::BIT_OR:
2213 case Token::BIT_AND:
2214 case Token::BIT_XOR:
2215 case Token::SAR:
2216 case Token::SHL:
2217 case Token::SHR:
2218 GenerateRegisterArgsPush(masm);
2219 break;
2220 default:
2221 UNREACHABLE();
2222 }
2223
2224 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2225
2226 // Floating point case.
2227 switch (op_) {
2228 case Token::ADD:
2229 case Token::SUB:
2230 case Token::MUL:
2231 case Token::DIV: {
2232 Label not_floats;
2233 if (CpuFeatures::IsSupported(SSE2)) {
2234 CpuFeatures::Scope use_sse2(SSE2);
2235 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2236
2237 switch (op_) {
2238 case Token::ADD: __ addsd(xmm0, xmm1); break;
2239 case Token::SUB: __ subsd(xmm0, xmm1); break;
2240 case Token::MUL: __ mulsd(xmm0, xmm1); break;
2241 case Token::DIV: __ divsd(xmm0, xmm1); break;
2242 default: UNREACHABLE();
2243 }
2244 GenerateHeapResultAllocation(masm, &call_runtime);
2245 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2246 __ ret(0);
2247 } else { // SSE2 not available, use FPU.
2248 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2249 FloatingPointHelper::LoadFloatOperands(
2250 masm,
2251 ecx,
2252 FloatingPointHelper::ARGS_IN_REGISTERS);
2253 switch (op_) {
2254 case Token::ADD: __ faddp(1); break;
2255 case Token::SUB: __ fsubp(1); break;
2256 case Token::MUL: __ fmulp(1); break;
2257 case Token::DIV: __ fdivp(1); break;
2258 default: UNREACHABLE();
2259 }
2260 Label after_alloc_failure;
2261 GenerateHeapResultAllocation(masm, &after_alloc_failure);
2262 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2263 __ ret(0);
2264 __ bind(&after_alloc_failure);
2265 __ ffree();
2266 __ jmp(&call_runtime);
2267 }
2268 __ bind(&not_floats);
2269 break;
2270 }
2271 case Token::MOD: {
2272 // For MOD we go directly to runtime in the non-smi case.
2273 break;
2274 }
2275 case Token::BIT_OR:
2276 case Token::BIT_AND:
2277 case Token::BIT_XOR:
2278 case Token::SAR:
2279 case Token::SHL:
2280 case Token::SHR: {
2281 Label non_smi_result;
2282 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2283 use_sse3_,
2284 &call_runtime);
2285 switch (op_) {
2286 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
2287 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
2288 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
2289 case Token::SAR: __ sar_cl(eax); break;
2290 case Token::SHL: __ shl_cl(eax); break;
2291 case Token::SHR: __ shr_cl(eax); break;
2292 default: UNREACHABLE();
2293 }
2294 if (op_ == Token::SHR) {
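      // As in GenerateInt32Stub above, the result fits in a smi only if it
      // lies in [-2^30, 2^30 - 1].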
2295 // Check if result is non-negative and fits in a smi.
2296 __ test(eax, Immediate(0xc0000000));
2297 __ j(not_zero, &call_runtime);
2298 } else {
2299 // Check if result fits in a smi.
2300 __ cmp(eax, 0xc0000000);
2301 __ j(negative, &non_smi_result);
2302 }
2303 // Tag smi result and return.
2304 __ SmiTag(eax);
2305 __ ret(2 * kPointerSize); // Drop the arguments from the stack.
2306
2307 // All ops except SHR return a signed int32 that we load in
2308 // a HeapNumber.
2309 if (op_ != Token::SHR) {
2310 __ bind(&non_smi_result);
2311 // Allocate a heap number if needed.
2312 __ mov(ebx, Operand(eax)); // ebx: result
2313 NearLabel skip_allocation;
2314 switch (mode_) {
2315 case OVERWRITE_LEFT:
2316 case OVERWRITE_RIGHT:
2317 // If the operand was an object, we skip the
2318 // allocation of a heap number.
2319 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2320 1 * kPointerSize : 2 * kPointerSize));
2321 __ test(eax, Immediate(kSmiTagMask));
2322 __ j(not_zero, &skip_allocation, not_taken);
2323 // Fall through!
2324 case NO_OVERWRITE:
2325 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2326 __ bind(&skip_allocation);
2327 break;
2328 default: UNREACHABLE();
2329 }
2330 // Store the result in the HeapNumber and return.
2331 if (CpuFeatures::IsSupported(SSE2)) {
2332 CpuFeatures::Scope use_sse2(SSE2);
2333 __ cvtsi2sd(xmm0, Operand(ebx));
2334 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2335 } else {
2336 __ mov(Operand(esp, 1 * kPointerSize), ebx);
2337 __ fild_s(Operand(esp, 1 * kPointerSize));
2338 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2339 }
2340 __ ret(2 * kPointerSize);
2341 }
2342 break;
2343 }
2344 default: UNREACHABLE(); break;
2345 }
2346
2347 // If all else fails, use the runtime system to get the correct
2348 // result.
2349 __ bind(&call_runtime);
2350 switch (op_) {
2351 case Token::ADD: {
2352 GenerateRegisterArgsPush(masm);
2353 // Test for string arguments before calling runtime.
2354 // Registers containing left and right operands respectively.
2355 Register lhs, rhs;
2356 lhs = edx;
2357 rhs = eax;
2358
2359 // Test if left operand is a string.
2360 NearLabel lhs_not_string;
2361 __ test(lhs, Immediate(kSmiTagMask));
2362 __ j(zero, &lhs_not_string);
2363 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
2364 __ j(above_equal, &lhs_not_string);
2365
2366 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2367 __ TailCallStub(&string_add_left_stub);
2368
2369 NearLabel call_add_runtime;
2370 // Left operand is not a string, test right.
2371 __ bind(&lhs_not_string);
2372 __ test(rhs, Immediate(kSmiTagMask));
2373 __ j(zero, &call_add_runtime);
2374 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
2375 __ j(above_equal, &call_add_runtime);
2376
2377 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2378 __ TailCallStub(&string_add_right_stub);
2379
2380 // Neither argument is a string.
2381 __ bind(&call_add_runtime);
2382 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2383 break;
2384 }
2385 case Token::SUB:
2386 GenerateRegisterArgsPush(masm);
2387 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2388 break;
2389 case Token::MUL:
2390 GenerateRegisterArgsPush(masm);
2391 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2392 break;
2393 case Token::DIV:
2394 GenerateRegisterArgsPush(masm);
2395 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2396 break;
2397 case Token::MOD:
2398 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2399 break;
2400 case Token::BIT_OR:
2401 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2402 break;
2403 case Token::BIT_AND:
2404 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2405 break;
2406 case Token::BIT_XOR:
2407 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2408 break;
2409 case Token::SAR:
2410 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2411 break;
2412 case Token::SHL:
2413 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2414 break;
2415 case Token::SHR:
2416 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
2417 break;
2418 default:
2419 UNREACHABLE();
2420 }
2421}
2422
2423
2424void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
2425 MacroAssembler* masm,
2426 Label* alloc_failure) {
2427 Label skip_allocation;
2428 OverwriteMode mode = mode_;
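  // OVERWRITE_LEFT / OVERWRITE_RIGHT indicate that the corresponding operand
  // is a heap number that may be reused for the result; a fresh HeapNumber is
  // only allocated when that operand turns out to be a smi.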
2429 switch (mode) {
2430 case OVERWRITE_LEFT: {
2431 // If the argument in edx is already an object, we skip the
2432 // allocation of a heap number.
2433 __ test(edx, Immediate(kSmiTagMask));
2434 __ j(not_zero, &skip_allocation, not_taken);
2435 // Allocate a heap number for the result. Keep eax and edx intact
2436 // for the possible runtime call.
2437 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2438      // Now edx can be overwritten, losing one of the arguments, since we
2439      // are done with it and will not need it any more.
2440 __ mov(edx, Operand(ebx));
2441 __ bind(&skip_allocation);
2442      // Use the object in edx as the result holder.
2443 __ mov(eax, Operand(edx));
2444 break;
2445 }
2446 case OVERWRITE_RIGHT:
2447 // If the argument in eax is already an object, we skip the
2448 // allocation of a heap number.
2449 __ test(eax, Immediate(kSmiTagMask));
2450 __ j(not_zero, &skip_allocation, not_taken);
2451 // Fall through!
2452 case NO_OVERWRITE:
2453 // Allocate a heap number for the result. Keep eax and edx intact
2454 // for the possible runtime call.
2455 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2456      // Now eax can be overwritten, losing one of the arguments, since we
2457      // are done with it and will not need it any more.
2458 __ mov(eax, ebx);
2459 __ bind(&skip_allocation);
2460 break;
2461 default: UNREACHABLE();
2462 }
2463}
2464
2465
2466void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
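  // Push the register arguments under the return address: pop the return
  // address into ecx, push edx and eax, then push the return address back.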
2467 __ pop(ecx);
2468 __ push(edx);
2469 __ push(eax);
2470 __ push(ecx);
2471}
2472
2473
ricow@chromium.org65fae842010-08-25 15:26:24 +00002474void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2475 // Input on stack:
2476 // esp[4]: argument (should be number).
2477 // esp[0]: return address.
2478 // Test that eax is a number.
2479 Label runtime_call;
2480 Label runtime_call_clear_stack;
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00002481 NearLabel input_not_smi;
2482 NearLabel loaded;
ricow@chromium.org65fae842010-08-25 15:26:24 +00002483 __ mov(eax, Operand(esp, kPointerSize));
2484 __ test(eax, Immediate(kSmiTagMask));
2485 __ j(not_zero, &input_not_smi);
2486 // Input is a smi. Untag and load it onto the FPU stack.
2487 // Then load the low and high words of the double into ebx, edx.
2488 STATIC_ASSERT(kSmiTagSize == 1);
2489 __ sar(eax, 1);
2490 __ sub(Operand(esp), Immediate(2 * kPointerSize));
2491 __ mov(Operand(esp, 0), eax);
2492 __ fild_s(Operand(esp, 0));
2493 __ fst_d(Operand(esp, 0));
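  // fst_d stores the double without popping it, so the value stays in ST(0)
  // while its two 32-bit halves are picked up from the stack below.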
2494 __ pop(edx);
2495 __ pop(ebx);
2496 __ jmp(&loaded);
2497 __ bind(&input_not_smi);
2498 // Check if input is a HeapNumber.
2499 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2500 __ cmp(Operand(ebx), Immediate(Factory::heap_number_map()));
2501 __ j(not_equal, &runtime_call);
2502 // Input is a HeapNumber. Push it on the FPU stack and load its
2503 // low and high words into ebx, edx.
2504 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2505 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2506 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2507
2508 __ bind(&loaded);
2509 // ST[0] == double value
2510 // ebx = low 32 bits of double value
2511 // edx = high 32 bits of double value
2512 // Compute hash (the shifts are arithmetic):
2513 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2514 __ mov(ecx, ebx);
2515 __ xor_(ecx, Operand(edx));
2516 __ mov(eax, ecx);
2517 __ sar(eax, 16);
2518 __ xor_(ecx, Operand(eax));
2519 __ mov(eax, ecx);
2520 __ sar(eax, 8);
2521 __ xor_(ecx, Operand(eax));
2522 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
2523 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
2524
2525 // ST[0] == double value.
2526 // ebx = low 32 bits of double value.
2527 // edx = high 32 bits of double value.
2528 // ecx = TranscendentalCache::hash(double value).
2529 __ mov(eax,
2530 Immediate(ExternalReference::transcendental_cache_array_address()));
2531 // Eax points to cache array.
2532 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
2533 // Eax points to the cache for the type type_.
2534 // If NULL, the cache hasn't been initialized yet, so go through runtime.
2535 __ test(eax, Operand(eax));
2536 __ j(zero, &runtime_call_clear_stack);
2537#ifdef DEBUG
2538 // Check that the layout of cache elements match expectations.
2539 { TranscendentalCache::Element test_elem[2];
2540 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2541 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2542 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2543 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2544 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
2545    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
2546 CHECK_EQ(0, elem_in0 - elem_start);
2547 CHECK_EQ(kIntSize, elem_in1 - elem_start);
2548 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2549 }
2550#endif
2551 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
2552 __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2553 __ lea(ecx, Operand(eax, ecx, times_4, 0));
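  // (The two lea instructions compute ecx = ecx * 3 and then
  // ecx = eax + ecx * 4, i.e. the address eax + original_index * 12.)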
2554 // Check if cache matches: Double value is stored in uint32_t[2] array.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00002555 NearLabel cache_miss;
ricow@chromium.org65fae842010-08-25 15:26:24 +00002556 __ cmp(ebx, Operand(ecx, 0));
2557 __ j(not_equal, &cache_miss);
2558 __ cmp(edx, Operand(ecx, kIntSize));
2559 __ j(not_equal, &cache_miss);
2560 // Cache hit!
2561 __ mov(eax, Operand(ecx, 2 * kIntSize));
2562 __ fstp(0);
2563 __ ret(kPointerSize);
2564
2565 __ bind(&cache_miss);
2566 // Update cache with new value.
2567 // We are short on registers, so use no_reg as scratch.
2568 // This gives slightly larger code.
2569 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2570 GenerateOperation(masm);
2571 __ mov(Operand(ecx, 0), ebx);
2572 __ mov(Operand(ecx, kIntSize), edx);
2573 __ mov(Operand(ecx, 2 * kIntSize), eax);
2574 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2575 __ ret(kPointerSize);
2576
2577 __ bind(&runtime_call_clear_stack);
2578 __ fstp(0);
2579 __ bind(&runtime_call);
2580 __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
2581}
2582
2583
2584Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2585 switch (type_) {
2586 // Add more cases when necessary.
2587 case TranscendentalCache::SIN: return Runtime::kMath_sin;
2588 case TranscendentalCache::COS: return Runtime::kMath_cos;
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002589 case TranscendentalCache::LOG: return Runtime::kMath_log;
ricow@chromium.org65fae842010-08-25 15:26:24 +00002590 default:
2591 UNIMPLEMENTED();
2592 return Runtime::kAbort;
2593 }
2594}
2595
2596
2597void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
2598 // Only free register is edi.
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002599 // Input value is on FP stack, and also in ebx/edx. Address of result
2600 // (a newly allocated HeapNumber) is in eax.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00002601 NearLabel done;
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002602 if (type_ == TranscendentalCache::SIN || type_ == TranscendentalCache::COS) {
2603 // Both fsin and fcos require arguments in the range +/-2^63 and
2604 // return NaN for infinities and NaN. They can share all code except
2605 // the actual fsin/fcos operation.
2606 NearLabel in_range;
2607    // If the argument is outside the range -2^63..2^63, fsin/fcos don't
2608    // work, so we must reduce it to the appropriate range.
2609 __ mov(edi, edx);
2610 __ and_(Operand(edi), Immediate(0x7ff00000)); // Exponent only.
2611 int supported_exponent_limit =
2612 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2613 __ cmp(Operand(edi), Immediate(supported_exponent_limit));
2614 __ j(below, &in_range, taken);
2615 // Check for infinity and NaN. Both return NaN for sin.
2616 __ cmp(Operand(edi), Immediate(0x7ff00000));
2617 NearLabel non_nan_result;
2618 __ j(not_equal, &non_nan_result, taken);
2619 // Input is +/-Infinity or NaN. Result is NaN.
2620 __ fstp(0);
2621 // NaN is represented by 0x7ff8000000000000.
2622 __ push(Immediate(0x7ff80000));
2623 __ push(Immediate(0));
2624 __ fld_d(Operand(esp, 0));
2625 __ add(Operand(esp), Immediate(2 * kPointerSize));
2626 __ jmp(&done);
ricow@chromium.org65fae842010-08-25 15:26:24 +00002627
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002628 __ bind(&non_nan_result);
ricow@chromium.org65fae842010-08-25 15:26:24 +00002629
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002630 // Use fpmod to restrict argument to the range +/-2*PI.
2631 __ mov(edi, eax); // Save eax before using fnstsw_ax.
2632 __ fldpi();
2633 __ fadd(0);
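    // ST(0) now holds 2*pi (pi added to itself).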
2634 __ fld(1);
2635 // FPU Stack: input, 2*pi, input.
2636 {
2637 NearLabel no_exceptions;
2638 __ fwait();
2639 __ fnstsw_ax();
2640 // Clear if Illegal Operand or Zero Division exceptions are set.
2641 __ test(Operand(eax), Immediate(5));
2642 __ j(zero, &no_exceptions);
2643 __ fnclex();
2644 __ bind(&no_exceptions);
2645 }
ricow@chromium.org65fae842010-08-25 15:26:24 +00002646
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002647 // Compute st(0) % st(1)
2648 {
2649 NearLabel partial_remainder_loop;
2650 __ bind(&partial_remainder_loop);
2651 __ fprem1();
2652 __ fwait();
2653 __ fnstsw_ax();
2654 __ test(Operand(eax), Immediate(0x400 /* C2 */));
2655 // If C2 is set, computation only has partial result. Loop to
2656 // continue computation.
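        // (fprem1 reduces the exponent difference by at most 63 bits per
        // iteration, so very large arguments may need several rounds.)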
2657 __ j(not_zero, &partial_remainder_loop);
2658 }
2659 // FPU Stack: input, 2*pi, input % 2*pi
2660 __ fstp(2);
2661 __ fstp(0);
2662 __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
2663
2664 // FPU Stack: input % 2*pi
2665 __ bind(&in_range);
2666 switch (type_) {
2667 case TranscendentalCache::SIN:
2668 __ fsin();
2669 break;
2670 case TranscendentalCache::COS:
2671 __ fcos();
2672 break;
2673 default:
2674 UNREACHABLE();
2675 }
2676 __ bind(&done);
2677 } else {
2678 ASSERT(type_ == TranscendentalCache::LOG);
2679 __ fldln2();
2680 __ fxch();
2681 __ fyl2x();
ricow@chromium.org65fae842010-08-25 15:26:24 +00002682 }
ricow@chromium.org65fae842010-08-25 15:26:24 +00002683}
2684
2685
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00002686void TranscendentalCacheSSE2Stub::Generate(MacroAssembler* masm) {
2687 // Input on stack:
2688 // esp[0]: return address.
2689 // Input in registers:
2690 // xmm1: untagged double input argument.
2691 // Output:
2692 // xmm1: untagged double result.
2693 Label skip_cache;
2694 Label call_runtime;
2695
2696 // Input is an untagged double in xmm1.
2697 // Compute hash (the shifts are arithmetic):
2698 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2699 if (CpuFeatures::IsSupported(SSE4_1)) {
2700 CpuFeatures::Scope sse4_scope(SSE4_1);
2701 __ pextrd(Operand(edx), xmm1, 0x1); // copy xmm1[63..32] to edx.
2702 } else {
2703 __ pshufd(xmm0, xmm1, 0x1);
2704 __ movd(Operand(edx), xmm0);
2705 }
2706 __ movd(Operand(ebx), xmm1);
2707
2708 // xmm1 = double value
2709 // ebx = low 32 bits of double value
2710 // edx = high 32 bits of double value
2711 // Compute hash (the shifts are arithmetic):
2712 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2713 __ mov(ecx, ebx);
2714 __ xor_(ecx, Operand(edx));
2715 __ mov(eax, ecx);
2716 __ sar(eax, 16);
2717 __ xor_(ecx, Operand(eax));
2718 __ mov(eax, ecx);
2719 __ sar(eax, 8);
2720 __ xor_(ecx, Operand(eax));
2721 ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize));
2722 __ and_(Operand(ecx), Immediate(TranscendentalCache::kCacheSize - 1));
2723
2724 // xmm1 = double value.
2725 // ebx = low 32 bits of double value.
2726 // edx = high 32 bits of double value.
2727 // ecx = TranscendentalCache::hash(double value).
2728 __ mov(eax,
2729 Immediate(ExternalReference::transcendental_cache_array_address()));
2730 // Eax points to cache array.
2731 __ mov(eax, Operand(eax, type_ * sizeof(TranscendentalCache::caches_[0])));
2732 // Eax points to the cache for the type type_.
2733 // If NULL, the cache hasn't been initialized yet, so go through runtime.
2734 __ test(eax, Operand(eax));
2735 __ j(zero, &call_runtime);
2736#ifdef DEBUG
2737 // Check that the layout of cache elements match expectations.
2738 { TranscendentalCache::Element test_elem[2];
2739 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2740 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2741 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2742 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2743 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
2744    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
2745 CHECK_EQ(0, elem_in0 - elem_start);
2746 CHECK_EQ(kIntSize, elem_in1 - elem_start);
2747 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2748 }
2749#endif
2750 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
2751 __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2752 __ lea(ecx, Operand(eax, ecx, times_4, 0));
2753 // Check if cache matches: Double value is stored in uint32_t[2] array.
2754 NearLabel cache_miss;
2755 __ cmp(ebx, Operand(ecx, 0));
2756 __ j(not_equal, &cache_miss);
2757 __ cmp(edx, Operand(ecx, kIntSize));
2758 __ j(not_equal, &cache_miss);
2759 // Cache hit!
2760 __ mov(eax, Operand(ecx, 2 * kIntSize));
2761 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2762 __ Ret();
2763
2764 __ bind(&cache_miss);
2765 // Update cache with new value.
2766 // We are short on registers, so use no_reg as scratch.
2767 // This gives slightly larger code.
2768 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2769 __ sub(Operand(esp), Immediate(kDoubleSize));
2770 __ movdbl(Operand(esp, 0), xmm1);
2771 __ fld_d(Operand(esp, 0));
2772 __ add(Operand(esp), Immediate(kDoubleSize));
2773 GenerateOperation(masm);
2774 __ mov(Operand(ecx, 0), ebx);
2775 __ mov(Operand(ecx, kIntSize), edx);
2776 __ mov(Operand(ecx, 2 * kIntSize), eax);
2777 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2778 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2779 __ Ret();
2780
2781 __ bind(&skip_cache);
2782 __ sub(Operand(esp), Immediate(kDoubleSize));
2783 __ movdbl(Operand(esp, 0), xmm1);
2784 __ fld_d(Operand(esp, 0));
2785 GenerateOperation(masm);
2786 __ fstp_d(Operand(esp, 0));
2787 __ movdbl(xmm1, Operand(esp, 0));
2788 __ add(Operand(esp), Immediate(kDoubleSize));
2789 __ Ret();
2790
2791 __ bind(&call_runtime);
2792 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2793 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2794 __ EnterInternalFrame();
2795 __ push(eax);
2796 __ CallRuntime(RuntimeFunction(), 1);
2797 __ LeaveInternalFrame();
2798 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2799 __ Ret();
2800}
2801
2802
2803Runtime::FunctionId TranscendentalCacheSSE2Stub::RuntimeFunction() {
2804 switch (type_) {
2805 // Add more cases when necessary.
2806 case TranscendentalCache::LOG: return Runtime::kMath_log;
2807 default:
2808 UNIMPLEMENTED();
2809 return Runtime::kAbort;
2810 }
2811}
2812
2813
2814void TranscendentalCacheSSE2Stub::GenerateOperation(MacroAssembler* masm) {
2815 // Only free register is edi.
2816 // Input value is on FP stack and in xmm1.
2817
2818 ASSERT(type_ == TranscendentalCache::LOG);
2819 __ fldln2();
2820 __ fxch();
2821 __ fyl2x();
2822}
2823
2824
ricow@chromium.org65fae842010-08-25 15:26:24 +00002825// Get the integer part of a heap number. Surprisingly, all this bit twiddling
2826// is faster than using the built-in instructions on floating point registers.
2827// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
2828// trashed registers.
2829void IntegerConvert(MacroAssembler* masm,
2830 Register source,
2831 TypeInfo type_info,
2832 bool use_sse3,
2833 Label* conversion_failure) {
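  // Strategy: when the value is statically known to be an int32 and SSE2 is
  // available, convert with cvttsd2si; with SSE3, convert via fisttp on the
  // x87 stack; otherwise extract the integer by hand from the exponent and
  // mantissa words below.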
2834 ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
2835 Label done, right_exponent, normal_exponent;
2836 Register scratch = ebx;
2837 Register scratch2 = edi;
2838 if (type_info.IsInteger32() && CpuFeatures::IsEnabled(SSE2)) {
2839 CpuFeatures::Scope scope(SSE2);
2840 __ cvttsd2si(ecx, FieldOperand(source, HeapNumber::kValueOffset));
2841 return;
2842 }
2843 if (!type_info.IsInteger32() || !use_sse3) {
2844 // Get exponent word.
2845 __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
2846 // Get exponent alone in scratch2.
2847 __ mov(scratch2, scratch);
2848 __ and_(scratch2, HeapNumber::kExponentMask);
2849 }
2850 if (use_sse3) {
2851 CpuFeatures::Scope scope(SSE3);
2852 if (!type_info.IsInteger32()) {
2853 // Check whether the exponent is too big for a 64 bit signed integer.
2854 static const uint32_t kTooBigExponent =
2855 (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
2856 __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
2857 __ j(greater_equal, conversion_failure);
2858 }
2859 // Load x87 register with heap number.
2860 __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
2861 // Reserve space for 64 bit answer.
2862 __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
2863 // Do conversion, which cannot fail because we checked the exponent.
2864 __ fisttp_d(Operand(esp, 0));
2865 __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
2866 __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
2867 } else {
2868 // Load ecx with zero. We use this either for the final shift or
2869 // for the answer.
2870 __ xor_(ecx, Operand(ecx));
2871 // Check whether the exponent matches a 32 bit signed int that cannot be
2872 // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
2873 // exponent is 30 (biased). This is the exponent that we are fastest at and
2874 // also the highest exponent we can handle here.
2875 const uint32_t non_smi_exponent =
2876 (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
2877 __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
2878 // If we have a match of the int32-but-not-Smi exponent then skip some
2879 // logic.
2880 __ j(equal, &right_exponent);
2881 // If the exponent is higher than that then go to slow case. This catches
2882 // numbers that don't fit in a signed int32, infinities and NaNs.
2883 __ j(less, &normal_exponent);
2884
2885 {
2886 // Handle a big exponent. The only reason we have this code is that the
2887 // >>> operator has a tendency to generate numbers with an exponent of 31.
2888 const uint32_t big_non_smi_exponent =
2889 (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
2890 __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
2891 __ j(not_equal, conversion_failure);
2892 // We have the big exponent, typically from >>>. This means the number is
2893 // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
2894 __ mov(scratch2, scratch);
2895 __ and_(scratch2, HeapNumber::kMantissaMask);
2896 // Put back the implicit 1.
2897 __ or_(scratch2, 1 << HeapNumber::kExponentShift);
2898 // Shift up the mantissa bits to take up the space the exponent used to
2899      // take. We just or'ed in the implicit bit, which takes care of one bit,
2900      // and we want to use the full unsigned range, so we subtract 1 from the
2901      // shift distance.
2902 const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
2903 __ shl(scratch2, big_shift_distance);
2904 // Get the second half of the double.
2905 __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
2906      // Shift down 21 bits to get the most significant 11 bits of the low
2907      // mantissa word.
2908 __ shr(ecx, 32 - big_shift_distance);
2909 __ or_(ecx, Operand(scratch2));
2910 // We have the answer in ecx, but we may need to negate it.
2911 __ test(scratch, Operand(scratch));
2912 __ j(positive, &done);
2913 __ neg(ecx);
2914 __ jmp(&done);
2915 }
2916
2917 __ bind(&normal_exponent);
2918 // Exponent word in scratch, exponent part of exponent word in scratch2.
2919 // Zero in ecx.
2920 // We know the exponent is smaller than 30 (biased). If it is less than
2921    // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.,
2922 // it rounds to zero.
2923 const uint32_t zero_exponent =
2924 (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
2925 __ sub(Operand(scratch2), Immediate(zero_exponent));
2926 // ecx already has a Smi zero.
2927 __ j(less, &done);
2928
2929 // We have a shifted exponent between 0 and 30 in scratch2.
2930 __ shr(scratch2, HeapNumber::kExponentShift);
2931 __ mov(ecx, Immediate(30));
2932 __ sub(ecx, Operand(scratch2));
2933
2934 __ bind(&right_exponent);
2935 // Here ecx is the shift, scratch is the exponent word.
2936 // Get the top bits of the mantissa.
2937 __ and_(scratch, HeapNumber::kMantissaMask);
2938 // Put back the implicit 1.
2939 __ or_(scratch, 1 << HeapNumber::kExponentShift);
2940 // Shift up the mantissa bits to take up the space the exponent used to
2941    // take. We have kExponentShift + 1 significant bits in the low end of the
2942 // word. Shift them to the top bits.
2943 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
2944 __ shl(scratch, shift_distance);
2945 // Get the second half of the double. For some exponents we don't
2946 // actually need this because the bits get shifted out again, but
2947 // it's probably slower to test than just to do it.
2948 __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
2949    // Shift down 22 bits to get the most significant 10 bits of the low
2950    // mantissa word.
2951 __ shr(scratch2, 32 - shift_distance);
2952 __ or_(scratch2, Operand(scratch));
2953 // Move down according to the exponent.
2954 __ shr_cl(scratch2);
2955 // Now the unsigned answer is in scratch2. We need to move it to ecx and
2956 // we may need to fix the sign.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00002957 NearLabel negative;
ricow@chromium.org65fae842010-08-25 15:26:24 +00002958 __ xor_(ecx, Operand(ecx));
2959 __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
2960 __ j(greater, &negative);
2961 __ mov(ecx, scratch2);
2962 __ jmp(&done);
2963 __ bind(&negative);
2964 __ sub(ecx, Operand(scratch2));
2965 __ bind(&done);
2966 }
2967}
2968
2969
2970// Input: edx, eax are the left and right objects of a bit op.
2971// Output: eax, ecx are left and right integers for a bit op.
2972void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
2973 TypeInfo type_info,
2974 bool use_sse3,
2975 Label* conversion_failure) {
2976 // Check float operands.
2977 Label arg1_is_object, check_undefined_arg1;
2978 Label arg2_is_object, check_undefined_arg2;
2979 Label load_arg2, done;
2980
2981 if (!type_info.IsDouble()) {
2982 if (!type_info.IsSmi()) {
2983 __ test(edx, Immediate(kSmiTagMask));
2984 __ j(not_zero, &arg1_is_object);
2985 } else {
2986 if (FLAG_debug_code) __ AbortIfNotSmi(edx);
2987 }
2988 __ SmiUntag(edx);
2989 __ jmp(&load_arg2);
2990 }
2991
2992 __ bind(&arg1_is_object);
2993
2994 // Get the untagged integer version of the edx heap number in ecx.
2995 IntegerConvert(masm, edx, type_info, use_sse3, conversion_failure);
2996 __ mov(edx, ecx);
2997
2998 // Here edx has the untagged integer, eax has a Smi or a heap number.
2999 __ bind(&load_arg2);
3000 if (!type_info.IsDouble()) {
3001 // Test if arg2 is a Smi.
3002 if (!type_info.IsSmi()) {
3003 __ test(eax, Immediate(kSmiTagMask));
3004 __ j(not_zero, &arg2_is_object);
3005 } else {
3006 if (FLAG_debug_code) __ AbortIfNotSmi(eax);
3007 }
3008 __ SmiUntag(eax);
3009 __ mov(ecx, eax);
3010 __ jmp(&done);
3011 }
3012
3013 __ bind(&arg2_is_object);
3014
3015 // Get the untagged integer version of the eax heap number in ecx.
3016 IntegerConvert(masm, eax, type_info, use_sse3, conversion_failure);
3017 __ bind(&done);
3018 __ mov(eax, edx);
3019}
3020
3021
3022// Input: edx, eax are the left and right objects of a bit op.
3023// Output: eax, ecx are left and right integers for a bit op.
3024void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
3025 bool use_sse3,
3026 Label* conversion_failure) {
3027 // Check float operands.
3028 Label arg1_is_object, check_undefined_arg1;
3029 Label arg2_is_object, check_undefined_arg2;
3030 Label load_arg2, done;
3031
3032 // Test if arg1 is a Smi.
3033 __ test(edx, Immediate(kSmiTagMask));
3034 __ j(not_zero, &arg1_is_object);
3035
3036 __ SmiUntag(edx);
3037 __ jmp(&load_arg2);
3038
3039 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
3040 __ bind(&check_undefined_arg1);
3041 __ cmp(edx, Factory::undefined_value());
3042 __ j(not_equal, conversion_failure);
3043 __ mov(edx, Immediate(0));
3044 __ jmp(&load_arg2);
3045
3046 __ bind(&arg1_is_object);
3047 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
3048 __ cmp(ebx, Factory::heap_number_map());
3049 __ j(not_equal, &check_undefined_arg1);
3050
3051 // Get the untagged integer version of the edx heap number in ecx.
3052 IntegerConvert(masm,
3053 edx,
3054 TypeInfo::Unknown(),
3055 use_sse3,
3056 conversion_failure);
3057 __ mov(edx, ecx);
3058
3059 // Here edx has the untagged integer, eax has a Smi or a heap number.
3060 __ bind(&load_arg2);
3061
3062 // Test if arg2 is a Smi.
3063 __ test(eax, Immediate(kSmiTagMask));
3064 __ j(not_zero, &arg2_is_object);
3065
3066 __ SmiUntag(eax);
3067 __ mov(ecx, eax);
3068 __ jmp(&done);
3069
3070 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
3071 __ bind(&check_undefined_arg2);
3072 __ cmp(eax, Factory::undefined_value());
3073 __ j(not_equal, conversion_failure);
3074 __ mov(ecx, Immediate(0));
3075 __ jmp(&done);
3076
3077 __ bind(&arg2_is_object);
3078 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3079 __ cmp(ebx, Factory::heap_number_map());
3080 __ j(not_equal, &check_undefined_arg2);
3081
3082 // Get the untagged integer version of the eax heap number in ecx.
3083 IntegerConvert(masm,
3084 eax,
3085 TypeInfo::Unknown(),
3086 use_sse3,
3087 conversion_failure);
3088 __ bind(&done);
3089 __ mov(eax, edx);
3090}
3091
3092
3093void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
3094 TypeInfo type_info,
3095 bool use_sse3,
3096 Label* conversion_failure) {
3097 if (type_info.IsNumber()) {
3098 LoadNumbersAsIntegers(masm, type_info, use_sse3, conversion_failure);
3099 } else {
3100 LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
3101 }
3102}
3103
3104
kasperl@chromium.orga5551262010-12-07 12:49:48 +00003105void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
3106 bool use_sse3,
3107 Label* not_int32) {
3108 return;
3109}
3110
3111
ricow@chromium.org65fae842010-08-25 15:26:24 +00003112void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
3113 Register number) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003114 NearLabel load_smi, done;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003115
3116 __ test(number, Immediate(kSmiTagMask));
3117 __ j(zero, &load_smi, not_taken);
3118 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
3119 __ jmp(&done);
3120
3121 __ bind(&load_smi);
3122 __ SmiUntag(number);
3123 __ push(number);
3124 __ fild_s(Operand(esp, 0));
3125 __ pop(number);
3126
3127 __ bind(&done);
3128}
3129
3130
3131void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003132 NearLabel load_smi_edx, load_eax, load_smi_eax, done;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003133 // Load operand in edx into xmm0.
3134 __ test(edx, Immediate(kSmiTagMask));
3135 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
3136 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3137
3138 __ bind(&load_eax);
3139 // Load operand in eax into xmm1.
3140 __ test(eax, Immediate(kSmiTagMask));
3141 __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
3142 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
3143 __ jmp(&done);
3144
3145 __ bind(&load_smi_edx);
3146 __ SmiUntag(edx); // Untag smi before converting to float.
3147 __ cvtsi2sd(xmm0, Operand(edx));
3148 __ SmiTag(edx); // Retag smi for heap number overwriting test.
3149 __ jmp(&load_eax);
3150
3151 __ bind(&load_smi_eax);
3152 __ SmiUntag(eax); // Untag smi before converting to float.
3153 __ cvtsi2sd(xmm1, Operand(eax));
3154 __ SmiTag(eax); // Retag smi for heap number overwriting test.
3155
3156 __ bind(&done);
3157}
3158
3159
3160void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
3161 Label* not_numbers) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003162 NearLabel load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003163 // Load operand in edx into xmm0, or branch to not_numbers.
3164 __ test(edx, Immediate(kSmiTagMask));
3165 __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
3166 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
3167 __ j(not_equal, not_numbers); // Argument in edx is not a number.
3168 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3169 __ bind(&load_eax);
3170 // Load operand in eax into xmm1, or branch to not_numbers.
3171 __ test(eax, Immediate(kSmiTagMask));
3172 __ j(zero, &load_smi_eax, not_taken); // Argument in eax is a smi.
3173 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), Factory::heap_number_map());
3174 __ j(equal, &load_float_eax);
3175 __ jmp(not_numbers); // Argument in eax is not a number.
3176 __ bind(&load_smi_edx);
3177 __ SmiUntag(edx); // Untag smi before converting to float.
3178 __ cvtsi2sd(xmm0, Operand(edx));
3179 __ SmiTag(edx); // Retag smi for heap number overwriting test.
3180 __ jmp(&load_eax);
3181 __ bind(&load_smi_eax);
3182 __ SmiUntag(eax); // Untag smi before converting to float.
3183 __ cvtsi2sd(xmm1, Operand(eax));
3184 __ SmiTag(eax); // Retag smi for heap number overwriting test.
3185 __ jmp(&done);
3186 __ bind(&load_float_eax);
3187 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
3188 __ bind(&done);
3189}
3190
3191
3192void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
3193 Register scratch) {
3194 const Register left = edx;
3195 const Register right = eax;
3196 __ mov(scratch, left);
3197 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
3198 __ SmiUntag(scratch);
3199 __ cvtsi2sd(xmm0, Operand(scratch));
3200
3201 __ mov(scratch, right);
3202 __ SmiUntag(scratch);
3203 __ cvtsi2sd(xmm1, Operand(scratch));
3204}
3205
3206
kasperl@chromium.orga5551262010-12-07 12:49:48 +00003207void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
3208 Label* non_int32,
3209 Register scratch) {
3210 __ cvttsd2si(scratch, Operand(xmm0));
3211 __ cvtsi2sd(xmm2, Operand(scratch));
3212 __ ucomisd(xmm0, xmm2);
3213 __ j(not_zero, non_int32);
3214 __ j(carry, non_int32);
3215 __ cvttsd2si(scratch, Operand(xmm1));
3216 __ cvtsi2sd(xmm2, Operand(scratch));
3217 __ ucomisd(xmm1, xmm2);
3218 __ j(not_zero, non_int32);
3219 __ j(carry, non_int32);
3220}
3221
3222
ricow@chromium.org65fae842010-08-25 15:26:24 +00003223void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
3224 Register scratch,
3225 ArgLocation arg_location) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003226 NearLabel load_smi_1, load_smi_2, done_load_1, done;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003227 if (arg_location == ARGS_IN_REGISTERS) {
3228 __ mov(scratch, edx);
3229 } else {
3230 __ mov(scratch, Operand(esp, 2 * kPointerSize));
3231 }
3232 __ test(scratch, Immediate(kSmiTagMask));
3233 __ j(zero, &load_smi_1, not_taken);
3234 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
3235 __ bind(&done_load_1);
3236
3237 if (arg_location == ARGS_IN_REGISTERS) {
3238 __ mov(scratch, eax);
3239 } else {
3240 __ mov(scratch, Operand(esp, 1 * kPointerSize));
3241 }
3242 __ test(scratch, Immediate(kSmiTagMask));
3243 __ j(zero, &load_smi_2, not_taken);
3244 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
3245 __ jmp(&done);
3246
3247 __ bind(&load_smi_1);
3248 __ SmiUntag(scratch);
3249 __ push(scratch);
3250 __ fild_s(Operand(esp, 0));
3251 __ pop(scratch);
3252 __ jmp(&done_load_1);
3253
3254 __ bind(&load_smi_2);
3255 __ SmiUntag(scratch);
3256 __ push(scratch);
3257 __ fild_s(Operand(esp, 0));
3258 __ pop(scratch);
3259
3260 __ bind(&done);
3261}
3262
3263
3264void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
3265 Register scratch) {
3266 const Register left = edx;
3267 const Register right = eax;
3268 __ mov(scratch, left);
3269 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
3270 __ SmiUntag(scratch);
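  // fild_s only accepts a memory operand, so the untagged value is staged in
  // a stack slot; the same slot is reused for the right operand below and
  // popped at the end.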
3271 __ push(scratch);
3272 __ fild_s(Operand(esp, 0));
3273
3274 __ mov(scratch, right);
3275 __ SmiUntag(scratch);
3276 __ mov(Operand(esp, 0), scratch);
3277 __ fild_s(Operand(esp, 0));
3278 __ pop(scratch);
3279}
3280
3281
3282void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
3283 Label* non_float,
3284 Register scratch) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003285 NearLabel test_other, done;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003286 // Test that both operands are numbers (smis or heap numbers); jump to
3287 // non_float if either of them is not. scratch is only used as a temporary.
3288 __ test(edx, Immediate(kSmiTagMask));
3289 __ j(zero, &test_other, not_taken); // argument in edx is OK
3290 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
3291 __ cmp(scratch, Factory::heap_number_map());
3292 __ j(not_equal, non_float); // argument in edx is not a number -> NaN
3293
3294 __ bind(&test_other);
3295 __ test(eax, Immediate(kSmiTagMask));
3296 __ j(zero, &done); // argument in eax is OK
3297 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
3298 __ cmp(scratch, Factory::heap_number_map());
3299 __ j(not_equal, non_float); // argument in eax is not a number -> NaN
3300
3301 // Fall-through: Both operands are numbers.
3302 __ bind(&done);
3303}
3304
3305
kasperl@chromium.orga5551262010-12-07 12:49:48 +00003306void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
3307 Label* non_int32) {
3308 return;
3309}
3310
3311
ricow@chromium.org65fae842010-08-25 15:26:24 +00003312void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00003313 Label slow, done, undo;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003314
3315 if (op_ == Token::SUB) {
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00003316 if (include_smi_code_) {
3317 // Check whether the value is a smi.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003318 NearLabel try_float;
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00003319 __ test(eax, Immediate(kSmiTagMask));
3320 __ j(not_zero, &try_float, not_taken);
ricow@chromium.org65fae842010-08-25 15:26:24 +00003321
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00003322 if (negative_zero_ == kStrictNegativeZero) {
3323 // Go slow case if the value of the expression is zero
3324 // to make sure that we switch between 0 and -0.
3325 __ test(eax, Operand(eax));
3326 __ j(zero, &slow, not_taken);
3327 }
3328
3329 // The value of the expression is a smi that is not zero. Try
3330 // optimistic subtraction '0 - value'.
3331 __ mov(edx, Operand(eax));
3332 __ Set(eax, Immediate(0));
3333 __ sub(eax, Operand(edx));
3334 __ j(overflow, &undo, not_taken);
3335 __ StubReturn(1);
3336
3337 // Try floating point case.
3338 __ bind(&try_float);
3339 } else if (FLAG_debug_code) {
3340 __ AbortIfSmi(eax);
ricow@chromium.org65fae842010-08-25 15:26:24 +00003341 }
3342
ricow@chromium.org65fae842010-08-25 15:26:24 +00003343 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
3344 __ cmp(edx, Factory::heap_number_map());
3345 __ j(not_equal, &slow);
3346 if (overwrite_ == UNARY_OVERWRITE) {
3347 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
3348 __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
3349 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
3350 } else {
3351 __ mov(edx, Operand(eax));
3352 // edx: operand
3353 __ AllocateHeapNumber(eax, ebx, ecx, &undo);
3354 // eax: allocated 'empty' number
3355 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
3356 __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
3357 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
3358 __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
3359 __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
3360 }
3361 } else if (op_ == Token::BIT_NOT) {
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00003362 if (include_smi_code_) {
3363 Label non_smi;
3364 __ test(eax, Immediate(kSmiTagMask));
3365 __ j(not_zero, &non_smi);
3366 __ not_(eax);
3367 __ and_(eax, ~kSmiTagMask); // Remove inverted smi-tag.
3368 __ ret(0);
3369 __ bind(&non_smi);
3370 } else if (FLAG_debug_code) {
3371 __ AbortIfSmi(eax);
3372 }
3373
ricow@chromium.org65fae842010-08-25 15:26:24 +00003374 // Check if the operand is a heap number.
3375 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
3376 __ cmp(edx, Factory::heap_number_map());
3377 __ j(not_equal, &slow, not_taken);
3378
3379 // Convert the heap number in eax to an untagged integer in ecx.
3380 IntegerConvert(masm,
3381 eax,
3382 TypeInfo::Unknown(),
3383 CpuFeatures::IsSupported(SSE3),
3384 &slow);
3385
3386 // Do the bitwise operation and check if the result fits in a smi.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003387 NearLabel try_float;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003388 __ not_(ecx);
3389 __ cmp(ecx, 0xc0000000);
3390 __ j(sign, &try_float, not_taken);
3391
3392 // Tag the result as a smi and we're done.
3393 STATIC_ASSERT(kSmiTagSize == 1);
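  // With kSmiTag == 0 and kSmiTagSize == 1, the lea below computes
  // ecx * 2 + kSmiTag, which is exactly the smi encoding (value << 1).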
3394 __ lea(eax, Operand(ecx, times_2, kSmiTag));
3395 __ jmp(&done);
3396
3397 // Try to store the result in a heap number.
3398 __ bind(&try_float);
3399 if (overwrite_ == UNARY_NO_OVERWRITE) {
3400 // Allocate a fresh heap number, but don't overwrite eax until
3401 // we're sure we can do it without going through the slow case
3402 // that needs the value in eax.
3403 __ AllocateHeapNumber(ebx, edx, edi, &slow);
3404 __ mov(eax, Operand(ebx));
3405 }
3406 if (CpuFeatures::IsSupported(SSE2)) {
3407 CpuFeatures::Scope use_sse2(SSE2);
3408 __ cvtsi2sd(xmm0, Operand(ecx));
3409 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
3410 } else {
3411 __ push(ecx);
3412 __ fild_s(Operand(esp, 0));
3413 __ pop(ecx);
3414 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
3415 }
3416 } else {
3417 UNIMPLEMENTED();
3418 }
3419
3420 // Return from the stub.
3421 __ bind(&done);
3422 __ StubReturn(1);
3423
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00003424 // Restore eax and go slow case.
3425 __ bind(&undo);
3426 __ mov(eax, Operand(edx));
3427
ricow@chromium.org65fae842010-08-25 15:26:24 +00003428 // Handle the slow case by jumping to the JavaScript builtin.
3429 __ bind(&slow);
3430 __ pop(ecx); // pop return address.
3431 __ push(eax);
3432 __ push(ecx); // push return address
3433 switch (op_) {
3434 case Token::SUB:
3435 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
3436 break;
3437 case Token::BIT_NOT:
3438 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
3439 break;
3440 default:
3441 UNREACHABLE();
3442 }
3443}
3444
3445
kasperl@chromium.orga5551262010-12-07 12:49:48 +00003446void MathPowStub::Generate(MacroAssembler* masm) {
3447 // Registers are used as follows:
3448 // edx = base
3449 // eax = exponent
3450 // ecx = temporary, result
3451
3452 CpuFeatures::Scope use_sse2(SSE2);
3453 Label allocate_return, call_runtime;
3454
3455 // Load input parameters.
3456 __ mov(edx, Operand(esp, 2 * kPointerSize));
3457 __ mov(eax, Operand(esp, 1 * kPointerSize));
3458
3459 // Save 1 in xmm3 - we need this several times later on.
3460 __ mov(ecx, Immediate(1));
3461 __ cvtsi2sd(xmm3, Operand(ecx));
3462
3463 Label exponent_nonsmi;
3464 Label base_nonsmi;
3465 // If the exponent is a heap number go to that specific case.
3466 __ test(eax, Immediate(kSmiTagMask));
3467 __ j(not_zero, &exponent_nonsmi);
3468 __ test(edx, Immediate(kSmiTagMask));
3469 __ j(not_zero, &base_nonsmi);
3470
3471 // Optimized version when both exponent and base are smis.
3472 Label powi;
3473 __ SmiUntag(edx);
3474 __ cvtsi2sd(xmm0, Operand(edx));
3475 __ jmp(&powi);
3476 // Exponent is a smi and base is a heap number.
3477 __ bind(&base_nonsmi);
3478 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3479 Factory::heap_number_map());
3480 __ j(not_equal, &call_runtime);
3481
3482 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3483
3484 // Optimized version of pow if exponent is a smi.
3485 // xmm0 contains the base.
3486 __ bind(&powi);
3487 __ SmiUntag(eax);
3488
3489 // Save exponent in base as we need to check if exponent is negative later.
3490 // We know that base and exponent are in different registers.
3491 __ mov(edx, eax);
3492
3493 // Get absolute value of exponent.
3494 NearLabel no_neg;
3495 __ cmp(eax, 0);
3496 __ j(greater_equal, &no_neg);
3497 __ neg(eax);
3498 __ bind(&no_neg);
3499
3500 // Load xmm1 with 1.
3501 __ movsd(xmm1, xmm3);
3502 NearLabel while_true;
3503 NearLabel no_multiply;
3504
3505 __ bind(&while_true);
3506 __ shr(eax, 1);
3507 __ j(not_carry, &no_multiply);
3508 __ mulsd(xmm1, xmm0);
3509 __ bind(&no_multiply);
3510 __ test(eax, Operand(eax));
3511 __ mulsd(xmm0, xmm0);
3512 __ j(not_zero, &while_true);
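  // The loop above is binary exponentiation (square-and-multiply). A rough C
  // sketch of the same computation (names are illustrative only):
  //   double result = 1.0;          // xmm1
  //   double b = base;              // xmm0
  //   unsigned e = abs(exponent);   // eax
  //   while (e != 0) {
  //     if (e & 1) result *= b;
  //     e >>= 1;
  //     b *= b;
  //   }
  // E.g. e = 5 (0b101) gives result = b * b^4 = b^5 after three iterations.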
3513
3514 // edx (saved above) has the original value of the exponent - if the exponent
3515 // is negative, return 1/result.
3516 __ test(edx, Operand(edx));
3517 __ j(positive, &allocate_return);
3518 // Special case if xmm1 has reached infinity.
3519 __ mov(ecx, Immediate(0x7FB00000));
3520 __ movd(xmm0, Operand(ecx));
3521 __ cvtss2sd(xmm0, xmm0);
3522 __ ucomisd(xmm0, xmm1);
3523 __ j(equal, &call_runtime);
3524 __ divsd(xmm3, xmm1);
3525 __ movsd(xmm1, xmm3);
3526 __ jmp(&allocate_return);
3527
3528 // Exponent (or both) is a heap number - from here on we work on doubles
3529 // regardless.
3530 __ bind(&exponent_nonsmi);
3531 __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
3532 Factory::heap_number_map());
3533 __ j(not_equal, &call_runtime);
3534 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
3535 // Test if exponent is nan.
3536 __ ucomisd(xmm1, xmm1);
3537 __ j(parity_even, &call_runtime);
3538
3539 NearLabel base_not_smi;
3540 NearLabel handle_special_cases;
3541 __ test(edx, Immediate(kSmiTagMask));
3542 __ j(not_zero, &base_not_smi);
3543 __ SmiUntag(edx);
3544 __ cvtsi2sd(xmm0, Operand(edx));
3545 __ jmp(&handle_special_cases);
3546
3547 __ bind(&base_not_smi);
3548 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
3549 Factory::heap_number_map());
3550 __ j(not_equal, &call_runtime);
3551 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
3552 __ and_(ecx, HeapNumber::kExponentMask);
3553 __ cmp(Operand(ecx), Immediate(HeapNumber::kExponentMask));
3554 // base is NaN or +/-Infinity
3555 __ j(greater_equal, &call_runtime);
3556 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
3557
3558 // base is in xmm0 and exponent is in xmm1.
3559 __ bind(&handle_special_cases);
3560 NearLabel not_minus_half;
3561 // Test for -0.5.
3562 // Load xmm2 with -0.5.
3563 __ mov(ecx, Immediate(0xBF000000));
3564 __ movd(xmm2, Operand(ecx));
3565 __ cvtss2sd(xmm2, xmm2);
3566 // xmm2 now has -0.5.
3567 __ ucomisd(xmm2, xmm1);
3568 __ j(not_equal, &not_minus_half);
3569
3570 // Calculates reciprocal of square root.
3571 // Note that 1/sqrt(x) = sqrt(1/x).
3572 __ divsd(xmm3, xmm0);
3573 __ movsd(xmm1, xmm3);
3574 __ sqrtsd(xmm1, xmm1);
3575 __ jmp(&allocate_return);
3576
3577 // Test for 0.5.
3578 __ bind(&not_minus_half);
3579 // Load xmm2 with 0.5.
3580 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
3581 __ addsd(xmm2, xmm3);
3582 // xmm2 now has 0.5.
3583 __ ucomisd(xmm2, xmm1);
3584 __ j(not_equal, &call_runtime);
3585 // Calculates square root.
3586 __ movsd(xmm1, xmm0);
3587 __ sqrtsd(xmm1, xmm1);
3588
3589 __ bind(&allocate_return);
3590 __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
3591 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
3592 __ mov(eax, ecx);
3593 __ ret(2);
3594
3595 __ bind(&call_runtime);
3596 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3597}
3598
3599
ricow@chromium.org65fae842010-08-25 15:26:24 +00003600void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
3601 // The key is in edx and the parameter count is in eax.
3602
3603 // The displacement is used for skipping the frame pointer on the
3604 // stack. It is the offset of the last parameter (if any) relative
3605 // to the frame pointer.
3606 static const int kDisplacement = 1 * kPointerSize;
3607
3608 // Check that the key is a smi.
3609 Label slow;
3610 __ test(edx, Immediate(kSmiTagMask));
3611 __ j(not_zero, &slow, not_taken);
3612
3613 // Check if the calling frame is an arguments adaptor frame.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003614 NearLabel adaptor;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003615 __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3616 __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
3617 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3618 __ j(equal, &adaptor);
3619
3620 // Check index against formal parameters count limit passed in
3621 // through register eax. Use unsigned comparison to get negative
3622 // check for free.
3623 __ cmp(edx, Operand(eax));
3624 __ j(above_equal, &slow, not_taken);
3625
3626 // Read the argument from the stack and return it.
3627 STATIC_ASSERT(kSmiTagSize == 1);
3628 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
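  // eax (the count) and edx (the key) are smis, i.e. value * 2, so scaling
  // them by times_2 yields value * kPointerSize. The load below reads from
  // ebp + kDisplacement + (count - key) * kPointerSize, the key'th argument
  // counted from the end.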
3629 __ lea(ebx, Operand(ebp, eax, times_2, 0));
3630 __ neg(edx);
3631 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3632 __ ret(0);
3633
3634 // Arguments adaptor case: Check index against actual arguments
3635 // limit found in the arguments adaptor frame. Use unsigned
3636 // comparison to get negative check for free.
3637 __ bind(&adaptor);
3638 __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3639 __ cmp(edx, Operand(ecx));
3640 __ j(above_equal, &slow, not_taken);
3641
3642 // Read the argument from the stack and return it.
3643 STATIC_ASSERT(kSmiTagSize == 1);
3644 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
3645 __ lea(ebx, Operand(ebx, ecx, times_2, 0));
3646 __ neg(edx);
3647 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3648 __ ret(0);
3649
3650 // Slow-case: Handle non-smi or out-of-bounds access to arguments
3651 // by calling the runtime system.
3652 __ bind(&slow);
3653 __ pop(ebx); // Return address.
3654 __ push(edx);
3655 __ push(ebx);
3656 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
3657}
3658
3659
3660void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
3661 // esp[0] : return address
3662 // esp[4] : number of parameters
3663 // esp[8] : receiver displacement
3664 // esp[16] : function
3665
3666 // The displacement is used for skipping the return address and the
3667 // frame pointer on the stack. It is the offset of the last
3668 // parameter (if any) relative to the frame pointer.
3669 static const int kDisplacement = 2 * kPointerSize;
3670
3671 // Check if the calling frame is an arguments adaptor frame.
3672 Label adaptor_frame, try_allocate, runtime;
3673 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3674 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3675 __ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3676 __ j(equal, &adaptor_frame);
3677
3678 // Get the length from the frame.
3679 __ mov(ecx, Operand(esp, 1 * kPointerSize));
3680 __ jmp(&try_allocate);
3681
3682 // Patch the arguments.length and the parameters pointer.
3683 __ bind(&adaptor_frame);
3684 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3685 __ mov(Operand(esp, 1 * kPointerSize), ecx);
3686 __ lea(edx, Operand(edx, ecx, times_2, kDisplacement));
3687 __ mov(Operand(esp, 2 * kPointerSize), edx);
3688
3689 // Try the new space allocation. Start out with computing the size of
3690 // the arguments object and the elements array.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003691 NearLabel add_arguments_object;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003692 __ bind(&try_allocate);
3693 __ test(ecx, Operand(ecx));
3694 __ j(zero, &add_arguments_object);
3695 __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
3696 __ bind(&add_arguments_object);
3697 __ add(Operand(ecx), Immediate(Heap::kArgumentsObjectSize));
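  // ecx held the argument count n as a smi (2 * n); scaled by times_2 above
  // this is n * kPointerSize, so ecx now holds FixedArray::kHeaderSize +
  // n * kPointerSize + Heap::kArgumentsObjectSize, the combined size of the
  // arguments object and its elements array (the elements part is skipped
  // when n == 0).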
3698
3699 // Do the allocation of both objects in one go.
3700 __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
3701
3702 // Get the arguments boilerplate from the current (global) context.
3703 int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
3704 __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3705 __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3706 __ mov(edi, Operand(edi, offset));
3707
3708 // Copy the JS object part.
3709 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3710 __ mov(ebx, FieldOperand(edi, i));
3711 __ mov(FieldOperand(eax, i), ebx);
3712 }
3713
3714 // Setup the callee in-object property.
3715 STATIC_ASSERT(Heap::arguments_callee_index == 0);
3716 __ mov(ebx, Operand(esp, 3 * kPointerSize));
3717 __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
3718
3719 // Get the length (smi tagged) and set that as an in-object property too.
3720 STATIC_ASSERT(Heap::arguments_length_index == 1);
3721 __ mov(ecx, Operand(esp, 1 * kPointerSize));
3722 __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
3723
3724 // If there are no actual arguments, we're done.
3725 Label done;
3726 __ test(ecx, Operand(ecx));
3727 __ j(zero, &done);
3728
3729 // Get the parameters pointer from the stack.
3730 __ mov(edx, Operand(esp, 2 * kPointerSize));
3731
3732 // Setup the elements pointer in the allocated arguments object and
3733 // initialize the header in the elements fixed array.
3734 __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
3735 __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3736 __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3737 Immediate(Factory::fixed_array_map()));
3738 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3739 // Untag the length for the loop below.
3740 __ SmiUntag(ecx);
3741
3742 // Copy the fixed array slots.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003743 NearLabel loop;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003744 __ bind(&loop);
3745 __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
3746 __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
3747 __ add(Operand(edi), Immediate(kPointerSize));
3748 __ sub(Operand(edx), Immediate(kPointerSize));
3749 __ dec(ecx);
3750 __ j(not_zero, &loop);
3751
3752 // Return and remove the on-stack parameters.
3753 __ bind(&done);
3754 __ ret(3 * kPointerSize);
3755
3756 // Do the runtime call to allocate the arguments object.
3757 __ bind(&runtime);
3758 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
3759}
3760
3761
3762void RegExpExecStub::Generate(MacroAssembler* masm) {
3763 // Just jump directly to the runtime system if native RegExp is not selected
3764 // at compile time, or if entering generated regexp code has been disabled by
3765 // the runtime flag when this stub is compiled.
3766#ifdef V8_INTERPRETED_REGEXP
3767 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3768#else // V8_INTERPRETED_REGEXP
3769 if (!FLAG_regexp_entry_native) {
3770 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3771 return;
3772 }
3773
3774 // Stack frame on entry.
3775 // esp[0]: return address
3776 // esp[4]: last_match_info (expected JSArray)
3777 // esp[8]: previous index
3778 // esp[12]: subject string
3779 // esp[16]: JSRegExp object
3780
3781 static const int kLastMatchInfoOffset = 1 * kPointerSize;
3782 static const int kPreviousIndexOffset = 2 * kPointerSize;
3783 static const int kSubjectOffset = 3 * kPointerSize;
3784 static const int kJSRegExpOffset = 4 * kPointerSize;
3785
3786 Label runtime, invoke_regexp;
3787
3788 // Ensure that a RegExp stack is allocated.
3789 ExternalReference address_of_regexp_stack_memory_address =
3790 ExternalReference::address_of_regexp_stack_memory_address();
3791 ExternalReference address_of_regexp_stack_memory_size =
3792 ExternalReference::address_of_regexp_stack_memory_size();
3793 __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3794 __ test(ebx, Operand(ebx));
3795 __ j(zero, &runtime, not_taken);
3796
3797 // Check that the first argument is a JSRegExp object.
3798 __ mov(eax, Operand(esp, kJSRegExpOffset));
3799 STATIC_ASSERT(kSmiTag == 0);
3800 __ test(eax, Immediate(kSmiTagMask));
3801 __ j(zero, &runtime);
3802 __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
3803 __ j(not_equal, &runtime);
3804 // Check that the RegExp has been compiled (data contains a fixed array).
3805 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3806 if (FLAG_debug_code) {
3807 __ test(ecx, Immediate(kSmiTagMask));
3808 __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
3809 __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
3810 __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
3811 }
3812
3813 // ecx: RegExp data (FixedArray)
3814 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3815 __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
3816 __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
3817 __ j(not_equal, &runtime);
3818
3819 // ecx: RegExp data (FixedArray)
3820 // Check that the number of captures fit in the static offsets vector buffer.
3821 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3822 // Calculate number of capture registers (number_of_captures + 1) * 2. This
3823 // uses the assumption that smis are 2 * their untagged value.
3824 STATIC_ASSERT(kSmiTag == 0);
3825 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3826 __ add(Operand(edx), Immediate(2)); // edx was a smi.
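  // edx held the capture count n as a smi, i.e. 2 * n; adding 2 gives
  // 2 * (n + 1) = (n + 1) * 2 capture registers, e.g. 3 captures -> 8
  // registers (one start/end pair per capture plus one for the whole match).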
3827 // Check that the static offsets vector buffer is large enough.
3828 __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
3829 __ j(above, &runtime);
3830
3831 // ecx: RegExp data (FixedArray)
3832 // edx: Number of capture registers
3833 // Check that the second argument is a string.
3834 __ mov(eax, Operand(esp, kSubjectOffset));
3835 __ test(eax, Immediate(kSmiTagMask));
3836 __ j(zero, &runtime);
3837 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
3838 __ j(NegateCondition(is_string), &runtime);
3839 // Get the length of the string to ebx.
3840 __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
3841
3842 // ebx: Length of subject string as a smi
3843 // ecx: RegExp data (FixedArray)
3844 // edx: Number of capture registers
3845 // Check that the third argument is a non-negative smi less than the subject
3846 // string length. A negative value will be greater (unsigned comparison).
3847 __ mov(eax, Operand(esp, kPreviousIndexOffset));
3848 __ test(eax, Immediate(kSmiTagMask));
3849 __ j(not_zero, &runtime);
3850 __ cmp(eax, Operand(ebx));
3851 __ j(above_equal, &runtime);
3852
3853 // ecx: RegExp data (FixedArray)
3854 // edx: Number of capture registers
3855 // Check that the fourth object is a JSArray object.
3856 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3857 __ test(eax, Immediate(kSmiTagMask));
3858 __ j(zero, &runtime);
3859 __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
3860 __ j(not_equal, &runtime);
3861 // Check that the JSArray is in fast case.
3862 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3863 __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
3864 __ cmp(eax, Factory::fixed_array_map());
3865 __ j(not_equal, &runtime);
3866 // Check that the last match info has space for the capture registers and the
3867 // additional information.
3868 __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
3869 __ SmiUntag(eax);
3870 __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
3871 __ cmp(edx, Operand(eax));
3872 __ j(greater, &runtime);
3873
3874 // ecx: RegExp data (FixedArray)
3875 // Check the representation and encoding of the subject string.
3876 Label seq_ascii_string, seq_two_byte_string, check_code;
3877 __ mov(eax, Operand(esp, kSubjectOffset));
3878 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3879 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
3880 // First check for flat two byte string.
3881 __ and_(ebx,
3882 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
3883 STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
3884 __ j(zero, &seq_two_byte_string);
3885 // Any other flat string must be a flat ascii string.
3886 __ test(Operand(ebx),
3887 Immediate(kIsNotStringMask | kStringRepresentationMask));
3888 __ j(zero, &seq_ascii_string);
3889
3890 // Check for flat cons string.
3891 // A flat cons string is a cons string where the second part is the empty
3892 // string. In that case the subject string is just the first part of the cons
3893 // string. Also in this case the first part of the cons string is known to be
3894 // a sequential string or an external string.
3895 STATIC_ASSERT(kExternalStringTag != 0);
3896 STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
3897 __ test(Operand(ebx),
3898 Immediate(kIsNotStringMask | kExternalStringTag));
3899 __ j(not_zero, &runtime);
3900 // String is a cons string.
3901 __ mov(edx, FieldOperand(eax, ConsString::kSecondOffset));
3902 __ cmp(Operand(edx), Factory::empty_string());
3903 __ j(not_equal, &runtime);
3904 __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
3905 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3906 // String is a cons string with empty second part.
3907 // eax: first part of cons string.
3908 // ebx: map of first part of cons string.
3909 // Is first part a flat two byte string?
3910 __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3911 kStringRepresentationMask | kStringEncodingMask);
3912 STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
3913 __ j(zero, &seq_two_byte_string);
3914 // Any other flat string must be ascii.
3915 __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3916 kStringRepresentationMask);
3917 __ j(not_zero, &runtime);
3918
3919 __ bind(&seq_ascii_string);
3920 // eax: subject string (flat ascii)
3921 // ecx: RegExp data (FixedArray)
3922 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
3923 __ Set(edi, Immediate(1)); // Type is ascii.
3924 __ jmp(&check_code);
3925
3926 __ bind(&seq_two_byte_string);
3927 // eax: subject string (flat two byte)
3928 // ecx: RegExp data (FixedArray)
3929 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
3930 __ Set(edi, Immediate(0)); // Type is two byte.
3931
3932 __ bind(&check_code);
3933 // Check that the irregexp code has been generated for the actual string
3934 // encoding. If it has, the field contains a code object; otherwise it
3935 // contains the hole.
3936 __ CmpObjectType(edx, CODE_TYPE, ebx);
3937 __ j(not_equal, &runtime);
3938
3939 // eax: subject string
3940 // edx: code
3941 // edi: encoding of subject string (1 if ascii, 0 if two_byte);
3942 // Load used arguments before starting to push arguments for call to native
3943 // RegExp code to avoid handling changing stack height.
3944 __ mov(ebx, Operand(esp, kPreviousIndexOffset));
3945 __ SmiUntag(ebx); // Previous index from smi.
3946
3947 // eax: subject string
3948 // ebx: previous index
3949 // edx: code
3950 // edi: encoding of subject string (1 if ascii, 0 if two_byte);
3951 // All checks done. Now push arguments for native regexp code.
3952 __ IncrementCounter(&Counters::regexp_entry_native, 1);
3953
3954 static const int kRegExpExecuteArguments = 7;
3955 __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
3956
3957 // Argument 7: Indicate that this is a direct call from JavaScript.
3958 __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
3959
3960 // Argument 6: Start (high end) of backtracking stack memory area.
3961 __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
3962 __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3963 __ mov(Operand(esp, 5 * kPointerSize), ecx);
3964
3965 // Argument 5: static offsets vector buffer.
3966 __ mov(Operand(esp, 4 * kPointerSize),
3967 Immediate(ExternalReference::address_of_static_offsets_vector()));
3968
3969 // Argument 4: End of string data
3970 // Argument 3: Start of string data
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00003971 NearLabel setup_two_byte, setup_rest;
ricow@chromium.org65fae842010-08-25 15:26:24 +00003972 __ test(edi, Operand(edi));
3973 __ mov(edi, FieldOperand(eax, String::kLengthOffset));
3974 __ j(zero, &setup_two_byte);
3975 __ SmiUntag(edi);
3976 __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
3977 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3978 __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
3979 __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3980 __ jmp(&setup_rest);
3981
3982 __ bind(&setup_two_byte);
3983 STATIC_ASSERT(kSmiTag == 0);
3984 STATIC_ASSERT(kSmiTagSize == 1); // edi is a smi (value * 2).
3985 __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
3986 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3987 __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
3988 __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3989
3990 __ bind(&setup_rest);
3991
3992 // Argument 2: Previous index.
3993 __ mov(Operand(esp, 1 * kPointerSize), ebx);
3994
3995 // Argument 1: Subject string.
3996 __ mov(Operand(esp, 0 * kPointerSize), eax);
3997
3998 // Locate the code entry and call it.
3999 __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
4000 __ CallCFunction(edx, kRegExpExecuteArguments);
4001
4002 // Check the result.
4003 Label success;
4004 __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
4005 __ j(equal, &success, taken);
4006 Label failure;
4007 __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
4008 __ j(equal, &failure, taken);
4009 __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
4010 // If it is not an exception, it can only be retry. Handle that in the runtime system.
4011 __ j(not_equal, &runtime);
4012 // Result must now be exception. If there is no pending exception already, a
4013 // stack overflow (on the backtrack stack) was detected in RegExp code, but
4014 // the exception has not been created yet. Handle that in the runtime system.
4015 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
4016 ExternalReference pending_exception(Top::k_pending_exception_address);
4017 __ mov(eax,
4018 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
4019 __ cmp(eax, Operand::StaticVariable(pending_exception));
4020 __ j(equal, &runtime);
4021 __ bind(&failure);
4022 // For failure and exception return null.
4023 __ mov(Operand(eax), Factory::null_value());
4024 __ ret(4 * kPointerSize);
4025
4026 // Load RegExp data.
4027 __ bind(&success);
4028 __ mov(eax, Operand(esp, kJSRegExpOffset));
4029 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
4030 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
4031 // Calculate number of capture registers (number_of_captures + 1) * 2.
4032 STATIC_ASSERT(kSmiTag == 0);
4033 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
4034 __ add(Operand(edx), Immediate(2)); // edx was a smi.
4035
4036 // edx: Number of capture registers
4037 // Load last_match_info which is still known to be a fast case JSArray.
4038 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
4039 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
4040
4041 // ebx: last_match_info backing store (FixedArray)
4042 // edx: number of capture registers
4043 // Store the capture count.
4044 __ SmiTag(edx); // Number of capture registers to smi.
4045 __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
4046 __ SmiUntag(edx); // Number of capture registers back from smi.
4047 // Store last subject and last input.
4048 __ mov(eax, Operand(esp, kSubjectOffset));
4049 __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
4050 __ mov(ecx, ebx);
4051 __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
4052 __ mov(eax, Operand(esp, kSubjectOffset));
4053 __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
4054 __ mov(ecx, ebx);
4055 __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
4056
4057 // Get the static offsets vector filled by the native regexp code.
4058 ExternalReference address_of_static_offsets_vector =
4059 ExternalReference::address_of_static_offsets_vector();
4060 __ mov(ecx, Immediate(address_of_static_offsets_vector));
4061
4062 // ebx: last_match_info backing store (FixedArray)
4063 // ecx: offsets vector
4064 // edx: number of capture registers
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004065 NearLabel next_capture, done;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004066 // Capture register counter starts from number of capture registers and
4067 // counts down until wrapping after zero.
4068 __ bind(&next_capture);
4069 __ sub(Operand(edx), Immediate(1));
4070 __ j(negative, &done);
4071 // Read the value from the static offsets vector buffer.
4072 __ mov(edi, Operand(ecx, edx, times_int_size, 0));
4073 __ SmiTag(edi);
4074 // Store the smi value in the last match info.
4075 __ mov(FieldOperand(ebx,
4076 edx,
4077 times_pointer_size,
4078 RegExpImpl::kFirstCaptureOffset),
4079 edi);
4080 __ jmp(&next_capture);
4081 __ bind(&done);
4082
4083 // Return last match info.
4084 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
4085 __ ret(4 * kPointerSize);
4086
4087 // Do the runtime call to execute the regexp.
4088 __ bind(&runtime);
4089 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4090#endif // V8_INTERPRETED_REGEXP
4091}
4092
4093
kasperl@chromium.orga5551262010-12-07 12:49:48 +00004094void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4095 const int kMaxInlineLength = 100;
4096 Label slowcase;
4097 NearLabel done;
4098 __ mov(ebx, Operand(esp, kPointerSize * 3));
4099 __ test(ebx, Immediate(kSmiTagMask));
4100 __ j(not_zero, &slowcase);
4101 __ cmp(Operand(ebx), Immediate(Smi::FromInt(kMaxInlineLength)));
4102 __ j(above, &slowcase);
4103 // Smi-tagging is equivalent to multiplying by 2.
4104 STATIC_ASSERT(kSmiTag == 0);
4105 STATIC_ASSERT(kSmiTagSize == 1);
4106 // Allocate RegExpResult followed by FixedArray with size in ebx.
4107 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4108 // Elements: [Map][Length][..elements..]
4109 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
4110 times_half_pointer_size,
4111 ebx, // In: Number of elements (times 2, being a smi)
4112 eax, // Out: Start of allocation (tagged).
4113 ecx, // Out: End of allocation.
4114 edx, // Scratch register
4115 &slowcase,
4116 TAG_OBJECT);
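  // ebx holds the number of elements as a smi (2 * n); with scale factor
  // times_half_pointer_size the variable part of the allocation is
  // n * kPointerSize bytes of element slots on top of the fixed
  // JSRegExpResult and FixedArray header sizes.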
4117 // eax: Start of allocated area, object-tagged.
4118
4119 // Set JSArray map to global.regexp_result_map().
4120 // Set empty properties FixedArray.
4121 // Set elements to point to FixedArray allocated right after the JSArray.
4122 // Interleave operations for better latency.
4123 __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
4124 __ mov(ecx, Immediate(Factory::empty_fixed_array()));
4125 __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
4126 __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
4127 __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
4128 __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
4129 __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
4130 __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
4131
4132 // Set input, index and length fields from arguments.
4133 __ mov(ecx, Operand(esp, kPointerSize * 1));
4134 __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
4135 __ mov(ecx, Operand(esp, kPointerSize * 2));
4136 __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
4137 __ mov(ecx, Operand(esp, kPointerSize * 3));
4138 __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
4139
4140 // Fill out the elements FixedArray.
4141 // eax: JSArray.
4142 // ebx: FixedArray.
4143 // ecx: Number of elements in array, as smi.
4144
4145 // Set map.
4146 __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
4147 Immediate(Factory::fixed_array_map()));
4148 // Set length.
4149 __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
4150 // Fill contents of fixed-array with the-hole.
4151 __ SmiUntag(ecx);
4152 __ mov(edx, Immediate(Factory::the_hole_value()));
4153 __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
4154 // Fill fixed array elements with hole.
4155 // eax: JSArray.
4156 // ecx: Number of elements to fill.
4157 // ebx: Start of elements in FixedArray.
4158 // edx: the hole.
4159 Label loop;
4160 __ test(ecx, Operand(ecx));
4161 __ bind(&loop);
4162 __ j(less_equal, &done); // Jump if ecx is negative or zero.
4163 __ sub(Operand(ecx), Immediate(1));
4164 __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
4165 __ jmp(&loop);
4166
4167 __ bind(&done);
4168 __ ret(3 * kPointerSize);
4169
4170 __ bind(&slowcase);
4171 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4172}
4173
4174
ricow@chromium.org65fae842010-08-25 15:26:24 +00004175void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
4176 Register object,
4177 Register result,
4178 Register scratch1,
4179 Register scratch2,
4180 bool object_is_smi,
4181 Label* not_found) {
4182 // Use of registers. Register result is used as a temporary.
4183 Register number_string_cache = result;
4184 Register mask = scratch1;
4185 Register scratch = scratch2;
4186
4187 // Load the number string cache.
4188 ExternalReference roots_address = ExternalReference::roots_address();
4189 __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
4190 __ mov(number_string_cache,
4191 Operand::StaticArray(scratch, times_pointer_size, roots_address));
4192 // Make the hash mask from the length of the number string cache. It
4193 // contains two elements (number and string) for each cache entry.
4194 __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
4195 __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
4196 __ sub(Operand(mask), Immediate(1)); // Make mask.
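  // The cache stores (number, string) pairs, so a FixedArray of length 2 * k
  // holds k entries; mask is now k - 1 (k is a power of two), and
  // (hash & mask) selects an entry. E.g. length 128 -> 64 entries -> mask 63.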
4197
4198 // Calculate the entry in the number string cache. The hash value in the
4199 // number string cache for smis is just the smi value, and the hash for
4200 // doubles is the xor of the upper and lower words. See
4201 // Heap::GetNumberStringCache.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004202 NearLabel smi_hash_calculated;
4203 NearLabel load_result_from_cache;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004204 if (object_is_smi) {
4205 __ mov(scratch, object);
4206 __ SmiUntag(scratch);
4207 } else {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004208 NearLabel not_smi, hash_calculated;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004209 STATIC_ASSERT(kSmiTag == 0);
4210 __ test(object, Immediate(kSmiTagMask));
4211 __ j(not_zero, &not_smi);
4212 __ mov(scratch, object);
4213 __ SmiUntag(scratch);
4214 __ jmp(&smi_hash_calculated);
4215 __ bind(&not_smi);
4216 __ cmp(FieldOperand(object, HeapObject::kMapOffset),
4217 Factory::heap_number_map());
4218 __ j(not_equal, not_found);
4219 STATIC_ASSERT(8 == kDoubleSize);
4220 __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
4221 __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
4222 // Object is heap number and hash is now in scratch. Calculate cache index.
4223 __ and_(scratch, Operand(mask));
4224 Register index = scratch;
4225 Register probe = mask;
4226 __ mov(probe,
4227 FieldOperand(number_string_cache,
4228 index,
4229 times_twice_pointer_size,
4230 FixedArray::kHeaderSize));
4231 __ test(probe, Immediate(kSmiTagMask));
4232 __ j(zero, not_found);
4233 if (CpuFeatures::IsSupported(SSE2)) {
4234 CpuFeatures::Scope fscope(SSE2);
4235 __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
4236 __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
4237 __ ucomisd(xmm0, xmm1);
4238 } else {
4239 __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
4240 __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
4241 __ FCmp();
4242 }
4243 __ j(parity_even, not_found); // Bail out if NaN is involved.
4244 __ j(not_equal, not_found); // The cache did not contain this value.
4245 __ jmp(&load_result_from_cache);
4246 }
4247
4248 __ bind(&smi_hash_calculated);
4249 // Object is smi and hash is now in scratch. Calculate cache index.
4250 __ and_(scratch, Operand(mask));
4251 Register index = scratch;
4252 // Check if the entry is the smi we are looking for.
4253 __ cmp(object,
4254 FieldOperand(number_string_cache,
4255 index,
4256 times_twice_pointer_size,
4257 FixedArray::kHeaderSize));
4258 __ j(not_equal, not_found);
4259
4260 // Get the result from the cache.
4261 __ bind(&load_result_from_cache);
4262 __ mov(result,
4263 FieldOperand(number_string_cache,
4264 index,
4265 times_twice_pointer_size,
4266 FixedArray::kHeaderSize + kPointerSize));
4267 __ IncrementCounter(&Counters::number_to_string_native, 1);
4268}
4269
4270
4271void NumberToStringStub::Generate(MacroAssembler* masm) {
4272 Label runtime;
4273
4274 __ mov(ebx, Operand(esp, kPointerSize));
4275
4276 // Generate code to lookup number in the number string cache.
4277 GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
4278 __ ret(1 * kPointerSize);
4279
4280 __ bind(&runtime);
4281 // Handle number to string in the runtime system if not found in the cache.
4282 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
4283}
4284
4285
4286static int NegativeComparisonResult(Condition cc) {
4287 ASSERT(cc != equal);
4288 ASSERT((cc == less) || (cc == less_equal)
4289 || (cc == greater) || (cc == greater_equal));
4290 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
4291}
4292
4293void CompareStub::Generate(MacroAssembler* masm) {
4294 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4295
4296 Label check_unequal_objects, done;
4297
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00004298 // Compare two smis if required.
4299 if (include_smi_compare_) {
4300 Label non_smi, smi_done;
4301 __ mov(ecx, Operand(edx));
4302 __ or_(ecx, Operand(eax));
4303 __ test(ecx, Immediate(kSmiTagMask));
4304 __ j(not_zero, &non_smi, not_taken);
4305 __ sub(edx, Operand(eax)); // Return on the result of the subtraction.
4306 __ j(no_overflow, &smi_done);
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00004307 __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
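  // not_(edx) is ~edx = -edx - 1, which has the opposite (and therefore
  // correct) sign after the overflowed subtraction and can never be zero.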
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00004308 __ bind(&smi_done);
4309 __ mov(eax, edx);
4310 __ ret(0);
4311 __ bind(&non_smi);
4312 } else if (FLAG_debug_code) {
4313 __ mov(ecx, Operand(edx));
4314 __ or_(ecx, Operand(eax));
4315 __ test(ecx, Immediate(kSmiTagMask));
4316 __ Assert(not_zero, "Unexpected smi operands.");
4317 }
4318
ricow@chromium.org65fae842010-08-25 15:26:24 +00004319 // NOTICE! This code is only reached after a smi-fast-case check, so
4320 // it is certain that at least one operand isn't a smi.
4321
4322 // Identical objects can be compared fast, but there are some tricky cases
4323 // for NaN and undefined.
4324 {
4325 Label not_identical;
4326 __ cmp(eax, Operand(edx));
4327 __ j(not_equal, &not_identical);
4328
4329 if (cc_ != equal) {
4330 // Check for undefined. undefined OP undefined is false even though
4331 // undefined == undefined.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004332 NearLabel check_for_nan;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004333 __ cmp(edx, Factory::undefined_value());
4334 __ j(not_equal, &check_for_nan);
4335 __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4336 __ ret(0);
4337 __ bind(&check_for_nan);
4338 }
4339
4340 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
4341 // so we do the second best thing - test it ourselves.
4342 // Note: if cc_ != equal, never_nan_nan_ is not used.
4343 if (never_nan_nan_ && (cc_ == equal)) {
4344 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4345 __ ret(0);
4346 } else {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004347 NearLabel heap_number;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004348 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
4349 Immediate(Factory::heap_number_map()));
4350 __ j(equal, &heap_number);
4351 if (cc_ != equal) {
4352 // Call runtime on identical JSObjects. Otherwise return equal.
4353 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
4354 __ j(above_equal, &not_identical);
4355 }
4356 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4357 __ ret(0);
4358
4359 __ bind(&heap_number);
4360 // It is a heap number, so return non-equal if it's NaN and equal if
4361 // it's not NaN.
4362 // The representation of NaN values has all exponent bits (52..62) set,
4363 // and not all mantissa bits (0..51) clear.
4364 // We only accept QNaNs, which have bit 51 set.
4365 // Read top bits of double representation (second word of value).
4366
4367 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
4368 // all bits in the mask are set. We only need to check the word
4369 // that contains the exponent and high bit of the mantissa.
4370 STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
4371 __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
4372 __ xor_(eax, Operand(eax));
4373 // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
4374 // bits.
4375 __ add(edx, Operand(edx));
4376 __ cmp(edx, kQuietNaNHighBitsMask << 1);
4377 if (cc_ == equal) {
4378 STATIC_ASSERT(EQUAL != 1);
4379 __ setcc(above_equal, eax);
4380 __ ret(0);
4381 } else {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004382 NearLabel nan;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004383 __ j(above_equal, &nan);
4384 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4385 __ ret(0);
4386 __ bind(&nan);
4387 __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4388 __ ret(0);
4389 }
4390 }
4391
4392 __ bind(&not_identical);
4393 }
4394
4395 // Strict equality can quickly decide whether objects are equal.
4396 // Non-strict object equality is slower, so it is handled later in the stub.
4397 if (cc_ == equal && strict_) {
4398 Label slow; // Fallthrough label.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004399 NearLabel not_smis;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004400 // If we're doing a strict equality comparison, we don't have to do
4401 // type conversion, so we generate code to do fast comparison for objects
4402 // and oddballs. Non-smi numbers and strings still go through the usual
4403 // slow-case code.
4404 // If either is a Smi (we know that not both are), then they can only
4405 // be equal if the other is a HeapNumber. If so, use the slow case.
4406 STATIC_ASSERT(kSmiTag == 0);
4407 ASSERT_EQ(0, Smi::FromInt(0));
4408 __ mov(ecx, Immediate(kSmiTagMask));
4409 __ and_(ecx, Operand(eax));
4410 __ test(ecx, Operand(edx));
4411 __ j(not_zero, &not_smis);
4412 // One operand is a smi.
4413
4414 // Check whether the non-smi is a heap number.
4415 STATIC_ASSERT(kSmiTagMask == 1);
4416 // ecx still holds eax & kSmiTag, which is either zero or one.
4417 __ sub(Operand(ecx), Immediate(0x01));
4418 __ mov(ebx, edx);
4419 __ xor_(ebx, Operand(eax));
4420 __ and_(ebx, Operand(ecx)); // ebx holds either 0 or eax ^ edx.
4421 __ xor_(ebx, Operand(eax));
4422 // if eax was smi, ebx is now edx, else eax.
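    // Branch-free select: ecx is ~0 when eax is a smi and 0 otherwise, so
    // ((edx ^ eax) & ecx) ^ eax evaluates to edx or eax respectively - in
    // both cases the operand that is not a smi.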
4423
4424 // Check if the non-smi operand is a heap number.
4425 __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
4426 Immediate(Factory::heap_number_map()));
4427 // If heap number, handle it in the slow case.
4428 __ j(equal, &slow);
4429 // Return non-equal (ebx is not zero)
4430 __ mov(eax, ebx);
4431 __ ret(0);
4432
4433 __ bind(&not_smis);
4434 // If either operand is a JSObject or an oddball value, then they are not
4435 // equal since their pointers are different.
4436 // There is no test for undetectability in strict equality.
4437
4438 // Get the type of the first operand.
4439 // If the first object is a JS object, we have done pointer comparison.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004440 NearLabel first_non_object;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004441 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
4442 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
4443 __ j(below, &first_non_object);
4444
4445 // Return non-zero (eax is not zero)
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004446 NearLabel return_not_equal;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004447 STATIC_ASSERT(kHeapObjectTag != 0);
4448 __ bind(&return_not_equal);
4449 __ ret(0);
4450
4451 __ bind(&first_non_object);
4452 // Check for oddballs: true, false, null, undefined.
4453 __ CmpInstanceType(ecx, ODDBALL_TYPE);
4454 __ j(equal, &return_not_equal);
4455
4456 __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ecx);
4457 __ j(above_equal, &return_not_equal);
4458
4459 // Check for oddballs: true, false, null, undefined.
4460 __ CmpInstanceType(ecx, ODDBALL_TYPE);
4461 __ j(equal, &return_not_equal);
4462
4463 // Fall through to the general case.
4464 __ bind(&slow);
4465 }
4466
4467 // Generate the number comparison code.
4468 if (include_number_compare_) {
4469 Label non_number_comparison;
4470 Label unordered;
4471 if (CpuFeatures::IsSupported(SSE2)) {
4472 CpuFeatures::Scope use_sse2(SSE2);
4473 CpuFeatures::Scope use_cmov(CMOV);
4474
4475 FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
4476 __ ucomisd(xmm0, xmm1);
4477
4478 // Don't base result on EFLAGS when a NaN is involved.
4479 __ j(parity_even, &unordered, not_taken);
4480 // Return a result of -1, 0, or 1, based on EFLAGS.
4481 __ mov(eax, 0); // equal
4482 __ mov(ecx, Immediate(Smi::FromInt(1)));
4483 __ cmov(above, eax, Operand(ecx));
4484 __ mov(ecx, Immediate(Smi::FromInt(-1)));
4485 __ cmov(below, eax, Operand(ecx));
4486 __ ret(0);
4487 } else {
4488 FloatingPointHelper::CheckFloatOperands(
4489 masm, &non_number_comparison, ebx);
4490 FloatingPointHelper::LoadFloatOperand(masm, eax);
4491 FloatingPointHelper::LoadFloatOperand(masm, edx);
4492 __ FCmp();
4493
4494 // Don't base result on EFLAGS when a NaN is involved.
4495 __ j(parity_even, &unordered, not_taken);
4496
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004497 NearLabel below_label, above_label;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004498 // Return a result of -1, 0, or 1, based on EFLAGS.
4499 __ j(below, &below_label, not_taken);
4500 __ j(above, &above_label, not_taken);
4501
4502 __ xor_(eax, Operand(eax));
4503 __ ret(0);
4504
4505 __ bind(&below_label);
4506 __ mov(eax, Immediate(Smi::FromInt(-1)));
4507 __ ret(0);
4508
4509 __ bind(&above_label);
4510 __ mov(eax, Immediate(Smi::FromInt(1)));
4511 __ ret(0);
4512 }
4513
4514 // If one of the numbers was NaN, then the result is always false.
4515 // The cc is never not-equal.
4516 __ bind(&unordered);
4517 ASSERT(cc_ != not_equal);
4518 if (cc_ == less || cc_ == less_equal) {
4519 __ mov(eax, Immediate(Smi::FromInt(1)));
4520 } else {
4521 __ mov(eax, Immediate(Smi::FromInt(-1)));
4522 }
4523 __ ret(0);
4524
4525 // The number comparison code did not provide a valid result.
4526 __ bind(&non_number_comparison);
4527 }
4528
4529 // Fast negative check for symbol-to-symbol equality.
4530 Label check_for_strings;
4531 if (cc_ == equal) {
4532 BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
4533 BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
4534
4535 // We've already checked for object identity, so if both operands
4536 // are symbols they aren't equal. Register eax already holds a
4537 // non-zero value, which indicates not equal, so just return.
4538 __ ret(0);
4539 }
4540
4541 __ bind(&check_for_strings);
4542
4543 __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4544 &check_unequal_objects);
4545
4546 // Inline comparison of ascii strings.
4547 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4548 edx,
4549 eax,
4550 ecx,
4551 ebx,
4552 edi);
4553#ifdef DEBUG
4554 __ Abort("Unexpected fall-through from string comparison");
4555#endif
4556
4557 __ bind(&check_unequal_objects);
4558 if (cc_ == equal && !strict_) {
4559 // Non-strict equality. Objects are unequal if
4560 // they are both JSObjects and not undetectable,
4561 // and their pointers are different.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004562 NearLabel not_both_objects;
4563 NearLabel return_unequal;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004564 // At most one is a smi, so we can test for smi by adding the two.
4565 // A smi plus a heap object has the low bit set, a heap object plus
4566 // a heap object has the low bit clear.
4567 STATIC_ASSERT(kSmiTag == 0);
4568 STATIC_ASSERT(kSmiTagMask == 1);
4569 __ lea(ecx, Operand(eax, edx, times_1, 0));
4570 __ test(ecx, Immediate(kSmiTagMask));
4571 __ j(not_zero, &not_both_objects);
4572 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
4573 __ j(below, &not_both_objects);
4574 __ CmpObjectType(edx, FIRST_JS_OBJECT_TYPE, ebx);
4575 __ j(below, &not_both_objects);
4576 // We do not bail out after this point. Both are JSObjects, and
4577 // they are equal if and only if both are undetectable.
4578 // The and of the undetectable flags is 1 if and only if they are equal.
4579 __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4580 1 << Map::kIsUndetectable);
4581 __ j(zero, &return_unequal);
4582 __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4583 1 << Map::kIsUndetectable);
4584 __ j(zero, &return_unequal);
4585 // The objects are both undetectable, so they both compare as the value
4586 // undefined, and are equal.
4587 __ Set(eax, Immediate(EQUAL));
4588 __ bind(&return_unequal);
4589 // Return non-equal by returning the non-zero object pointer in eax,
4590 // or return equal if we fell through to here.
4591 __ ret(0);
4592 __ bind(&not_both_objects);
4593 }
4594
4595 // Push arguments below the return address.
4596 __ pop(ecx);
4597 __ push(edx);
4598 __ push(eax);
4599
4600 // Figure out which native to call and setup the arguments.
4601 Builtins::JavaScript builtin;
4602 if (cc_ == equal) {
4603 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4604 } else {
4605 builtin = Builtins::COMPARE;
4606 __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4607 }
4608
4609 // Restore return address on the stack.
4610 __ push(ecx);
4611
4612 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4613 // tagged as a small integer.
4614 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4615}
4616
4617
4618void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
4619 Label* label,
4620 Register object,
4621 Register scratch) {
4622 __ test(object, Immediate(kSmiTagMask));
4623 __ j(zero, label);
4624 __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
4625 __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
4626 __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
4627 __ cmp(scratch, kSymbolTag | kStringTag);
4628 __ j(not_equal, label);
4629}
4630
4631
4632void StackCheckStub::Generate(MacroAssembler* masm) {
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00004633 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
ricow@chromium.org65fae842010-08-25 15:26:24 +00004634}
4635
4636
4637void CallFunctionStub::Generate(MacroAssembler* masm) {
4638 Label slow;
4639
4640 // If the receiver might be a value (string, number or boolean), check for this
4641 // and box it if it is.
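  // For example, in a call like (42).toString() the receiver 42 is a number
  // value and has to be boxed into a wrapper object before the call.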
4642 if (ReceiverMightBeValue()) {
4643 // Get the receiver from the stack.
4644 // +1 ~ return address
4645 Label receiver_is_value, receiver_is_js_object;
4646 __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
4647
4648 // Check if receiver is a smi (which is a number value).
4649 __ test(eax, Immediate(kSmiTagMask));
4650 __ j(zero, &receiver_is_value, not_taken);
4651
4652 // Check if the receiver is a valid JS object.
4653 __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, edi);
4654 __ j(above_equal, &receiver_is_js_object);
4655
4656 // Call the runtime to box the value.
4657 __ bind(&receiver_is_value);
4658 __ EnterInternalFrame();
4659 __ push(eax);
4660 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
4661 __ LeaveInternalFrame();
4662 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), eax);
4663
4664 __ bind(&receiver_is_js_object);
4665 }
4666
4667 // Get the function to call from the stack.
4668 // +2 ~ receiver, return address
4669 __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
4670
4671 // Check that the function really is a JavaScript function.
4672 __ test(edi, Immediate(kSmiTagMask));
4673 __ j(zero, &slow, not_taken);
4674 // Goto slow case if we do not have a function.
4675 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4676 __ j(not_equal, &slow, not_taken);
4677
4678 // Fast-case: Just invoke the function.
4679 ParameterCount actual(argc_);
4680 __ InvokeFunction(edi, actual, JUMP_FUNCTION);
4681
4682 // Slow-case: Non-function called.
4683 __ bind(&slow);
4684 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4685 // of the original receiver from the call site).
4686 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
4687 __ Set(eax, Immediate(argc_));
4688 __ Set(ebx, Immediate(0));
4689 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
4690 Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
4691 __ jmp(adaptor, RelocInfo::CODE_TARGET);
4692}
4693
4694
4695void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
4696 // eax holds the exception.
4697
4698 // Adjust this code if not the case.
4699 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
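  // Based on the offsets asserted in this function, a stack handler is
  // roughly four words: [next handler, fp, state, pc].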
4700
4701 // Drop the sp to the top of the handler.
4702 ExternalReference handler_address(Top::k_handler_address);
4703 __ mov(esp, Operand::StaticVariable(handler_address));
4704
4705 // Restore next handler and frame pointer, discard handler state.
4706 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4707 __ pop(Operand::StaticVariable(handler_address));
4708 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
4709 __ pop(ebp);
4710 __ pop(edx); // Remove state.
4711
4712 // Before returning we restore the context from the frame pointer if
4713 // not NULL. The frame pointer is NULL in the exception handler of
4714 // a JS entry frame.
4715 __ xor_(esi, Operand(esi)); // Tentatively set context pointer to NULL.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004716 NearLabel skip;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004717 __ cmp(ebp, 0);
4718 __ j(equal, &skip, not_taken);
4719 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
4720 __ bind(&skip);
4721
4722 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
4723 __ ret(0);
4724}
4725
4726
ricow@chromium.org65fae842010-08-25 15:26:24 +00004727void CEntryStub::GenerateCore(MacroAssembler* masm,
4728 Label* throw_normal_exception,
4729 Label* throw_termination_exception,
4730 Label* throw_out_of_memory_exception,
4731 bool do_gc,
4732 bool always_allocate_scope,
4733 int /* alignment_skew */) {
4734 // eax: result parameter for PerformGC, if any
4735 // ebx: pointer to C function (C callee-saved)
4736 // ebp: frame pointer (restored after C call)
4737 // esp: stack pointer (restored after C call)
4738 // edi: number of arguments including receiver (C callee-saved)
4739 // esi: pointer to the first argument (C callee-saved)
4740
4741 // Result returned in eax, or eax+edx if result_size_ is 2.
4742
4743 // Check stack alignment.
4744 if (FLAG_debug_code) {
4745 __ CheckStackAlignment();
4746 }
4747
4748 if (do_gc) {
4749 // Pass failure code returned from last attempt as first argument to
4750 // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
4751 // stack alignment is known to be correct. This function takes one argument
4752 // which is passed on the stack, and we know that the stack has been
4753 // prepared to pass at least one argument.
4754 __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
4755 __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
4756 }
4757
4758 ExternalReference scope_depth =
4759 ExternalReference::heap_always_allocate_scope_depth();
4760 if (always_allocate_scope) {
4761 __ inc(Operand::StaticVariable(scope_depth));
4762 }
4763
4764 // Call C function.
4765 __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
4766 __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
4767 __ call(Operand(ebx));
4768 // Result is in eax or edx:eax - do not destroy these registers!
4769
4770 if (always_allocate_scope) {
4771 __ dec(Operand::StaticVariable(scope_depth));
4772 }
4773
4774 // Make sure we're not trying to return 'the hole' from the runtime
4775 // call as this may lead to crashes in the IC code later.
4776 if (FLAG_debug_code) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004777 NearLabel okay;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004778 __ cmp(eax, Factory::the_hole_value());
4779 __ j(not_equal, &okay);
4780 __ int3();
4781 __ bind(&okay);
4782 }
4783
4784 // Check for failure result.
4785 Label failure_returned;
4786 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4787 __ lea(ecx, Operand(eax, 1));
4788 // Lower 2 bits of ecx are 0 iff eax has failure tag.
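  // Failure objects are the only values with both low tag bits set, so adding
  // one clears those bits exactly in the failure case (see the STATIC_ASSERT
  // above); smis and heap objects keep at least one of the two bits set.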
4789 __ test(ecx, Immediate(kFailureTagMask));
4790 __ j(zero, &failure_returned, not_taken);
4791
4792 // Exit the JavaScript to C++ exit frame.
kasperl@chromium.orga5551262010-12-07 12:49:48 +00004793 __ LeaveExitFrame(save_doubles_);
ricow@chromium.org65fae842010-08-25 15:26:24 +00004794 __ ret(0);
4795
4796 // Handling of failure.
4797 __ bind(&failure_returned);
4798
4799 Label retry;
4800 // If the returned exception is RETRY_AFTER_GC continue at retry label
4801 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4802 __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4803 __ j(zero, &retry, taken);
4804
4805 // Special handling of out of memory exceptions.
4806 __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4807 __ j(equal, throw_out_of_memory_exception);
4808
4809 // Retrieve the pending exception and clear the variable.
4810 ExternalReference pending_exception_address(Top::k_pending_exception_address);
4811 __ mov(eax, Operand::StaticVariable(pending_exception_address));
4812 __ mov(edx,
4813 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
4814 __ mov(Operand::StaticVariable(pending_exception_address), edx);
4815
4816 // Special handling of termination exceptions which are uncatchable
4817 // by JavaScript code.
4818 __ cmp(eax, Factory::termination_exception());
4819 __ j(equal, throw_termination_exception);
4820
4821 // Handle normal exception.
4822 __ jmp(throw_normal_exception);
4823
4824 // Retry.
4825 __ bind(&retry);
4826}
4827
4828
4829void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
4830 UncatchableExceptionType type) {
4831 // Adjust this code if not the case.
4832 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
4833
4834 // Drop sp to the top stack handler.
4835 ExternalReference handler_address(Top::k_handler_address);
4836 __ mov(esp, Operand::StaticVariable(handler_address));
4837
4838 // Unwind the handlers until the ENTRY handler is found.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00004839 NearLabel loop, done;
ricow@chromium.org65fae842010-08-25 15:26:24 +00004840 __ bind(&loop);
4841 // Load the type of the current stack handler.
4842 const int kStateOffset = StackHandlerConstants::kStateOffset;
4843 __ cmp(Operand(esp, kStateOffset), Immediate(StackHandler::ENTRY));
4844 __ j(equal, &done);
4845 // Fetch the next handler in the list.
4846 const int kNextOffset = StackHandlerConstants::kNextOffset;
4847 __ mov(esp, Operand(esp, kNextOffset));
4848 __ jmp(&loop);
4849 __ bind(&done);
4850
4851 // Set the top handler address to the next handler past the current ENTRY handler.
4852 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
4853 __ pop(Operand::StaticVariable(handler_address));
4854
4855 if (type == OUT_OF_MEMORY) {
4856 // Set external caught exception to false.
4857 ExternalReference external_caught(Top::k_external_caught_exception_address);
4858 __ mov(eax, false);
4859 __ mov(Operand::StaticVariable(external_caught), eax);
4860
4861 // Set pending exception and eax to out of memory exception.
4862 ExternalReference pending_exception(Top::k_pending_exception_address);
4863 __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4864 __ mov(Operand::StaticVariable(pending_exception), eax);
4865 }
4866
4867 // Clear the context pointer.
4868 __ xor_(esi, Operand(esi));
4869
4870 // Restore fp from handler and discard handler state.
4871 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
4872 __ pop(ebp);
4873 __ pop(edx); // State.
4874
4875 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
4876 __ ret(0);
4877}
4878
4879
4880void CEntryStub::Generate(MacroAssembler* masm) {
4881 // eax: number of arguments including receiver
4882 // ebx: pointer to C function (C callee-saved)
4883 // ebp: frame pointer (restored after C call)
4884 // esp: stack pointer (restored after C call)
4885 // esi: current context (C callee-saved)
4886 // edi: JS function of the caller (C callee-saved)
4887
4888 // NOTE: Invocations of builtins may return failure objects instead
4889 // of a proper result. The builtin entry handles this by performing
4890 // a garbage collection and retrying the builtin (twice).
4891
4892 // Enter the exit frame that transitions from JavaScript to C++.
kasperl@chromium.orga5551262010-12-07 12:49:48 +00004893 __ EnterExitFrame(save_doubles_);
ricow@chromium.org65fae842010-08-25 15:26:24 +00004894
4895 // eax: result parameter for PerformGC, if any (setup below)
4896 // ebx: pointer to builtin function (C callee-saved)
4897 // ebp: frame pointer (restored after C call)
4898 // esp: stack pointer (restored after C call)
4899 // edi: number of arguments including receiver (C callee-saved)
4900 // esi: argv pointer (C callee-saved)
4901
4902 Label throw_normal_exception;
4903 Label throw_termination_exception;
4904 Label throw_out_of_memory_exception;
4905
4906 // Call into the runtime system.
4907 GenerateCore(masm,
4908 &throw_normal_exception,
4909 &throw_termination_exception,
4910 &throw_out_of_memory_exception,
4911 false,
4912 false);
4913
4914 // Do space-specific GC and retry runtime call.
4915 GenerateCore(masm,
4916 &throw_normal_exception,
4917 &throw_termination_exception,
4918 &throw_out_of_memory_exception,
4919 true,
4920 false);
4921
4922 // Do full GC and retry runtime call one final time.
4923 Failure* failure = Failure::InternalError();
4924 __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
4925 GenerateCore(masm,
4926 &throw_normal_exception,
4927 &throw_termination_exception,
4928 &throw_out_of_memory_exception,
4929 true,
4930 true);
4931
4932 __ bind(&throw_out_of_memory_exception);
4933 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
4934
4935 __ bind(&throw_termination_exception);
4936 GenerateThrowUncatchable(masm, TERMINATION);
4937
4938 __ bind(&throw_normal_exception);
4939 GenerateThrowTOS(masm);
4940}
4941
4942
4943void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4944 Label invoke, exit;
4945#ifdef ENABLE_LOGGING_AND_PROFILING
4946 Label not_outermost_js, not_outermost_js_2;
4947#endif
4948
4949 // Setup frame.
4950 __ push(ebp);
4951 __ mov(ebp, Operand(esp));
4952
4953 // Push marker in two places.
4954 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4955 __ push(Immediate(Smi::FromInt(marker))); // context slot
4956 __ push(Immediate(Smi::FromInt(marker))); // function slot
4957 // Save callee-saved registers (C calling conventions).
4958 __ push(edi);
4959 __ push(esi);
4960 __ push(ebx);
4961
4962 // Save copies of the top frame descriptor on the stack.
4963 ExternalReference c_entry_fp(Top::k_c_entry_fp_address);
4964 __ push(Operand::StaticVariable(c_entry_fp));
4965
4966#ifdef ENABLE_LOGGING_AND_PROFILING
4967 // If this is the outermost JS call, set js_entry_sp value.
4968 ExternalReference js_entry_sp(Top::k_js_entry_sp_address);
4969 __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
4970 __ j(not_equal, &not_outermost_js);
4971 __ mov(Operand::StaticVariable(js_entry_sp), ebp);
4972 __ bind(&not_outermost_js);
4973#endif
4974
4975 // Call a faked try-block that does the invoke.
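  // The call pushes the address of the caught-exception code below as its
  // return address; the throwing path eventually returns to that address,
  // while normal control continues at the invoke label.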
4976 __ call(&invoke);
4977
4978 // Caught exception: Store result (exception) in the pending
4979 // exception field in the JSEnv and return a failure sentinel.
4980 ExternalReference pending_exception(Top::k_pending_exception_address);
4981 __ mov(Operand::StaticVariable(pending_exception), eax);
4982 __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
4983 __ jmp(&exit);
4984
4985 // Invoke: Link this frame into the handler chain.
4986 __ bind(&invoke);
4987 __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER);
4988
4989 // Clear any pending exceptions.
4990 __ mov(edx,
4991 Operand::StaticVariable(ExternalReference::the_hole_value_location()));
4992 __ mov(Operand::StaticVariable(pending_exception), edx);
4993
4994 // Fake a receiver (NULL).
4995 __ push(Immediate(0)); // receiver
4996
4997 // Invoke the function by calling through JS entry trampoline
4998 // builtin and pop the faked function when we return. Notice that we
4999 // cannot store a reference to the trampoline code directly in this
5000 // stub, because the builtin stubs may not have been generated yet.
5001 if (is_construct) {
5002 ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline);
5003 __ mov(edx, Immediate(construct_entry));
5004 } else {
5005 ExternalReference entry(Builtins::JSEntryTrampoline);
5006 __ mov(edx, Immediate(entry));
5007 }
5008 __ mov(edx, Operand(edx, 0)); // deref address
5009 __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
5010 __ call(Operand(edx));
5011
5012 // Unlink this frame from the handler chain.
5013 __ pop(Operand::StaticVariable(ExternalReference(Top::k_handler_address)));
5014 // Pop next_sp.
5015 __ add(Operand(esp), Immediate(StackHandlerConstants::kSize - kPointerSize));
5016
5017#ifdef ENABLE_LOGGING_AND_PROFILING
5018 // If the current EBP value is the same as the js_entry_sp value, it means that
5019 // the current function is the outermost.
5020 __ cmp(ebp, Operand::StaticVariable(js_entry_sp));
5021 __ j(not_equal, &not_outermost_js_2);
5022 __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
5023 __ bind(&not_outermost_js_2);
5024#endif
5025
5026 // Restore the top frame descriptor from the stack.
5027 __ bind(&exit);
5028 __ pop(Operand::StaticVariable(ExternalReference(Top::k_c_entry_fp_address)));
5029
5030 // Restore callee-saved registers (C calling conventions).
5031 __ pop(ebx);
5032 __ pop(esi);
5033 __ pop(edi);
5034 __ add(Operand(esp), Immediate(2 * kPointerSize)); // remove markers
5035
5036 // Restore frame pointer and return.
5037 __ pop(ebp);
5038 __ ret(0);
5039}
5040
5041
5042void InstanceofStub::Generate(MacroAssembler* masm) {
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005043 // Fixed register usage throughout the stub.
5044 Register object = eax; // Object (lhs).
5045 Register map = ebx; // Map of the object.
5046 Register function = edx; // Function (rhs).
5047 Register prototype = edi; // Prototype of the function.
5048 Register scratch = ecx;
5049
5050 // Get the object and function - they are always both needed.
5051 Label slow, not_js_object;
5052 if (!args_in_registers()) {
5053 __ mov(object, Operand(esp, 2 * kPointerSize));
5054 __ mov(function, Operand(esp, 1 * kPointerSize));
5055 }
ricow@chromium.org65fae842010-08-25 15:26:24 +00005056
5057 // Check that the left hand side is a JS object.
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005058 __ test(object, Immediate(kSmiTagMask));
5059 __ j(zero, &not_js_object, not_taken);
5060 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005061
5062 // Look up the function and the map in the instanceof cache.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00005063 NearLabel miss;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005064 ExternalReference roots_address = ExternalReference::roots_address();
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005065 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5066 __ cmp(function,
5067 Operand::StaticArray(scratch, times_pointer_size, roots_address));
ricow@chromium.org65fae842010-08-25 15:26:24 +00005068 __ j(not_equal, &miss);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005069 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5070 __ cmp(map, Operand::StaticArray(scratch, times_pointer_size, roots_address));
ricow@chromium.org65fae842010-08-25 15:26:24 +00005071 __ j(not_equal, &miss);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005072 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5073 __ mov(eax, Operand::StaticArray(scratch, times_pointer_size, roots_address));
5074 __ IncrementCounter(&Counters::instance_of_cache, 1);
5075 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005076
5077 __ bind(&miss);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005078 // Get the prototype of the function.
5079 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005080
5081 // Check that the function prototype is a JS object.
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005082 __ test(prototype, Immediate(kSmiTagMask));
ricow@chromium.org65fae842010-08-25 15:26:24 +00005083 __ j(zero, &slow, not_taken);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005084 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005085
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005086 // Update the global instanceof cache with the current map and function. The
5087 // cached answer will be set when it is known.
5088 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5089 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), map);
5090 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5091 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address),
5092 function);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005093
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005094 // Loop through the prototype chain of the object looking for the function
5095 // prototype.
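  // In pseudo-C++ the loop below is roughly:
  //   for (p = map->prototype(); p != null; p = p->map()->prototype()) {
  //     if (p == prototype) return true_result;
  //   }
  //   return false_result;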
5096 __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00005097 NearLabel loop, is_instance, is_not_instance;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005098 __ bind(&loop);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005099 __ cmp(scratch, Operand(prototype));
ricow@chromium.org65fae842010-08-25 15:26:24 +00005100 __ j(equal, &is_instance);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005101 __ cmp(Operand(scratch), Immediate(Factory::null_value()));
ricow@chromium.org65fae842010-08-25 15:26:24 +00005102 __ j(equal, &is_not_instance);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005103 __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
5104 __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
ricow@chromium.org65fae842010-08-25 15:26:24 +00005105 __ jmp(&loop);
5106
5107 __ bind(&is_instance);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005108 __ IncrementCounter(&Counters::instance_of_stub_true, 1);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005109 __ Set(eax, Immediate(0));
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005110 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5111 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
5112 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005113
5114 __ bind(&is_not_instance);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005115 __ IncrementCounter(&Counters::instance_of_stub_false, 1);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005116 __ Set(eax, Immediate(Smi::FromInt(1)));
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005117 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5118 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_address), eax);
5119 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5120
5121 Label object_not_null, object_not_null_or_smi;
5122 __ bind(&not_js_object);
5123 // Before null, smi and string value checks, check that the rhs is a function
5124 // as for a non-function rhs an exception needs to be thrown.
5125 __ test(function, Immediate(kSmiTagMask));
5126 __ j(zero, &slow, not_taken);
5127 __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5128 __ j(not_equal, &slow, not_taken);
5129
5130 // Null is not instance of anything.
5131 __ cmp(object, Factory::null_value());
5132 __ j(not_equal, &object_not_null);
5133 __ IncrementCounter(&Counters::instance_of_stub_false_null, 1);
5134 __ Set(eax, Immediate(Smi::FromInt(1)));
5135 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5136
5137 __ bind(&object_not_null);
5138 // Smi values are not instances of anything.
5139 __ test(object, Immediate(kSmiTagMask));
5140 __ j(not_zero, &object_not_null_or_smi, not_taken);
5141 __ Set(eax, Immediate(Smi::FromInt(1)));
5142 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
5143
5144 __ bind(&object_not_null_or_smi);
5145 // String values are not instances of anything.
5146 Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5147 __ j(NegateCondition(is_string), &slow);
5148 __ IncrementCounter(&Counters::instance_of_stub_false_string, 1);
5149 __ Set(eax, Immediate(Smi::FromInt(1)));
5150 __ ret((args_in_registers() ? 0 : 2) * kPointerSize);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005151
5152 // Slow-case: Go through the JavaScript implementation.
5153 __ bind(&slow);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00005154 if (args_in_registers()) {
5155 // Push arguments below return address.
5156 __ pop(scratch);
5157 __ push(object);
5158 __ push(function);
5159 __ push(scratch);
5160 }
5161 __ IncrementCounter(&Counters::instance_of_slow, 1);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005162 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
5163}
5164
5165
5166int CompareStub::MinorKey() {
5167 // Encode the stub parameters in a unique 16 bit value. To avoid duplicate
5168 // stubs, the never-NaN-NaN condition is only taken into account if the
5169 // condition is equals.
5170 ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
5171 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5172 return ConditionField::encode(static_cast<unsigned>(cc_))
5173 | RegisterField::encode(false) // lhs_ and rhs_ are not used
5174 | StrictField::encode(strict_)
5175 | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00005176 | IncludeNumberCompareField::encode(include_number_compare_)
5177 | IncludeSmiCompareField::encode(include_smi_compare_);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005178}
5179
5180
5181// Unfortunately you have to run without snapshots to see most of these
5182// names in the profile since most compare stubs end up in the snapshot.
5183const char* CompareStub::GetName() {
5184 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5185
5186 if (name_ != NULL) return name_;
5187 const int kMaxNameLength = 100;
5188 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
5189 if (name_ == NULL) return "OOM";
5190
5191 const char* cc_name;
5192 switch (cc_) {
5193 case less: cc_name = "LT"; break;
5194 case greater: cc_name = "GT"; break;
5195 case less_equal: cc_name = "LE"; break;
5196 case greater_equal: cc_name = "GE"; break;
5197 case equal: cc_name = "EQ"; break;
5198 case not_equal: cc_name = "NE"; break;
5199 default: cc_name = "UnknownCondition"; break;
5200 }
5201
5202 const char* strict_name = "";
5203 if (strict_ && (cc_ == equal || cc_ == not_equal)) {
5204 strict_name = "_STRICT";
5205 }
5206
5207 const char* never_nan_nan_name = "";
5208 if (never_nan_nan_ && (cc_ == equal || cc_ == not_equal)) {
5209 never_nan_nan_name = "_NO_NAN";
5210 }
5211
5212 const char* include_number_compare_name = "";
5213 if (!include_number_compare_) {
5214 include_number_compare_name = "_NO_NUMBER";
5215 }
5216
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00005217 const char* include_smi_compare_name = "";
5218 if (!include_smi_compare_) {
5219 include_smi_compare_name = "_NO_SMI";
5220 }
5221
ricow@chromium.org65fae842010-08-25 15:26:24 +00005222 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00005223 "CompareStub_%s%s%s%s%s",
ricow@chromium.org65fae842010-08-25 15:26:24 +00005224 cc_name,
5225 strict_name,
5226 never_nan_nan_name,
erik.corry@gmail.comd88afa22010-09-15 12:33:05 +00005227 include_number_compare_name,
5228 include_smi_compare_name);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005229 return name_;
5230}
5231
5232
5233// -------------------------------------------------------------------------
5234// StringCharCodeAtGenerator
5235
5236void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5237 Label flat_string;
5238 Label ascii_string;
5239 Label got_char_code;
5240
5241 // If the receiver is a smi trigger the non-string case.
5242 STATIC_ASSERT(kSmiTag == 0);
5243 __ test(object_, Immediate(kSmiTagMask));
5244 __ j(zero, receiver_not_string_);
5245
5246 // Fetch the instance type of the receiver into result register.
5247 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5248 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5249 // If the receiver is not a string trigger the non-string case.
5250 __ test(result_, Immediate(kIsNotStringMask));
5251 __ j(not_zero, receiver_not_string_);
5252
5253 // If the index is non-smi trigger the non-smi case.
5254 STATIC_ASSERT(kSmiTag == 0);
5255 __ test(index_, Immediate(kSmiTagMask));
5256 __ j(not_zero, &index_not_smi_);
5257
5258 // Put smi-tagged index into scratch register.
5259 __ mov(scratch_, index_);
5260 __ bind(&got_smi_index_);
5261
5262 // Check for index out of range.
5263 __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
5264 __ j(above_equal, index_out_of_range_);
5265
5266 // We need special handling for non-flat strings.
5267 STATIC_ASSERT(kSeqStringTag == 0);
5268 __ test(result_, Immediate(kStringRepresentationMask));
5269 __ j(zero, &flat_string);
5270
5271 // Handle non-flat strings.
5272 __ test(result_, Immediate(kIsConsStringMask));
5273 __ j(zero, &call_runtime_);
5274
5275 // ConsString.
5276 // Check whether the right hand side is the empty string (i.e. if
5277 // this is really a flat string in a cons string). If that is not
5278 // the case we would rather go to the runtime system now to flatten
5279 // the string.
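  // For example, a cons string whose second part is the empty string is
  // effectively flat: its first part can be used directly.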
5280 __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
5281 Immediate(Factory::empty_string()));
5282 __ j(not_equal, &call_runtime_);
5283 // Get the first of the two strings and load its instance type.
5284 __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
5285 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5286 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5287 // If the first cons component is also non-flat, then go to runtime.
5288 STATIC_ASSERT(kSeqStringTag == 0);
5289 __ test(result_, Immediate(kStringRepresentationMask));
5290 __ j(not_zero, &call_runtime_);
5291
5292 // Check for 1-byte or 2-byte string.
5293 __ bind(&flat_string);
5294 STATIC_ASSERT(kAsciiStringTag != 0);
5295 __ test(result_, Immediate(kStringEncodingMask));
5296 __ j(not_zero, &ascii_string);
5297
5298 // 2-byte string.
5299 // Load the 2-byte character code into the result register.
5300 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5301 __ movzx_w(result_, FieldOperand(object_,
5302 scratch_, times_1, // Scratch is smi-tagged.
5303 SeqTwoByteString::kHeaderSize));
5304 __ jmp(&got_char_code);
5305
5306 // ASCII string.
5307 // Load the byte into the result register.
5308 __ bind(&ascii_string);
5309 __ SmiUntag(scratch_);
5310 __ movzx_b(result_, FieldOperand(object_,
5311 scratch_, times_1,
5312 SeqAsciiString::kHeaderSize));
5313 __ bind(&got_char_code);
5314 __ SmiTag(result_);
5315 __ bind(&exit_);
5316}
5317
5318
5319void StringCharCodeAtGenerator::GenerateSlow(
5320 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5321 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5322
5323 // Index is not a smi.
5324 __ bind(&index_not_smi_);
5325 // If index is a heap number, try converting it to an integer.
5326 __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
5327 call_helper.BeforeCall(masm);
5328 __ push(object_);
5329 __ push(index_);
5330 __ push(index_); // Consumed by runtime conversion function.
5331 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5332 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5333 } else {
5334 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5335 // NumberToSmi discards numbers that are not exact integers.
5336 __ CallRuntime(Runtime::kNumberToSmi, 1);
5337 }
5338 if (!scratch_.is(eax)) {
5339 // Save the conversion result before the pop instructions below
5340 // have a chance to overwrite it.
5341 __ mov(scratch_, eax);
5342 }
5343 __ pop(index_);
5344 __ pop(object_);
5345 // Reload the instance type.
5346 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5347 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5348 call_helper.AfterCall(masm);
5349 // If index is still not a smi, it must be out of range.
5350 STATIC_ASSERT(kSmiTag == 0);
5351 __ test(scratch_, Immediate(kSmiTagMask));
5352 __ j(not_zero, index_out_of_range_);
5353 // Otherwise, return to the fast path.
5354 __ jmp(&got_smi_index_);
5355
5356 // Call runtime. We get here when the receiver is a string and the
5357 // index is a number, but the code of getting the actual character
5358 // is too complex (e.g., when the string needs to be flattened).
5359 __ bind(&call_runtime_);
5360 call_helper.BeforeCall(masm);
5361 __ push(object_);
5362 __ push(index_);
5363 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5364 if (!result_.is(eax)) {
5365 __ mov(result_, eax);
5366 }
5367 call_helper.AfterCall(masm);
5368 __ jmp(&exit_);
5369
5370 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5371}
5372
5373
5374// -------------------------------------------------------------------------
5375// StringCharFromCodeGenerator
5376
5377void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5378 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5379 STATIC_ASSERT(kSmiTag == 0);
5380 STATIC_ASSERT(kSmiShiftSize == 0);
5381 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
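  // The single test below checks two things at once: that code_ is a smi and
  // that its untagged value does not exceed String::kMaxAsciiCharCode.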
5382 __ test(code_,
5383 Immediate(kSmiTagMask |
5384 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5385 __ j(not_zero, &slow_case_, not_taken);
5386
5387 __ Set(result_, Immediate(Factory::single_character_string_cache()));
5388 STATIC_ASSERT(kSmiTag == 0);
5389 STATIC_ASSERT(kSmiTagSize == 1);
5390 STATIC_ASSERT(kSmiShiftSize == 0);
5391 // At this point code register contains smi tagged ascii char code.
5392 __ mov(result_, FieldOperand(result_,
5393 code_, times_half_pointer_size,
5394 FixedArray::kHeaderSize));
5395 __ cmp(result_, Factory::undefined_value());
5396 __ j(equal, &slow_case_, not_taken);
5397 __ bind(&exit_);
5398}
5399
5400
5401void StringCharFromCodeGenerator::GenerateSlow(
5402 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5403 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5404
5405 __ bind(&slow_case_);
5406 call_helper.BeforeCall(masm);
5407 __ push(code_);
5408 __ CallRuntime(Runtime::kCharFromCode, 1);
5409 if (!result_.is(eax)) {
5410 __ mov(result_, eax);
5411 }
5412 call_helper.AfterCall(masm);
5413 __ jmp(&exit_);
5414
5415 __ Abort("Unexpected fallthrough from CharFromCode slow case");
5416}
5417
5418
5419// -------------------------------------------------------------------------
5420// StringCharAtGenerator
5421
5422void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5423 char_code_at_generator_.GenerateFast(masm);
5424 char_from_code_generator_.GenerateFast(masm);
5425}
5426
5427
5428void StringCharAtGenerator::GenerateSlow(
5429 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
5430 char_code_at_generator_.GenerateSlow(masm, call_helper);
5431 char_from_code_generator_.GenerateSlow(masm, call_helper);
5432}
5433
5434
5435void StringAddStub::Generate(MacroAssembler* masm) {
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005436 Label string_add_runtime, call_builtin;
5437 Builtins::JavaScript builtin_id = Builtins::ADD;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005438
5439 // Load the two arguments.
5440 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5441 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5442
5443 // Make sure that both arguments are strings if not known in advance.
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005444 if (flags_ == NO_STRING_ADD_FLAGS) {
ricow@chromium.org65fae842010-08-25 15:26:24 +00005445 __ test(eax, Immediate(kSmiTagMask));
5446 __ j(zero, &string_add_runtime);
5447 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
5448 __ j(above_equal, &string_add_runtime);
5449
5450 // First argument is a string, test the second.
5451 __ test(edx, Immediate(kSmiTagMask));
5452 __ j(zero, &string_add_runtime);
5453 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
5454 __ j(above_equal, &string_add_runtime);
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005455 } else {
5456 // Here at least one of the arguments is definitely a string.
5457 // We convert the one that is not known to be a string.
5458 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
5459 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
5460 GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
5461 &call_builtin);
5462 builtin_id = Builtins::STRING_ADD_RIGHT;
5463 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
5464 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
5465 GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
5466 &call_builtin);
5467 builtin_id = Builtins::STRING_ADD_LEFT;
5468 }
ricow@chromium.org65fae842010-08-25 15:26:24 +00005469 }
5470
5471 // Both arguments are strings.
5472 // eax: first string
5473 // edx: second string
5474 // Check if either of the strings is empty. In that case return the other.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00005475 NearLabel second_not_zero_length, both_not_zero_length;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005476 __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
5477 STATIC_ASSERT(kSmiTag == 0);
5478 __ test(ecx, Operand(ecx));
5479 __ j(not_zero, &second_not_zero_length);
5480 // Second string is empty, result is first string which is already in eax.
5481 __ IncrementCounter(&Counters::string_add_native, 1);
5482 __ ret(2 * kPointerSize);
5483 __ bind(&second_not_zero_length);
5484 __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
5485 STATIC_ASSERT(kSmiTag == 0);
5486 __ test(ebx, Operand(ebx));
5487 __ j(not_zero, &both_not_zero_length);
5488 // First string is empty, result is second string which is in edx.
5489 __ mov(eax, edx);
5490 __ IncrementCounter(&Counters::string_add_native, 1);
5491 __ ret(2 * kPointerSize);
5492
5493 // Both strings are non-empty.
5494 // eax: first string
5495 // ebx: length of first string as a smi
5496 // ecx: length of second string as a smi
5497 // edx: second string
5498 // Look at the length of the result of adding the two strings.
5499 Label string_add_flat_result, longer_than_two;
5500 __ bind(&both_not_zero_length);
5501 __ add(ebx, Operand(ecx));
5502 STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
5503 // Handle exceptionally long strings in the runtime system.
5504 __ j(overflow, &string_add_runtime);
5505 // Adding two one character strings is handled specially below: the result
5506 // is looked up in the symbol table before falling back to allocation.
5507 __ cmp(Operand(ebx), Immediate(Smi::FromInt(2)));
5508 __ j(not_equal, &longer_than_two);
5509
5510 // Check that both strings are non-external ascii strings.
5511 __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx,
5512 &string_add_runtime);
5513
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005514 // Get the two characters forming the new string.
ricow@chromium.org65fae842010-08-25 15:26:24 +00005515 __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5516 __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5517
5518 // Try to lookup two character string in symbol table. If it is not found
5519 // just allocate a new one.
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005520 Label make_two_character_string, make_two_character_string_no_reload;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005521 StringHelper::GenerateTwoCharacterSymbolTableProbe(
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005522 masm, ebx, ecx, eax, edx, edi,
5523 &make_two_character_string_no_reload, &make_two_character_string);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005524 __ IncrementCounter(&Counters::string_add_native, 1);
5525 __ ret(2 * kPointerSize);
5526
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005527 // Allocate a two character string.
ricow@chromium.org65fae842010-08-25 15:26:24 +00005528 __ bind(&make_two_character_string);
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005529 // Reload the arguments.
5530 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5531 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5532 // Get the two characters forming the new string.
5533 __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5534 __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5535 __ bind(&make_two_character_string_no_reload);
5536 __ IncrementCounter(&Counters::string_add_make_two_char, 1);
5537 __ AllocateAsciiString(eax, // Result.
5538 2, // Length.
5539 edi, // Scratch 1.
5540 edx, // Scratch 2.
5541 &string_add_runtime);
5542 // Pack both characters in ebx.
5543 __ shl(ecx, kBitsPerByte);
5544 __ or_(ebx, Operand(ecx));
5545 // Set the characters in the new string.
5546 __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
5547 __ IncrementCounter(&Counters::string_add_native, 1);
5548 __ ret(2 * kPointerSize);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005549
5550 __ bind(&longer_than_two);
5551 // Check if resulting string will be flat.
5552 __ cmp(Operand(ebx), Immediate(Smi::FromInt(String::kMinNonFlatLength)));
5553 __ j(below, &string_add_flat_result);
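  // Results shorter than String::kMinNonFlatLength are built as flat
  // (sequential) strings right away; longer results get a cons string, which
  // defers the actual copying until the string is flattened later.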
5554
5555 // If result is not supposed to be flat allocate a cons string object. If both
5556 // strings are ascii the result is an ascii cons string.
5557 Label non_ascii, allocated, ascii_data;
5558 __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
5559 __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
5560 __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5561 __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5562 __ and_(ecx, Operand(edi));
5563 STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
5564 __ test(ecx, Immediate(kAsciiStringTag));
5565 __ j(zero, &non_ascii);
5566 __ bind(&ascii_data);
5567 // Allocate an ascii cons string.
5568 __ AllocateAsciiConsString(ecx, edi, no_reg, &string_add_runtime);
5569 __ bind(&allocated);
5570 // Fill the fields of the cons string.
5571 if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
5572 __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
5573 __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
5574 Immediate(String::kEmptyHashField));
5575 __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
5576 __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
5577 __ mov(eax, ecx);
5578 __ IncrementCounter(&Counters::string_add_native, 1);
5579 __ ret(2 * kPointerSize);
5580 __ bind(&non_ascii);
5581 // At least one of the strings is two-byte. Check whether it happens
5582 // to contain only ascii characters.
5583 // ecx: first instance type AND second instance type.
5584 // edi: second instance type.
5585 __ test(ecx, Immediate(kAsciiDataHintMask));
5586 __ j(not_zero, &ascii_data);
5587 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5588 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5589 __ xor_(edi, Operand(ecx));
5590 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
5591 __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
5592 __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
5593 __ j(equal, &ascii_data);
5594 // Allocate a two byte cons string.
5595 __ AllocateConsString(ecx, edi, no_reg, &string_add_runtime);
5596 __ jmp(&allocated);
5597
5598 // Handle creating a flat result. First check that both strings are not
5599 // external strings.
5600 // eax: first string
5601 // ebx: length of resulting flat string as a smi
5602 // edx: second string
5603 __ bind(&string_add_flat_result);
5604 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5605 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5606 __ and_(ecx, kStringRepresentationMask);
5607 __ cmp(ecx, kExternalStringTag);
5608 __ j(equal, &string_add_runtime);
5609 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
5610 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5611 __ and_(ecx, kStringRepresentationMask);
5612 __ cmp(ecx, kExternalStringTag);
5613 __ j(equal, &string_add_runtime);
5614 // Now check if both strings are ascii strings.
5615 // eax: first string
5616 // ebx: length of resulting flat string as a smi
5617 // edx: second string
5618 Label non_ascii_string_add_flat_result;
5619 STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
5620 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5621 __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
5622 __ j(zero, &non_ascii_string_add_flat_result);
5623 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
5624 __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
5625 __ j(zero, &string_add_runtime);
5626
ricow@chromium.org65fae842010-08-25 15:26:24 +00005627 // Both strings are ascii strings. As they are short they are both flat.
5628 // ebx: length of resulting flat string as a smi
5629 __ SmiUntag(ebx);
5630 __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &string_add_runtime);
5631 // eax: result string
5632 __ mov(ecx, eax);
5633 // Locate first character of result.
5634 __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5635 // Load first argument and locate first character.
5636 __ mov(edx, Operand(esp, 2 * kPointerSize));
5637 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5638 __ SmiUntag(edi);
5639 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5640 // eax: result string
5641 // ecx: first character of result
5642 // edx: first char of first argument
5643 // edi: length of first argument
5644 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5645 // Load second argument and locate first character.
5646 __ mov(edx, Operand(esp, 1 * kPointerSize));
5647 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5648 __ SmiUntag(edi);
5649 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5650 // eax: result string
5651 // ecx: next character of result
5652 // edx: first char of second argument
5653 // edi: length of second argument
5654 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5655 __ IncrementCounter(&Counters::string_add_native, 1);
5656 __ ret(2 * kPointerSize);
5657
5658 // Handle creating a flat two byte result.
5659 // eax: first string - known to be two byte
5660 // ebx: length of resulting flat string as a smi
5661 // edx: second string
5662 __ bind(&non_ascii_string_add_flat_result);
5663 __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
5664 __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
5665 __ j(not_zero, &string_add_runtime);
5666 // Both strings are two byte strings. As they are short they are both
5667 // flat.
5668 __ SmiUntag(ebx);
5669 __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &string_add_runtime);
5670 // eax: result string
5671 __ mov(ecx, eax);
5672 // Locate first character of result.
5673 __ add(Operand(ecx),
5674 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5675 // Load first argument and locate first character.
5676 __ mov(edx, Operand(esp, 2 * kPointerSize));
5677 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5678 __ SmiUntag(edi);
5679 __ add(Operand(edx),
5680 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5681 // eax: result string
5682 // ecx: first character of result
5683 // edx: first char of first argument
5684 // edi: length of first argument
5685 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5686 // Load second argument and locate first character.
5687 __ mov(edx, Operand(esp, 1 * kPointerSize));
5688 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5689 __ SmiUntag(edi);
5690 __ add(Operand(edx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5691 // eax: result string
5692 // ecx: next character of result
5693 // edx: first char of second argument
5694 // edi: length of second argument
5695 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5696 __ IncrementCounter(&Counters::string_add_native, 1);
5697 __ ret(2 * kPointerSize);
5698
5699 // Just jump to runtime to add the two strings.
5700 __ bind(&string_add_runtime);
5701 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005702
5703 if (call_builtin.is_linked()) {
5704 __ bind(&call_builtin);
5705 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
5706 }
5707}
5708
5709
5710void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
5711 int stack_offset,
5712 Register arg,
5713 Register scratch1,
5714 Register scratch2,
5715 Register scratch3,
5716 Label* slow) {
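  // Converts the argument at |stack_offset| to a string without a full
  // ToString call: e.g. for "a" + 1 the smi 1 is found via the number-to-string
  // cache, and for "a" + new String("b") the wrapped string is used directly
  // when the wrapper's map allows the default valueOf behaviour.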
5717 // First check if the argument is already a string.
5718 Label not_string, done;
5719 __ test(arg, Immediate(kSmiTagMask));
5720 __ j(zero, &not_string);
5721 __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
5722 __ j(below, &done);
5723
5724 // Check the number to string cache.
5725 Label not_cached;
5726 __ bind(&not_string);
5727 // Puts the cached result into scratch1.
5728 NumberToStringStub::GenerateLookupNumberStringCache(masm,
5729 arg,
5730 scratch1,
5731 scratch2,
5732 scratch3,
5733 false,
5734 &not_cached);
5735 __ mov(arg, scratch1);
5736 __ mov(Operand(esp, stack_offset), arg);
5737 __ jmp(&done);
5738
5739 // Check if the argument is a safe string wrapper.
5740 __ bind(&not_cached);
5741 __ test(arg, Immediate(kSmiTagMask));
5742 __ j(zero, slow);
5743 __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
5744 __ j(not_equal, slow);
5745 __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
5746 1 << Map::kStringWrapperSafeForDefaultValueOf);
5747 __ j(zero, slow);
5748 __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
5749 __ mov(Operand(esp, stack_offset), arg);
5750
5751 __ bind(&done);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005752}
5753
5754
5755void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5756 Register dest,
5757 Register src,
5758 Register count,
5759 Register scratch,
5760 bool ascii) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00005761 NearLabel loop;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005762 __ bind(&loop);
5763 // This loop just copies one character at a time, as it is only used for very
5764 // short strings.
5765 if (ascii) {
5766 __ mov_b(scratch, Operand(src, 0));
5767 __ mov_b(Operand(dest, 0), scratch);
5768 __ add(Operand(src), Immediate(1));
5769 __ add(Operand(dest), Immediate(1));
5770 } else {
5771 __ mov_w(scratch, Operand(src, 0));
5772 __ mov_w(Operand(dest, 0), scratch);
5773 __ add(Operand(src), Immediate(2));
5774 __ add(Operand(dest), Immediate(2));
5775 }
5776 __ sub(Operand(count), Immediate(1));
5777 __ j(not_zero, &loop);
5778}
5779
5780
5781void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
5782 Register dest,
5783 Register src,
5784 Register count,
5785 Register scratch,
5786 bool ascii) {
5787 // Copy characters using rep movs of doublewords.
5788 // The destination is aligned on a 4 byte boundary because we are
5789 // copying to the beginning of a newly allocated string.
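  // In plain C++ the effect is roughly:
  //   memcpy(dest, src, ascii ? count : count * 2);
  // with the bulk moved four bytes at a time (rep movsd) and the trailing
  // zero to three bytes copied individually.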
5790 ASSERT(dest.is(edi)); // rep movs destination
5791 ASSERT(src.is(esi)); // rep movs source
5792 ASSERT(count.is(ecx)); // rep movs count
5793 ASSERT(!scratch.is(dest));
5794 ASSERT(!scratch.is(src));
5795 ASSERT(!scratch.is(count));
5796
5797 // Nothing to do for zero characters.
5798 Label done;
5799 __ test(count, Operand(count));
5800 __ j(zero, &done);
5801
5802 // Make count the number of bytes to copy.
5803 if (!ascii) {
5804 __ shl(count, 1);
5805 }
5806
5807 // Don't enter the rep movs if there are fewer than 4 bytes to copy.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00005808 NearLabel last_bytes;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005809 __ test(count, Immediate(~3));
5810 __ j(zero, &last_bytes);
5811
5812 // Copy from esi to edi using the rep movs instruction.
5813 __ mov(scratch, count);
5814 __ sar(count, 2); // Number of doublewords to copy.
5815 __ cld();
5816 __ rep_movs();
5817
5818 // Find number of bytes left.
5819 __ mov(count, scratch);
5820 __ and_(count, 3);
5821
5822 // Check if there are more bytes to copy.
5823 __ bind(&last_bytes);
5824 __ test(count, Operand(count));
5825 __ j(zero, &done);
5826
5827 // Copy remaining characters.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00005828 NearLabel loop;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005829 __ bind(&loop);
5830 __ mov_b(scratch, Operand(src, 0));
5831 __ mov_b(Operand(dest, 0), scratch);
5832 __ add(Operand(src), Immediate(1));
5833 __ add(Operand(dest), Immediate(1));
5834 __ sub(Operand(count), Immediate(1));
5835 __ j(not_zero, &loop);
5836
5837 __ bind(&done);
5838}
5839
5840
5841void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5842 Register c1,
5843 Register c2,
5844 Register scratch1,
5845 Register scratch2,
5846 Register scratch3,
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005847 Label* not_probed,
ricow@chromium.org65fae842010-08-25 15:26:24 +00005848 Label* not_found) {
5849 // Register scratch3 is the general scratch register in this function.
5850 Register scratch = scratch3;
5851
5852 // Make sure that both characters are not digits, as such strings have a
5853 // different hash algorithm. Don't try to look for these in the symbol table.
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00005854 NearLabel not_array_index;
ricow@chromium.org65fae842010-08-25 15:26:24 +00005855 __ mov(scratch, c1);
5856 __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
5857 __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
5858 __ j(above, &not_array_index);
5859 __ mov(scratch, c2);
5860 __ sub(Operand(scratch), Immediate(static_cast<int>('0')));
5861 __ cmp(Operand(scratch), Immediate(static_cast<int>('9' - '0')));
kmillikin@chromium.org3cdd9e12010-09-06 11:39:48 +00005862 __ j(below_equal, not_probed);
ricow@chromium.org65fae842010-08-25 15:26:24 +00005863
5864 __ bind(&not_array_index);
5865 // Calculate the two character string hash.
5866 Register hash = scratch1;
5867 GenerateHashInit(masm, hash, c1, scratch);
5868 GenerateHashAddCharacter(masm, hash, c2, scratch);
5869 GenerateHashGetHash(masm, hash, scratch);
5870
5871 // Collect the two characters in a register.
5872 Register chars = c1;
5873 __ shl(c2, kBitsPerByte);
5874 __ or_(chars, Operand(c2));
5875
5876 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5877 // hash: hash of two character string.
5878
5879 // Load the symbol table.
5880 Register symbol_table = c2;
5881 ExternalReference roots_address = ExternalReference::roots_address();
5882 __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
5883 __ mov(symbol_table,
5884 Operand::StaticArray(scratch, times_pointer_size, roots_address));
5885
5886 // Calculate capacity mask from the symbol table capacity.
5887 Register mask = scratch2;
5888 __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
5889 __ SmiUntag(mask);
5890 __ sub(Operand(mask), Immediate(1));
5891
5892 // Registers
5893 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5894 // hash: hash of two character string
5895 // symbol_table: symbol table
5896 // mask: capacity mask
5897 // scratch: -
5898
5899 // Perform a number of probes in the symbol table.
5900 static const int kProbes = 4;
5901 Label found_in_symbol_table;
5902 Label next_probe[kProbes], next_probe_pop_mask[kProbes];
5903 for (int i = 0; i < kProbes; i++) {
5904 // Calculate entry in symbol table.
5905 __ mov(scratch, hash);
5906 if (i > 0) {
5907 __ add(Operand(scratch), Immediate(SymbolTable::GetProbeOffset(i)));
5908 }
5909 __ and_(scratch, Operand(mask));
5910
5911 // Load the entry from the symbol table.
5912 Register candidate = scratch; // Scratch register contains candidate.
5913 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
5914 __ mov(candidate,
5915 FieldOperand(symbol_table,
5916 scratch,
5917 times_pointer_size,
5918 SymbolTable::kElementsStartOffset));
5919
5920 // If entry is undefined no string with this hash can be found.
5921 __ cmp(candidate, Factory::undefined_value());
5922 __ j(equal, not_found);
5923
5924 // If length is not 2 the string is not a candidate.
5925 __ cmp(FieldOperand(candidate, String::kLengthOffset),
5926 Immediate(Smi::FromInt(2)));
5927 __ j(not_equal, &next_probe[i]);
5928
5929 // As we are out of registers save the mask on the stack and use that
5930 // register as a temporary.
5931 __ push(mask);
5932 Register temp = mask;
5933
5934 // Check that the candidate is a non-external ascii string.
5935 __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
5936 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
5937 __ JumpIfInstanceTypeIsNotSequentialAscii(
5938 temp, temp, &next_probe_pop_mask[i]);
5939
5940 // Check if the two characters match.
5941 __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
5942 __ and_(temp, 0x0000ffff);
5943 __ cmp(chars, Operand(temp));
5944 __ j(equal, &found_in_symbol_table);
5945 __ bind(&next_probe_pop_mask[i]);
5946 __ pop(mask);
5947 __ bind(&next_probe[i]);
5948 }
5949
5950 // No matching 2 character string found by probing.
5951 __ jmp(not_found);
5952
5953 // Scratch register contains result when we fall through to here.
5954 Register result = scratch;
5955 __ bind(&found_in_symbol_table);
5956 __ pop(mask); // Pop saved mask from the stack.
5957 if (!result.is(eax)) {
5958 __ mov(eax, result);
5959 }
5960}
5961
5962
5963void StringHelper::GenerateHashInit(MacroAssembler* masm,
5964 Register hash,
5965 Register character,
5966 Register scratch) {
5967 // hash = character + (character << 10);
5968 __ mov(hash, character);
5969 __ shl(hash, 10);
5970 __ add(hash, Operand(character));
5971 // hash ^= hash >> 6;
5972 __ mov(scratch, hash);
5973 __ sar(scratch, 6);
5974 __ xor_(hash, Operand(scratch));
5975}
5976
5977
5978void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5979 Register hash,
5980 Register character,
5981 Register scratch) {
5982 // hash += character;
5983 __ add(hash, Operand(character));
5984 // hash += hash << 10;
5985 __ mov(scratch, hash);
5986 __ shl(scratch, 10);
5987 __ add(hash, Operand(scratch));
5988 // hash ^= hash >> 6;
5989 __ mov(scratch, hash);
5990 __ sar(scratch, 6);
5991 __ xor_(hash, Operand(scratch));
5992}
5993
5994
5995void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5996 Register hash,
5997 Register scratch) {
5998 // hash += hash << 3;
5999 __ mov(scratch, hash);
6000 __ shl(scratch, 3);
6001 __ add(hash, Operand(scratch));
6002 // hash ^= hash >> 11;
6003 __ mov(scratch, hash);
6004 __ sar(scratch, 11);
6005 __ xor_(hash, Operand(scratch));
6006 // hash += hash << 15;
6007 __ mov(scratch, hash);
6008 __ shl(scratch, 15);
6009 __ add(hash, Operand(scratch));
6010
6011 // if (hash == 0) hash = 27;
6012 NearLabel hash_not_zero;
6013 __ test(hash, Operand(hash));
6014 __ j(not_zero, &hash_not_zero);
6015 __ mov(hash, Immediate(27));
6016 __ bind(&hash_not_zero);
6017}
6018
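// Taken together, GenerateHashInit, GenerateHashAddCharacter and
// GenerateHashGetHash compute roughly the following C function (a sketch
// that simply mirrors the pseudo-code comments above):
//
//   uint32_t Hash(const uint8_t* chars, int length) {
//     uint32_t hash = chars[0] + (chars[0] << 10);
//     hash ^= hash >> 6;
//     for (int i = 1; i < length; i++) {
//       hash += chars[i];
//       hash += hash << 10;
//       hash ^= hash >> 6;
//     }
//     hash += hash << 3;
//     hash ^= hash >> 11;
//     hash += hash << 15;
//     if (hash == 0) hash = 27;
//     return hash;
//   }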
6019
6020void SubStringStub::Generate(MacroAssembler* masm) {
6021 Label runtime;
6022
6023 // Stack frame on entry.
6024 // esp[0]: return address
6025 // esp[4]: to
6026 // esp[8]: from
6027 // esp[12]: string
6028
6029 // Make sure first argument is a string.
6030 __ mov(eax, Operand(esp, 3 * kPointerSize));
6031 STATIC_ASSERT(kSmiTag == 0);
6032 __ test(eax, Immediate(kSmiTagMask));
6033 __ j(zero, &runtime);
6034 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
6035 __ j(NegateCondition(is_string), &runtime);
6036
6037 // eax: string
6038 // ebx: instance type
6039
6040 // Calculate length of sub string using the smi values.
6041 Label result_longer_than_two;
6042 __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
6043 __ test(ecx, Immediate(kSmiTagMask));
6044 __ j(not_zero, &runtime);
6045 __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
6046 __ test(edx, Immediate(kSmiTagMask));
6047 __ j(not_zero, &runtime);
6048 __ sub(ecx, Operand(edx));
6049 __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
6050 Label return_eax;
6051 __ j(equal, &return_eax);
6052 // Special handling of sub-strings of length 1 and 2. One character strings
6053 // are handled in the runtime system (looked up in the single character
6054 // cache). Two character strings are looked up in the symbol table.
6055 __ SmiUntag(ecx); // Result length is no longer smi.
6056 __ cmp(ecx, 2);
6057 __ j(greater, &result_longer_than_two);
6058 __ j(less, &runtime);
6059
6060 // Sub string of length 2 requested.
6061 // eax: string
6062 // ebx: instance type
6063 // ecx: sub string length (value is 2)
6064 // edx: from index (smi)
6065 __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
6066
6067 // Get the two characters forming the sub string.
6068 __ SmiUntag(edx); // From index is no longer smi.
6069 __ movzx_b(ebx, FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize));
6070 __ movzx_b(ecx,
6071 FieldOperand(eax, edx, times_1, SeqAsciiString::kHeaderSize + 1));
6072
6073 // Try to look up two character string in symbol table.
6074 Label make_two_character_string;
6075 StringHelper::GenerateTwoCharacterSymbolTableProbe(
6076 masm, ebx, ecx, eax, edx, edi,
6077 &make_two_character_string, &make_two_character_string);
6078 __ ret(3 * kPointerSize);
6079
6080 __ bind(&make_two_character_string);
6081 // Set up registers for allocating the two character string.
6082 __ mov(eax, Operand(esp, 3 * kPointerSize));
6083 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
6084 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
6085 __ Set(ecx, Immediate(2));
6086
6087 __ bind(&result_longer_than_two);
6088 // eax: string
6089 // ebx: instance type
6090 // ecx: result string length
6091 // Check for flat ascii string
6092 Label non_ascii_flat;
6093 __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
6094
6095 // Allocate the result.
6096 __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
6097
6098 // eax: result string
6099 // ecx: result string length
6100 __ mov(edx, esi); // esi used by following code.
6101 // Locate first character of result.
6102 __ mov(edi, eax);
6103 __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6104 // Load string argument and locate character of sub string start.
6105 __ mov(esi, Operand(esp, 3 * kPointerSize));
6106 __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6107 __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
6108 __ SmiUntag(ebx);
6109 __ add(esi, Operand(ebx));
6110
6111 // eax: result string
6112 // ecx: result length
6113 // edx: original value of esi
6114 // edi: first character of result
6115 // esi: character of sub string start
6116 StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
6117 __ mov(esi, edx); // Restore esi.
6118 __ IncrementCounter(&Counters::sub_string_native, 1);
6119 __ ret(3 * kPointerSize);
6120
6121 __ bind(&non_ascii_flat);
6122 // eax: string
6123 // ebx: instance type & (kStringRepresentationMask | kStringEncodingMask)
6124 // ecx: result string length
6125 // Check for flat two byte string
6126 __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
6127 __ j(not_equal, &runtime);
6128
6129 // Allocate the result.
6130 __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
6131
6132 // eax: result string
6133 // ecx: result string length
6134 __ mov(edx, esi); // esi used by following code.
6135 // Locate first character of result.
6136 __ mov(edi, eax);
6137 __ add(Operand(edi),
6138 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6139 // Load string argument and locate character of sub string start.
6140 __ mov(esi, Operand(esp, 3 * kPointerSize));
6141 __ add(Operand(esi),
6142 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6143 __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
6144 // As from is a smi it is 2 times the untagged value, which matches the size
6145 // of a two byte character, so it can be added to esi without scaling.
6146 STATIC_ASSERT(kSmiTag == 0);
6147 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
6148 __ add(esi, Operand(ebx));
6149
6150 // eax: result string
6151 // ecx: result length
6152 // edx: original value of esi
6153 // edi: first character of result
6154 // esi: character of sub string start
6155 StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
6156 __ mov(esi, edx); // Restore esi.
6157
6158 __ bind(&return_eax);
6159 __ IncrementCounter(&Counters::sub_string_native, 1);
6160 __ ret(3 * kPointerSize);
6161
6162 // Just jump to runtime to create the sub string.
6163 __ bind(&runtime);
6164 __ TailCallRuntime(Runtime::kSubString, 3, 1);
6165}
6166
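// In outline, the stub above implements the following dispatch (a sketch
// only; the exact checks and register usage are in the code):
//
//   if (!IsString(str) || !IsSmi(to) || !IsSmi(from)) goto runtime;
//   int length = to - from;
//   if (length == StringLength(str)) return str;  // whole string requested
//   if (length < 2) goto runtime;  // single characters use the runtime cache
//   if (length == 2 && found in symbol table) return the symbol;
//   allocate a sequential ascii or two-byte result and copy the characters;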
6167
6168void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6169 Register left,
6170 Register right,
6171 Register scratch1,
6172 Register scratch2,
6173 Register scratch3) {
6174 Label result_not_equal;
6175 Label result_greater;
6176 Label compare_lengths;
6177
6178 __ IncrementCounter(&Counters::string_compare_native, 1);
6179
6180 // Find minimum length.
6181 NearLabel left_shorter;
6182 __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
6183 __ mov(scratch3, scratch1);
6184 __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
6185
6186 Register length_delta = scratch3;
6187
6188 __ j(less_equal, &left_shorter);
6189 // Right string is shorter. Change scratch1 to be length of right string.
6190 __ sub(scratch1, Operand(length_delta));
6191 __ bind(&left_shorter);
6192
6193 Register min_length = scratch1;
6194
6195 // If either length is zero, just compare lengths.
6196 __ test(min_length, Operand(min_length));
6197 __ j(zero, &compare_lengths);
6198
6199 // Change index to run from -min_length to -1 by adding min_length
6200 // to string start. This means that the loop ends when index reaches zero,
6201 // which doesn't need an additional compare.
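  // I.e. (sketch): left and right are advanced to point just past their
  // first min_length characters, and the loop below walks index from
  // -min_length up to zero, comparing left[index] with right[index].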
6202 __ SmiUntag(min_length);
6203 __ lea(left,
6204 FieldOperand(left,
6205 min_length, times_1,
6206 SeqAsciiString::kHeaderSize));
6207 __ lea(right,
6208 FieldOperand(right,
6209 min_length, times_1,
6210 SeqAsciiString::kHeaderSize));
6211 __ neg(min_length);
6212
6213 Register index = min_length; // index = -min_length;
6214
6215 {
6216 // Compare loop.
6217 NearLabel loop;
6218 __ bind(&loop);
6219 // Compare characters.
6220 __ mov_b(scratch2, Operand(left, index, times_1, 0));
6221 __ cmpb(scratch2, Operand(right, index, times_1, 0));
6222 __ j(not_equal, &result_not_equal);
6223 __ add(Operand(index), Immediate(1));
6224 __ j(not_zero, &loop);
6225 }
6226
6227 // Compare lengths - strings up to min-length are equal.
6228 __ bind(&compare_lengths);
6229 __ test(length_delta, Operand(length_delta));
6230 __ j(not_zero, &result_not_equal);
6231
6232 // Result is EQUAL.
6233 STATIC_ASSERT(EQUAL == 0);
6234 STATIC_ASSERT(kSmiTag == 0);
6235 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6236 __ ret(0);
6237
6238 __ bind(&result_not_equal);
6239 __ j(greater, &result_greater);
6240
6241 // Result is LESS.
6242 __ Set(eax, Immediate(Smi::FromInt(LESS)));
6243 __ ret(0);
6244
6245 // Result is GREATER.
6246 __ bind(&result_greater);
6247 __ Set(eax, Immediate(Smi::FromInt(GREATER)));
6248 __ ret(0);
6249}
6250
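// Roughly equivalent C for the code above (sketch): compare the first
// min(len_l, len_r) characters; if they all match, the shorter string
// compares as smaller.
//
//   int min_length = len_l < len_r ? len_l : len_r;
//   for (int i = 0; i < min_length; i++) {
//     if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
//   }
//   if (len_l == len_r) return EQUAL;
//   return len_l < len_r ? LESS : GREATER;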
6251
6252void StringCompareStub::Generate(MacroAssembler* masm) {
6253 Label runtime;
6254
6255 // Stack frame on entry.
6256 // esp[0]: return address
6257 // esp[4]: right string
6258 // esp[8]: left string
6259
6260 __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
6261 __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
6262
6263 NearLabel not_same;
6264 __ cmp(edx, Operand(eax));
6265 __ j(not_equal, &not_same);
6266 STATIC_ASSERT(EQUAL == 0);
6267 STATIC_ASSERT(kSmiTag == 0);
6268 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6269 __ IncrementCounter(&Counters::string_compare_native, 1);
6270 __ ret(2 * kPointerSize);
6271
6272 __ bind(&not_same);
6273
6274 // Check that both objects are sequential ascii strings.
6275 __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
6276
6277 // Compare flat ascii strings.
6278 // Drop arguments from the stack.
6279 __ pop(ecx);
6280 __ add(Operand(esp), Immediate(2 * kPointerSize));
6281 __ push(ecx);
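  // The arguments are removed here because GenerateCompareFlatAsciiStrings
  // returns with ret(0) and therefore expects them to be gone already.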
6282 GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
6283
6284 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6285 // tagged as a small integer.
6286 __ bind(&runtime);
6287 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
6288}
6289
6290
6291void StringCharAtStub::Generate(MacroAssembler* masm) {
6292 // Expects two arguments (object, index) on the stack:
6293
6294 // Stack frame on entry.
6295 // esp[0]: return address
6296 // esp[4]: index
6297 // esp[8]: object
6298
6299 Register object = ebx;
6300 Register index = eax;
6301 Register scratch1 = ecx;
6302 Register scratch2 = edx;
6303 Register result = eax;
6304
6305 __ pop(scratch1); // Return address.
6306 __ pop(index);
6307 __ pop(object);
6308 __ push(scratch1);
6309
6310 Label need_conversion;
6311 Label index_out_of_range;
6312 Label done;
6313 StringCharAtGenerator generator(object,
6314 index,
6315 scratch1,
6316 scratch2,
6317 result,
6318 &need_conversion,
6319 &need_conversion,
6320 &index_out_of_range,
6321 STRING_INDEX_IS_NUMBER);
6322 generator.GenerateFast(masm);
6323 __ jmp(&done);
6324
6325 __ bind(&index_out_of_range);
6326 // When the index is out of range, the spec requires us to return
6327 // the empty string.
6328 __ Set(result, Immediate(Factory::empty_string()));
6329 __ jmp(&done);
6330
6331 __ bind(&need_conversion);
6332 // Move smi zero into the result register, which will trigger
6333 // conversion.
6334 __ Set(result, Immediate(Smi::FromInt(0)));
6335 __ jmp(&done);
6336
6337 StubRuntimeCallHelper call_helper;
6338 generator.GenerateSlow(masm, call_helper);
6339
6340 __ bind(&done);
6341 __ ret(0);
6342}
6343
6344void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6345 ASSERT(state_ == CompareIC::SMIS);
6346 NearLabel miss;
6347 __ mov(ecx, Operand(edx));
6348 __ or_(ecx, Operand(eax));
6349 __ test(ecx, Immediate(kSmiTagMask));
6350 __ j(not_zero, &miss, not_taken);
6351
6352 if (GetCondition() == equal) {
6353 // For equality we do not care about the sign of the result.
6354 __ sub(eax, Operand(edx));
6355 } else {
6356 NearLabel done;
6357 __ sub(edx, Operand(eax));
6358 __ j(no_overflow, &done);
6359 // Correct sign of result in case of overflow.
6360 __ not_(edx);
6361 __ bind(&done);
6362 __ mov(eax, edx);
6363 }
6364 __ ret(0);
6365
6366 __ bind(&miss);
6367 GenerateMiss(masm);
6368}
6369
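// Sketch of the non-equality path above: the difference of the two tagged
// values has the correct sign unless the subtraction overflows, in which
// case flipping all bits (not_) restores a correctly signed result:
//
//   int32_t diff = x - y;          // x in edx, y in eax; may overflow
//   if (overflowed) diff = ~diff;  // sign is correct again
//   return diff;                   // < 0, == 0 or > 0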
6370
6371void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6372 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6373
6374 NearLabel generic_stub;
6375 NearLabel unordered;
6376 NearLabel miss;
6377 __ mov(ecx, Operand(edx));
6378 __ and_(ecx, Operand(eax));
6379 __ test(ecx, Immediate(kSmiTagMask));
6380 __ j(zero, &generic_stub, not_taken);
6381
6382 __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
6383 __ j(not_equal, &miss, not_taken);
6384 __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6385 __ j(not_equal, &miss, not_taken);
6386
6387 // Inline the double comparison and fall back to the general compare
6388 // stub if NaN is involved or SSE2 or CMOV is unsupported.
6389 if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
6390 CpuFeatures::Scope scope1(SSE2);
6391 CpuFeatures::Scope scope2(CMOV);
6392
6393 // Load left and right operand
6394 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
6395 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
6396
6397 // Compare operands
6398 __ ucomisd(xmm0, xmm1);
6399
6400 // Don't base result on EFLAGS when a NaN is involved.
6401 __ j(parity_even, &unordered, not_taken);
6402
6403 // Return a result of -1, 0, or 1, based on EFLAGS.
6404 // Use mov rather than xor, because xor would destroy the flag register.
6405 __ mov(eax, 0); // equal
6406 __ mov(ecx, Immediate(Smi::FromInt(1)));
6407 __ cmov(above, eax, Operand(ecx));
6408 __ mov(ecx, Immediate(Smi::FromInt(-1)));
6409 __ cmov(below, eax, Operand(ecx));
6410 __ ret(0);
6411
6412 __ bind(&unordered);
6413 }
6414
6415 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
6416 __ bind(&generic_stub);
6417 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
6418
6419 __ bind(&miss);
6420 GenerateMiss(masm);
6421}
6422
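// The SSE2/CMOV path above produces the conventional -1/0/+1 result (as
// smis) directly from EFLAGS, roughly:
//
//   if (isnan(a) || isnan(b)) goto unordered;  // parity flag after ucomisd
//   int result = 0;                            // assume equal
//   if (a > b) result = +1;                    // cmov(above)
//   if (a < b) result = -1;                    // cmov(below)
//   return Smi(result);
//
// where a is the operand loaded into xmm0 and b the one loaded into xmm1.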
6423
6424void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6425 ASSERT(state_ == CompareIC::OBJECTS);
6426 NearLabel miss;
6427 __ mov(ecx, Operand(edx));
6428 __ and_(ecx, Operand(eax));
6429 __ test(ecx, Immediate(kSmiTagMask));
6430 __ j(zero, &miss, not_taken);
6431
6432 __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
6433 __ j(not_equal, &miss, not_taken);
6434 __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
6435 __ j(not_equal, &miss, not_taken);
6436
6437 ASSERT(GetCondition() == equal);
6438 __ sub(eax, Operand(edx));
6439 __ ret(0);
6440
6441 __ bind(&miss);
6442 GenerateMiss(masm);
6443}
6444
6445
6446void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6447 // Save the registers.
6448 __ pop(ecx);
6449 __ push(edx);
6450 __ push(eax);
6451 __ push(ecx);
6452
6453 // Call the runtime system in a fresh internal frame.
6454 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss));
6455 __ EnterInternalFrame();
6456 __ push(edx);
6457 __ push(eax);
6458 __ push(Immediate(Smi::FromInt(op_)));
6459 __ CallExternalReference(miss, 3);
6460 __ LeaveInternalFrame();
6461
6462 // Compute the entry point of the rewritten stub.
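  // The miss handler is expected to return the code object of the rewritten
  // stub in eax.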
6463 __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
6464
6465 // Restore registers.
6466 __ pop(ecx);
6467 __ pop(eax);
6468 __ pop(edx);
6469 __ push(ecx);
6470
6471 // Do a tail call to the rewritten stub.
6472 __ jmp(Operand(edi));
6473}
6474
6475
6476#undef __
6477
6478} } // namespace v8::internal
6479
6480#endif // V8_TARGET_ARCH_IA32