// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)
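// The '__' shorthand routes each assembler mnemonic below through the
// MacroAssembler instance passed to the generator functions.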

static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond,
                                          bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict);
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs);


void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in r0.
  Label check_heap_number, call_builtin;
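  // kSmiTag == 0 and kSmiTagMask == 1, so the tst below sets the Z flag
  // exactly when r0 holds a smi; a smi is already a number, so it is
  // returned unchanged.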
  __ tst(r0, Operand(kSmiTagMask));
  __ b(ne, &check_heap_number);
  __ Ret();

  __ bind(&check_heap_number);
  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(r1, ip);
  __ b(ne, &call_builtin);
  __ Ret();

  __ bind(&call_builtin);
  __ push(r0);
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS);
}


void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in cp.
  Label gc;

  // Pop the function info from the stack.
  __ pop(r3);

  // Attempt to allocate new JSFunction in new space.
  __ AllocateInNewSpace(JSFunction::kSize,
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
  __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
  __ str(r1, FieldMemOperand(r0, JSObject::kPropertiesOffset));
  __ str(r1, FieldMemOperand(r0, JSObject::kElementsOffset));
  __ str(r2, FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
  __ str(r3, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
  __ str(cp, FieldMemOperand(r0, JSFunction::kContextOffset));
  __ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
  __ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ str(r3, FieldMemOperand(r0, JSFunction::kCodeEntryOffset));

  // Return result. The argument function info has been popped already.
  __ Ret();

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ LoadRoot(r4, Heap::kFalseValueRootIndex);
  __ Push(cp, r3, r4);
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}


void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;

  // Attempt to allocate the context in new space.
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        r0,
                        r1,
                        r2,
                        &gc,
                        TAG_OBJECT);

  // Load the function from the stack.
  __ ldr(r3, MemOperand(sp, 0));
  // Set up the object header.
  __ LoadRoot(r2, Heap::kContextMapRootIndex);
  __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
  __ mov(r2, Operand(Smi::FromInt(length)));
  __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));

  // Set up the fixed slots.
  __ mov(r1, Operand(Smi::FromInt(0)));
  __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
  __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));

  // Copy the global object from the surrounding context.
  __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
  }

  // Remove the on-stack argument and return.
  __ mov(cp, r0);
  __ pop();
  __ Ret();

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewContext, 1, 1);
}


void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [sp]: constant elements.
  // [sp + kPointerSize]: literal index.
  // [sp + (2 * kPointerSize)]: literals array.

  // All sizes here are multiples of kPointerSize.
  int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
  int size = JSArray::kSize + elements_size;

  // Load boilerplate object into r3 and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
  __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
  __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
  __ cmp(r3, ip);
  __ b(eq, &slow_case);

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode_ == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else {
      ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(r3);
    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
    __ LoadRoot(ip, expected_map_index);
    __ cmp(r3, ip);
    __ Assert(eq, message);
    __ pop(r3);
  }

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size,
                        r0,
                        r1,
                        r2,
                        &slow_case,
                        TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
      __ ldr(r1, FieldMemOperand(r3, i));
      __ str(r1, FieldMemOperand(r0, i));
    }
  }

  if (length_ > 0) {
    // Get hold of the elements array of the boilerplate and set up the
    // elements pointer in the resulting object.
    __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
    __ add(r2, r0, Operand(JSArray::kSize));
    __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));

    // Copy the elements array.
    __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
  }

  // Return and remove the on-stack parameters.
  __ add(sp, sp, Operand(3 * kPointerSize));
  __ Ret();

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}


// Takes a Smi and converts it to an IEEE 64 bit floating point value in two
// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
// scratch register.  Destroys the source register.  No GC occurs during this
// stub so you don't have to set up the frame.
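// Worked example of this layout: the Smi 5 converts to 5.0 = 1.25 * 2^2,
// i.e. sign 0, biased exponent 1023 + 2 = 0x401, fraction 0x4000000000000,
// so the exponent word is 0x40140000 and the mantissa word is 0x00000000.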
class ConvertToDoubleStub : public CodeStub {
 public:
  ConvertToDoubleStub(Register result_reg_1,
                      Register result_reg_2,
                      Register source_reg,
                      Register scratch_reg)
      : result1_(result_reg_1),
        result2_(result_reg_2),
        source_(source_reg),
        zeros_(scratch_reg) { }

 private:
  Register result1_;
  Register result2_;
  Register source_;
  Register zeros_;

  // Minor key encoding in 16 bits.
  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
  class OpBits: public BitField<Token::Value, 2, 14> {};

  Major MajorKey() { return ConvertToDouble; }
  int MinorKey() {
    // Encode the parameters in a unique 16 bit value.
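    // ARM core register codes are 0..15, so each code fits in 4 bits and
    // the four codes pack into the low 16 bits without colliding.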
    return result1_.code() +
           (result2_.code() << 4) +
           (source_.code() << 8) +
           (zeros_.code() << 12);
  }

  void Generate(MacroAssembler* masm);

  const char* GetName() { return "ConvertToDoubleStub"; }

#ifdef DEBUG
  void Print() { PrintF("ConvertToDoubleStub\n"); }
#endif
};


void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
#ifndef BIG_ENDIAN_FLOATING_POINT
  Register exponent = result1_;
  Register mantissa = result2_;
#else
  Register exponent = result2_;
  Register mantissa = result1_;
#endif
  Label not_special;
  // Convert from Smi to integer.
  __ mov(source_, Operand(source_, ASR, kSmiTagSize));
  // Move sign bit from source to destination.  This works because the sign bit
  // in the exponent word of the double has the same position and polarity as
  // the 2's complement sign bit in a Smi.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
  // Subtract from 0 if source was negative.
  __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne);

  // We have -1, 0 or 1, which we treat specially. Register source_ contains
  // absolute value: it is either equal to 1 (special case of -1 and 1),
  // greater than 1 (not a special case) or less than 1 (special case of 0).
  __ cmp(source_, Operand(1));
  __ b(gt, &not_special);

  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
  static const uint32_t exponent_word_for_1 =
      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
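  // With kExponentBias == 1023 and kExponentShift == 20 this constant is
  // 0x3FF00000, the exponent word of the double 1.0.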
  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
  // 1, 0 and -1 all have 0 for the second word.
  __ mov(mantissa, Operand(0, RelocInfo::NONE));
  __ Ret();

  __ bind(&not_special);
  // Count leading zeros.  Uses mantissa for a scratch register on pre-ARM5.
  // Gets the wrong answer for 0, but we already checked for that case above.
  __ CountLeadingZeros(zeros_, source_, mantissa);
  // Compute exponent and or it into the exponent register.
  // We use mantissa as a scratch register here.  Use a fudge factor to
  // divide the constant 31 + HeapNumber::kExponentBias, 0x41e, into two parts
  // that fit in the ARM's constant field.
  int fudge = 0x400;
  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
  __ add(mantissa, mantissa, Operand(fudge));
  __ orr(exponent,
         exponent,
         Operand(mantissa, LSL, HeapNumber::kExponentShift));
  // Shift up the source chopping the top bit off.
  __ add(zeros_, zeros_, Operand(1));
  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
  __ mov(source_, Operand(source_, LSL, zeros_));
  // Compute lower part of fraction (last 12 bits).
  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
  // And the top (top 20 bits).
  __ orr(exponent,
         exponent,
         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
  __ Ret();
}


class FloatingPointHelper : public AllStatic {
 public:

  enum Destination {
    kVFPRegisters,
    kCoreRegisters
  };


  // Loads smis from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
  // is floating point registers VFP3 must be supported. If core registers are
  // requested when VFP3 is supported d6 and d7 will be scratched.
  static void LoadSmis(MacroAssembler* masm,
                       Destination destination,
                       Register scratch1,
                       Register scratch2);

  // Loads objects from r0 and r1 (right and left in binary operations) into
  // floating point registers. Depending on the destination the values end up
  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
  // is floating point registers VFP3 must be supported. If core registers are
  // requested when VFP3 is supported d6 and d7 will still be scratched. If
  // either r0 or r1 is not a number (not a smi and not a heap number object)
  // the not_number label is jumped to with r0 and r1 intact.
  static void LoadOperands(MacroAssembler* masm,
                           FloatingPointHelper::Destination destination,
                           Register heap_number_map,
                           Register scratch1,
                           Register scratch2,
                           Label* not_number);

  // Loads the number from object into dst as a 32-bit integer if possible. If
  // the object is not a 32-bit integer control continues at the label
  // not_int32. If VFP is supported double_scratch is used but not scratch2.
  static void LoadNumberAsInteger(MacroAssembler* masm,
                                  Register object,
                                  Register dst,
                                  Register heap_number_map,
                                  Register scratch1,
                                  Register scratch2,
                                  DwVfpRegister double_scratch,
                                  Label* not_int32);

 private:
  static void LoadNumber(MacroAssembler* masm,
                         FloatingPointHelper::Destination destination,
                         Register object,
                         DwVfpRegister dst,
                         Register dst1,
                         Register dst2,
                         Register heap_number_map,
                         Register scratch1,
                         Register scratch2,
                         Label* not_number);
};


void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
                                   FloatingPointHelper::Destination destination,
                                   Register scratch1,
                                   Register scratch2) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
    __ vmov(d7.high(), scratch1);
    __ vcvt_f64_s32(d7, d7.high());
    __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
    __ vmov(d6.high(), scratch1);
    __ vcvt_f64_s32(d6, d6.high());
    if (destination == kCoreRegisters) {
      __ vmov(r2, r3, d7);
      __ vmov(r0, r1, d6);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write Smi from r0 to r3 and r2 in double format.
    __ mov(scratch1, Operand(r0));
    ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
    __ push(lr);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Write Smi from r1 to r1 and r0 in double format.  scratch2 is scratch.
    __ mov(scratch1, Operand(r1));
    ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }
}


void FloatingPointHelper::LoadOperands(
    MacroAssembler* masm,
    FloatingPointHelper::Destination destination,
    Register heap_number_map,
    Register scratch1,
    Register scratch2,
    Label* slow) {

  // Load right operand (r0) to d7 or r2/r3.
  LoadNumber(masm, destination,
             r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);

  // Load left operand (r1) to d6 or r0/r1.
  LoadNumber(masm, destination,
             r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
}


void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
                                     Destination destination,
                                     Register object,
                                     DwVfpRegister dst,
                                     Register dst1,
                                     Register dst2,
                                     Register heap_number_map,
                                     Register scratch1,
                                     Register scratch2,
                                     Label* not_number) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }

  Label is_smi, done;

  __ JumpIfSmi(object, &is_smi);
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);

  // Handle loading a double from a heap number.
  if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from tagged HeapNumber to double register.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(dst, scratch1, HeapNumber::kValueOffset);
  } else {
    ASSERT(destination == kCoreRegisters);
    // Load the double from heap number to dst1 and dst2 in double format.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
  }
  __ jmp(&done);

  // Handle loading a double from a smi.
  __ bind(&is_smi);
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Convert smi to double using VFP instructions.
    __ SmiUntag(scratch1, object);
    __ vmov(dst.high(), scratch1);
    __ vcvt_f64_s32(dst, dst.high());
    if (destination == kCoreRegisters) {
      // Load the converted smi to dst1 and dst2 in double format.
      __ vmov(dst1, dst2, dst);
    }
  } else {
    ASSERT(destination == kCoreRegisters);
    // Write smi to dst1 and dst2 in double format.
    __ mov(scratch1, Operand(object));
    ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
    __ push(lr);
    __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
                                              Register object,
                                              Register dst,
                                              Register heap_number_map,
                                              Register scratch1,
                                              Register scratch2,
                                              DwVfpRegister double_scratch,
                                              Label* not_int32) {
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  Label is_smi, done;
  __ JumpIfSmi(object, &is_smi);
  __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
  __ cmp(scratch1, heap_number_map);
  __ b(ne, not_int32);
  __ ConvertToInt32(
      object, dst, scratch1, scratch2, double_scratch, not_int32);
  __ jmp(&done);
  __ bind(&is_smi);
  __ SmiUntag(dst, object);
  __ bind(&done);
}


// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
  // the_int_ has the answer which is a signed int32 but not a Smi.
  // We test for the special value that has a different exponent.  This test
  // has the neat side effect of setting the flags according to the sign.
  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
  __ cmp(the_int_, Operand(0x80000000u));
  __ b(eq, &max_negative_int);
  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
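  // For example, 2^30 itself is 1.0 * 2^30, giving a biased exponent of
  // 1023 + 30 = 1053, so non_smi_exponent below is 0x41d00000.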
  uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ mov(scratch_, Operand(non_smi_exponent));
  // Set the sign bit in scratch_ if the value was negative.
  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
  // Subtract from 0 if the value was negative.
  __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs);
  // We should be masking the implicit first bit of the mantissa away here,
  // but it just ends up combining harmlessly with the last bit of the
  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kExponentOffset));
  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
  __ str(scratch_, FieldMemOperand(the_heap_number_,
                                   HeapNumber::kMantissaOffset));
  __ Ret();

  __ bind(&max_negative_int);
  // The max negative int32 is stored as a positive number in the mantissa of
  // a double because it uses a sign bit instead of using two's complement.
  // The actual mantissa bits stored are all 0 because the implicit most
  // significant 1 bit is not stored.
  non_smi_exponent += 1 << HeapNumber::kExponentShift;
  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
  __ mov(ip, Operand(0, RelocInfo::NONE));
  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
  __ Ret();
}


// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
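// In JS, (x == x) is false only when x is NaN (see ECMAScript 11.9.3), which
// is why identical heap numbers still need the NaN check below.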
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                          Label* slow,
                                          Condition cond,
                                          bool never_nan_nan) {
  Label not_identical;
  Label heap_number, return_equal;
  __ cmp(r0, r1);
  __ b(ne, &not_identical);

  // The two objects are identical.  If we know that one of them isn't NaN then
  // we now know they test equal.
  if (cond != eq || !never_nan_nan) {
    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
    // so we do the second best thing - test it ourselves.
    // The operands are identical and they are not both smis (that case was
    // handled earlier), so neither is a smi.  If it's not a heap number,
    // then return equal.
    if (cond == lt || cond == gt) {
      __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
      __ b(ge, slow);
    } else {
      __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
      __ b(eq, &heap_number);
      // Comparing JS objects with <=, >= is complicated.
      if (cond != eq) {
        __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
        __ b(ge, slow);
        // Normally here we fall through to return_equal, but undefined is
        // special: (undefined == undefined) == true, but
        // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
        if (cond == le || cond == ge) {
          __ cmp(r4, Operand(ODDBALL_TYPE));
          __ b(ne, &return_equal);
          __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
          __ cmp(r0, r2);
          __ b(ne, &return_equal);
          if (cond == le) {
            // undefined <= undefined should fail.
            __ mov(r0, Operand(GREATER));
          } else {
            // undefined >= undefined should fail.
            __ mov(r0, Operand(LESS));
          }
          __ Ret();
        }
      }
    }
  }

  __ bind(&return_equal);
  if (cond == lt) {
    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
  } else if (cond == gt) {
    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
  } else {
    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
  }
  __ Ret();

  if (cond != eq || !never_nan_nan) {
    // For less and greater we don't have to check for NaN since the result of
    // x < x is false regardless.  For the others here is some code to check
    // for NaN.
    if (cond != lt && cond != gt) {
      __ bind(&heap_number);
      // It is a heap number, so return non-equal if it's NaN and equal if it's
      // not NaN.

      // The representation of NaN values has all exponent bits (52..62) set,
      // and not all mantissa bits (0..51) clear.
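      // For example, the quiet NaN 0x7FF8000000000000 has exponent 0x7FF and
      // mantissa bit 51 set, while +Infinity (0x7FF0000000000000) has the
      // same exponent but an all-zero mantissa and is therefore not a NaN.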
      // Read top bits of double representation (second word of value).
      __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
      // Test that exponent bits are all set.
      __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
      // NaNs have all-one exponents so they sign extend to -1.
      __ cmp(r3, Operand(-1));
      __ b(ne, &return_equal);

      // Shift out flag and all exponent bits, retaining only mantissa.
      __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
      // Or with all low-bits of mantissa.
      __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
      __ orr(r0, r3, Operand(r2), SetCC);
      // For equal we already have the right value in r0:  Return zero (equal)
      // if all bits in mantissa are zero (it's an Infinity) and non-zero if
      // not (it's a NaN).  For <= and >= we need to load r0 with the failing
      // value if it's a NaN.
      if (cond != eq) {
        // All-zero means Infinity means equal.
        __ Ret(eq);
        if (cond == le) {
          __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
        } else {
          __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
        }
      }
      __ Ret();
    }
    // No fall through here.
  }

  __ bind(&not_identical);
}


// See comment at call site.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                    Register lhs,
                                    Register rhs,
                                    Label* lhs_not_nan,
                                    Label* slow,
                                    bool strict) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  Label rhs_is_smi;
  __ tst(rhs, Operand(kSmiTagMask));
  __ b(eq, &rhs_is_smi);

  // Lhs is a Smi.  Check whether the rhs is a heap number.
  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If rhs is not a number and lhs is a Smi then strict equality cannot
    // succeed.  Return non-equal.
    // If rhs is r0 then there is already a non-zero value in it.
    if (!rhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Lhs is a smi, rhs is a number.
  if (CpuFeatures::IsSupported(VFP3)) {
    // Convert lhs to a double in d7.
    CpuFeatures::Scope scope(VFP3);
    __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
    // Load the double from rhs, tagged HeapNumber r0, to d6.
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
  } else {
    __ push(lr);
    // Convert lhs to a double in r2, r3.
    __ mov(r7, Operand(lhs));
    ConvertToDoubleStub stub1(r3, r2, r7, r6);
    __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
    // Load rhs to a double in r0, r1.
    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
    __ pop(lr);
  }

  // We now have both loaded as doubles but we can skip the lhs nan check
  // since it's a smi.
  __ jmp(lhs_not_nan);

  __ bind(&rhs_is_smi);
  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
  if (strict) {
    // If lhs is not a number and rhs is a smi then strict equality cannot
    // succeed.  Return non-equal.
    // If lhs is r0 then there is already a non-zero value in it.
    if (!lhs.is(r0)) {
      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
    }
    __ Ret(ne);
  } else {
    // Smi compared non-strictly with a non-smi non-heap-number.  Call
    // the runtime.
    __ b(ne, slow);
  }

  // Rhs is a smi, lhs is a heap number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double from lhs, tagged HeapNumber r1, to d7.
    __ sub(r7, lhs, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
    // Convert rhs to a double in d6.
    __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
  } else {
    __ push(lr);
    // Load lhs to a double in r2, r3.
    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    // Convert rhs to a double in r0, r1.
    __ mov(r7, Operand(rhs));
    ConvertToDoubleStub stub2(r1, r0, r7, r6);
    __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
    __ pop(lr);
  }
  // Fall through to both_loaded_as_doubles.
}

void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;
  Label one_is_nan, neither_is_nan;

  __ Sbfx(r4,
          lhs_exponent,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // NaNs have all-one exponents so they sign extend to -1.
  __ cmp(r4, Operand(-1));
  __ b(ne, lhs_not_nan);
  __ mov(r4,
         Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
         SetCC);
  __ b(ne, &one_is_nan);
  __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE));
  __ b(ne, &one_is_nan);

  __ bind(lhs_not_nan);
  __ Sbfx(r4,
          rhs_exponent,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);
  // NaNs have all-one exponents so they sign extend to -1.
  __ cmp(r4, Operand(-1));
  __ b(ne, &neither_is_nan);
  __ mov(r4,
         Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
         SetCC);
  __ b(ne, &one_is_nan);
  __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
  __ b(eq, &neither_is_nan);

  __ bind(&one_is_nan);
  // NaN comparisons always fail.
  // Load whatever we need in r0 to make the comparison fail.
  if (cond == lt || cond == le) {
    __ mov(r0, Operand(GREATER));
  } else {
    __ mov(r0, Operand(LESS));
  }
  __ Ret();

  __ bind(&neither_is_nan);
}


// See comment at call site.
static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
                                          Condition cond) {
  bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
  Register rhs_exponent = exp_first ? r0 : r1;
  Register lhs_exponent = exp_first ? r2 : r3;
  Register rhs_mantissa = exp_first ? r1 : r0;
  Register lhs_mantissa = exp_first ? r3 : r2;

  // r0, r1, r2, r3 have the two doubles.  Neither is a NaN.
  if (cond == eq) {
    // Doubles are not equal unless they have the same bit pattern.
    // Exception: 0 and -0.
    __ cmp(rhs_mantissa, Operand(lhs_mantissa));
    __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
    // Return non-zero if the numbers are unequal.
    __ Ret(ne);

    __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
    // If exponents are equal then return 0.
    __ Ret(eq);

    // Exponents are unequal.  The only way we can return that the numbers
    // are equal is if one is -0 and the other is 0.  We already dealt
    // with the case where both are -0 or both are 0.
    // We start by seeing if the mantissas (that are equal) or the bottom
    // 31 bits of the rhs exponent are non-zero.  If so we return not
    // equal.
    __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
    __ mov(r0, Operand(r4), LeaveCC, ne);
    __ Ret(ne);
    // Now they are equal if and only if the lhs exponent is zero in its
    // low 31 bits.
    __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
    __ Ret();
  } else {
    // Call a native function to do a comparison between two non-NaNs.
    // Call C routine that may not cause GC or other trouble.
    __ push(lr);
    __ PrepareCallCFunction(4, r5);  // Two doubles count as 4 arguments.
    __ CallCFunction(ExternalReference::compare_doubles(), 4);
    __ pop(pc);  // Return.
  }
}


// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                           Register lhs,
                                           Register rhs) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // If either operand is a JSObject or an oddball value, then they are
  // not equal since their pointers are different.
  // There is no test for undetectability in strict equality.
  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
  Label first_non_object;
  // Get the type of the first operand into r2 and compare it with
  // FIRST_JS_OBJECT_TYPE.
  __ CompareObjectType(rhs, r2, r2, FIRST_JS_OBJECT_TYPE);
  __ b(lt, &first_non_object);

  // Return non-zero (r0 is not zero)
  Label return_not_equal;
  __ bind(&return_not_equal);
  __ Ret();

  __ bind(&first_non_object);
  // Check for oddballs: true, false, null, undefined.
  __ cmp(r2, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  __ CompareObjectType(lhs, r3, r3, FIRST_JS_OBJECT_TYPE);
  __ b(ge, &return_not_equal);

  // Check for oddballs: true, false, null, undefined.
  __ cmp(r3, Operand(ODDBALL_TYPE));
  __ b(eq, &return_not_equal);

  // Now that we have the types we might as well check for symbol-symbol.
  // Ensure that no non-strings have the symbol bit set.
  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
  STATIC_ASSERT(kSymbolTag != 0);
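  // The symbol bit survives the AND below only if it is set in both type
  // bytes, i.e. only if both operands are symbols.  Two distinct symbols are
  // never equal, so once identity has been ruled out we can return not-equal.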
  __ and_(r2, r2, Operand(r3));
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(ne, &return_not_equal);
}


// See comment at call site.
static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                       Register lhs,
                                       Register rhs,
                                       Label* both_loaded_as_doubles,
                                       Label* not_heap_numbers,
                                       Label* slow) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
  __ b(ne, not_heap_numbers);
  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ cmp(r2, r3);
  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.

  // Both are heap numbers.  Load them up then jump to the code we have
  // for that.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ sub(r7, rhs, Operand(kHeapObjectTag));
    __ vldr(d6, r7, HeapNumber::kValueOffset);
    __ sub(r7, lhs, Operand(kHeapObjectTag));
    __ vldr(d7, r7, HeapNumber::kValueOffset);
  } else {
    __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
    __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
  }
  __ jmp(both_loaded_as_doubles);
}


// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm,
                                         Register lhs,
                                         Register rhs,
                                         Label* possible_strings,
                                         Label* not_both_strings) {
  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
         (lhs.is(r1) && rhs.is(r0)));

  // r2 is object type of rhs.
  // Ensure that no non-strings have the symbol bit set.
  Label object_test;
  STATIC_ASSERT(kSymbolTag != 0);
  __ tst(r2, Operand(kIsNotStringMask));
  __ b(ne, &object_test);
  __ tst(r2, Operand(kIsSymbolMask));
  __ b(eq, possible_strings);
  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
  __ b(ge, not_both_strings);
  __ tst(r3, Operand(kIsSymbolMask));
  __ b(eq, possible_strings);

  // Both are symbols.  We already checked they weren't the same pointer
  // so they are not equal.
  __ mov(r0, Operand(NOT_EQUAL));
  __ Ret();

  __ bind(&object_test);
  __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, not_both_strings);
  __ CompareObjectType(lhs, r2, r3, FIRST_JS_OBJECT_TYPE);
  __ b(lt, not_both_strings);
  // If both objects are undetectable, they are equal. Otherwise, they
  // are not equal, since they are different objects and an object is not
  // equal to undefined.
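  // The and/eor sequence below leaves r0 == 0 (JS "equal") exactly when the
  // undetectable bit is set in both bit fields, and non-zero otherwise.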
  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
  __ and_(r0, r2, Operand(r3));
  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
  __ Ret();
}


void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         Register scratch3,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch3;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
  // Divide length by two (length is a smi).
  __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
  __ sub(mask, mask, Operand(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
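  // For example, with 64 cache entries (mask 63) the smi 7 hashes to entry 7;
  // each entry occupies two consecutive fixed-array slots, with the number at
  // slot 2 * entry and the string at slot 2 * entry + 1.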
|  | 1053 | Label is_smi; | 
|  | 1054 | Label load_result_from_cache; | 
|  | 1055 | if (!object_is_smi) { | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 1056 | __ JumpIfSmi(object, &is_smi); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1057 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 1058 | CpuFeatures::Scope scope(VFP3); | 
|  | 1059 | __ CheckMap(object, | 
|  | 1060 | scratch1, | 
|  | 1061 | Heap::kHeapNumberMapRootIndex, | 
|  | 1062 | not_found, | 
|  | 1063 | true); | 
|  | 1064 |  | 
|  | 1065 | STATIC_ASSERT(8 == kDoubleSize); | 
|  | 1066 | __ add(scratch1, | 
|  | 1067 | object, | 
|  | 1068 | Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 
|  | 1069 | __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | 
|  | 1070 | __ eor(scratch1, scratch1, Operand(scratch2)); | 
|  | 1071 | __ and_(scratch1, scratch1, Operand(mask)); | 
|  | 1072 |  | 
|  | 1073 | // Calculate address of entry in string cache: each entry consists | 
|  | 1074 | // of two pointer sized fields. | 
|  | 1075 | __ add(scratch1, | 
|  | 1076 | number_string_cache, | 
|  | 1077 | Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | 
|  | 1078 |  | 
|  | 1079 | Register probe = mask; | 
|  | 1080 | __ ldr(probe, | 
|  | 1081 | FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 1082 | __ JumpIfSmi(probe, not_found); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1083 | __ sub(scratch2, object, Operand(kHeapObjectTag)); | 
|  | 1084 | __ vldr(d0, scratch2, HeapNumber::kValueOffset); | 
|  | 1085 | __ sub(probe, probe, Operand(kHeapObjectTag)); | 
|  | 1086 | __ vldr(d1, probe, HeapNumber::kValueOffset); | 
| Ben Murdoch | b8e0da2 | 2011-05-16 14:20:40 +0100 | [diff] [blame] | 1087 | __ VFPCompareAndSetFlags(d0, d1); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1088 | __ b(ne, not_found);  // The cache did not contain this value. | 
|  | 1089 | __ b(&load_result_from_cache); | 
|  | 1090 | } else { | 
|  | 1091 | __ b(not_found); | 
|  | 1092 | } | 
|  | 1093 | } | 
|  | 1094 |  | 
|  | 1095 | __ bind(&is_smi); | 
|  | 1096 | Register scratch = scratch1; | 
|  | 1097 | __ and_(scratch, mask, Operand(object, ASR, 1)); | 
|  | 1098 | // Calculate address of entry in string cache: each entry consists | 
|  | 1099 | // of two pointer sized fields. | 
|  | 1100 | __ add(scratch, | 
|  | 1101 | number_string_cache, | 
|  | 1102 | Operand(scratch, LSL, kPointerSizeLog2 + 1)); | 
|  | 1103 |  | 
|  | 1104 | // Check if the entry is the smi we are looking for. | 
|  | 1105 | Register probe = mask; | 
|  | 1106 | __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | 
|  | 1107 | __ cmp(object, probe); | 
|  | 1108 | __ b(ne, not_found); | 
|  | 1109 |  | 
|  | 1110 | // Get the result from the cache. | 
|  | 1111 | __ bind(&load_result_from_cache); | 
|  | 1112 | __ ldr(result, | 
|  | 1113 | FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | 
|  | 1114 | __ IncrementCounter(&Counters::number_to_string_native, | 
|  | 1115 | 1, | 
|  | 1116 | scratch1, | 
|  | 1117 | scratch2); | 
|  | 1118 | } | 
|  | 1119 |  | 
|  | 1120 |  | 
|  | 1121 | void NumberToStringStub::Generate(MacroAssembler* masm) { | 
|  | 1122 | Label runtime; | 
|  | 1123 |  | 
|  | 1124 | __ ldr(r1, MemOperand(sp, 0)); | 
|  | 1125 |  | 
|  | 1126 | // Generate code to lookup number in the number string cache. | 
|  | 1127 | GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime); | 
|  | 1128 | __ add(sp, sp, Operand(1 * kPointerSize)); | 
|  | 1129 | __ Ret(); | 
|  | 1130 |  | 
|  | 1131 | __ bind(&runtime); | 
|  | 1132 | // Handle number to string in the runtime system if not found in the cache. | 
|  | 1133 | __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); | 
|  | 1134 | } | 
|  | 1135 |  | 
|  | 1136 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1137 | // On entry lhs_ and rhs_ are the values to be compared. | 
|  | 1138 | // On exit r0 is 0, positive or negative to indicate the result of | 
|  | 1139 | // the comparison. | 
|  | 1140 | void CompareStub::Generate(MacroAssembler* masm) { | 
|  | 1141 | ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | 
|  | 1142 | (lhs_.is(r1) && rhs_.is(r0))); | 
|  | 1143 |  | 
|  | 1144 | Label slow;  // Call builtin. | 
|  | 1145 | Label not_smis, both_loaded_as_doubles, lhs_not_nan; | 
|  | 1146 |  | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 1147 | if (include_smi_compare_) { | 
|  | 1148 | Label not_two_smis, smi_done; | 
|  | 1149 | __ orr(r2, r1, r0); | 
|  | 1150 | __ tst(r2, Operand(kSmiTagMask)); | 
|  | 1151 | __ b(ne, &not_two_smis); | 
| Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 1152 | __ mov(r1, Operand(r1, ASR, 1)); | 
|  | 1153 | __ sub(r0, r1, Operand(r0, ASR, 1)); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 1154 | __ Ret(); | 
|  | 1155 | __ bind(&not_two_smis); | 
|  | 1156 | } else if (FLAG_debug_code) { | 
|  | 1157 | __ orr(r2, r1, r0); | 
|  | 1158 | __ tst(r2, Operand(kSmiTagMask)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 1159 | __ Assert(ne, "CompareStub: unexpected smi operands."); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 1160 | } | 
|  | 1161 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1162 | // NOTICE! This code is only reached after a smi-fast-case check, so | 
|  | 1163 | // it is certain that at least one operand isn't a smi. | 
|  | 1164 |  | 
|  | 1165 | // Handle the case where the objects are identical.  Either returns the answer | 
|  | 1166 | // or goes to slow.  Only falls through if the objects were not identical. | 
|  | 1167 | EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); | 
|  | 1168 |  | 
|  | 1169 | // If either is a Smi (we know that not both are), then they can only | 
|  | 1170 | // be strictly equal if the other is a HeapNumber. | 
|  | 1171 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 1172 | ASSERT_EQ(0, Smi::FromInt(0)); | 
|  | 1173 | __ and_(r2, lhs_, Operand(rhs_)); | 
|  | 1174 | __ tst(r2, Operand(kSmiTagMask)); | 
|  | 1175 | __ b(ne, &not_smis); | 
|  | 1176 | // One operand is a smi.  EmitSmiNonsmiComparison generates code that can: | 
|  | 1177 | // 1) Return the answer. | 
|  | 1178 | // 2) Go to slow. | 
|  | 1179 | // 3) Fall through to both_loaded_as_doubles. | 
|  | 1180 | // 4) Jump to lhs_not_nan. | 
|  | 1181 | // In cases 3 and 4 we have found out we were dealing with a number-number | 
|  | 1182 | // comparison.  If VFP3 is supported the double values of the numbers have | 
|  | 1183 | // been loaded into d7 and d6.  Otherwise, the double values have been loaded | 
|  | 1184 | // into r0, r1, r2, and r3. | 
|  | 1185 | EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); | 
|  | 1186 |  | 
|  | 1187 | __ bind(&both_loaded_as_doubles); | 
|  | 1188 | // The arguments have been converted to doubles and stored in d6 and d7, if | 
|  | 1189 | // VFP3 is supported, or in r0, r1, r2, and r3. | 
|  | 1190 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 1191 | __ bind(&lhs_not_nan); | 
|  | 1192 | CpuFeatures::Scope scope(VFP3); | 
|  | 1193 | Label no_nan; | 
|  | 1194 | // ARMv7 VFP3 instructions to implement double precision comparison. | 
| Ben Murdoch | b8e0da2 | 2011-05-16 14:20:40 +0100 | [diff] [blame] | 1195 | __ VFPCompareAndSetFlags(d7, d6); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1196 | Label nan; | 
|  | 1197 | __ b(vs, &nan); | 
|  | 1198 | __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 
|  | 1199 | __ mov(r0, Operand(LESS), LeaveCC, lt); | 
|  | 1200 | __ mov(r0, Operand(GREATER), LeaveCC, gt); | 
|  | 1201 | __ Ret(); | 
|  | 1202 |  | 
|  | 1203 | __ bind(&nan); | 
|  | 1204 | // If one of the sides was a NaN then the v flag is set.  Load r0 with | 
|  | 1205 | // whatever it takes to make the comparison fail, since comparisons with NaN | 
|  | 1206 | // always fail. | 
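|  |  | // E.g. for cc_ == lt, returning GREATER makes the caller's 'lt' test | 
|  |  | // fail, matching the JS rule that NaN < x is always false. | 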
|  | 1207 | if (cc_ == lt || cc_ == le) { | 
|  | 1208 | __ mov(r0, Operand(GREATER)); | 
|  | 1209 | } else { | 
|  | 1210 | __ mov(r0, Operand(LESS)); | 
|  | 1211 | } | 
|  | 1212 | __ Ret(); | 
|  | 1213 | } else { | 
|  | 1214 | // Checks for NaN in the doubles we have loaded.  Can return the answer or | 
|  | 1215 | // fall through if neither is a NaN.  Also binds lhs_not_nan. | 
|  | 1216 | EmitNanCheck(masm, &lhs_not_nan, cc_); | 
|  | 1217 | // Compares two doubles in r0, r1, r2, r3 that are not NaNs.  Returns the | 
|  | 1218 | // answer.  Never falls through. | 
|  | 1219 | EmitTwoNonNanDoubleComparison(masm, cc_); | 
|  | 1220 | } | 
|  | 1221 |  | 
|  | 1222 | __ bind(&not_smis); | 
|  | 1223 | // At this point we know we are dealing with two different objects, | 
|  | 1224 | // and neither of them is a Smi.  The objects are in rhs_ and lhs_. | 
|  | 1225 | if (strict_) { | 
|  | 1226 | // This returns non-equal for some object types, or falls through if it | 
|  | 1227 | // was not lucky. | 
|  | 1228 | EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); | 
|  | 1229 | } | 
|  | 1230 |  | 
|  | 1231 | Label check_for_symbols; | 
|  | 1232 | Label flat_string_check; | 
|  | 1233 | // Check for heap-number-heap-number comparison.  Can jump to slow case, | 
|  | 1234 | // or load both doubles into r0, r1, r2, r3 and jump to the code that handles | 
|  | 1235 | // that case.  If the inputs are not doubles then jumps to check_for_symbols. | 
|  | 1236 | // In this case r2 will contain the type of rhs_.  Never falls through. | 
|  | 1237 | EmitCheckForTwoHeapNumbers(masm, | 
|  | 1238 | lhs_, | 
|  | 1239 | rhs_, | 
|  | 1240 | &both_loaded_as_doubles, | 
|  | 1241 | &check_for_symbols, | 
|  | 1242 | &flat_string_check); | 
|  | 1243 |  | 
|  | 1244 | __ bind(&check_for_symbols); | 
|  | 1245 | // In the strict case the EmitStrictTwoHeapObjectCompare already took care of | 
|  | 1246 | // symbols. | 
|  | 1247 | if (cc_ == eq && !strict_) { | 
|  | 1248 | // Returns an answer for two symbols or two detectable objects. | 
|  | 1249 | // Otherwise jumps to string case or not both strings case. | 
|  | 1250 | // Assumes that r2 is the type of rhs_ on entry. | 
|  | 1251 | EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); | 
|  | 1252 | } | 
|  | 1253 |  | 
|  | 1254 | // Check for both being sequential ASCII strings, and inline if that is the | 
|  | 1255 | // case. | 
|  | 1256 | __ bind(&flat_string_check); | 
|  | 1257 |  | 
|  | 1258 | __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); | 
|  | 1259 |  | 
|  | 1260 | __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); | 
|  | 1261 | StringCompareStub::GenerateCompareFlatAsciiStrings(masm, | 
|  | 1262 | lhs_, | 
|  | 1263 | rhs_, | 
|  | 1264 | r2, | 
|  | 1265 | r3, | 
|  | 1266 | r4, | 
|  | 1267 | r5); | 
|  | 1268 | // Never falls through to here. | 
|  | 1269 |  | 
|  | 1270 | __ bind(&slow); | 
|  | 1271 |  | 
|  | 1272 | __ Push(lhs_, rhs_); | 
|  | 1273 | // Figure out which native to call and set up the arguments. | 
|  | 1274 | Builtins::JavaScript native; | 
|  | 1275 | if (cc_ == eq) { | 
|  | 1276 | native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 
|  | 1277 | } else { | 
|  | 1278 | native = Builtins::COMPARE; | 
|  | 1279 | int ncr;  // NaN compare result | 
|  | 1280 | if (cc_ == lt || cc_ == le) { | 
|  | 1281 | ncr = GREATER; | 
|  | 1282 | } else { | 
|  | 1283 | ASSERT(cc_ == gt || cc_ == ge);  // remaining cases | 
|  | 1284 | ncr = LESS; | 
|  | 1285 | } | 
|  | 1286 | __ mov(r0, Operand(Smi::FromInt(ncr))); | 
|  | 1287 | __ push(r0); | 
|  | 1288 | } | 
|  | 1289 |  | 
|  | 1290 | // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 
|  | 1291 | // tagged as a small integer. | 
|  | 1292 | __ InvokeBuiltin(native, JUMP_JS); | 
|  | 1293 | } | 
|  | 1294 |  | 
|  | 1295 |  | 
|  | 1296 | // This stub does not handle the inlined cases (Smis, Booleans, undefined). | 
|  | 1297 | // The stub returns zero for false, and a non-zero value for true. | 
|  | 1298 | void ToBooleanStub::Generate(MacroAssembler* masm) { | 
|  | 1299 | Label false_result; | 
|  | 1300 | Label not_heap_number; | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 1301 | Register scratch = r9.is(tos_) ? r7 : r9; | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1302 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 1303 | __ LoadRoot(ip, Heap::kNullValueRootIndex); | 
|  | 1304 | __ cmp(tos_, ip); | 
|  | 1305 | __ b(eq, &false_result); | 
|  | 1306 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1307 | // HeapNumber => false iff +0, -0, or NaN. | 
|  | 1308 | __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 
|  | 1309 | __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 
|  | 1310 | __ cmp(scratch, ip); | 
|  | 1311 | __ b(&not_heap_number, ne); | 
|  | 1312 |  | 
|  | 1313 | __ sub(ip, tos_, Operand(kHeapObjectTag)); | 
|  | 1314 | __ vldr(d1, ip, HeapNumber::kValueOffset); | 
| Ben Murdoch | b8e0da2 | 2011-05-16 14:20:40 +0100 | [diff] [blame] | 1315 | __ VFPCompareAndSetFlags(d1, 0.0); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1316 | // "tos_" is a register, and contains a non zero value by default. | 
|  | 1317 | // Hence we only need to overwrite "tos_" with zero to return false for | 
|  | 1318 | // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1319 | __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq);  // for FP_ZERO | 
|  | 1320 | __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs);  // for FP_NAN | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1321 | __ Ret(); | 
|  | 1322 |  | 
|  | 1323 | __ bind(&not_heap_number); | 
|  | 1324 |  | 
|  | 1325 | // Check if the value is 'null'. | 
|  | 1326 | // 'null' => false. | 
|  | 1327 | __ LoadRoot(ip, Heap::kNullValueRootIndex); | 
|  | 1328 | __ cmp(tos_, ip); | 
|  | 1329 | __ b(&false_result, eq); | 
|  | 1330 |  | 
|  | 1331 | // It can be an undetectable object. | 
|  | 1332 | // Undetectable => false. | 
|  | 1333 | __ ldr(ip, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 
|  | 1334 | __ ldrb(scratch, FieldMemOperand(ip, Map::kBitFieldOffset)); | 
|  | 1335 | __ and_(scratch, scratch, Operand(1 << Map::kIsUndetectable)); | 
|  | 1336 | __ cmp(scratch, Operand(1 << Map::kIsUndetectable)); | 
|  | 1337 | __ b(&false_result, eq); | 
|  | 1338 |  | 
|  | 1339 | // JavaScript object => true. | 
|  | 1340 | __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 
|  | 1341 | __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 
|  | 1342 | __ cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); | 
|  | 1343 | // "tos_" is a register and contains a non-zero value. | 
|  | 1344 | // Hence we implicitly return true if the greater than | 
|  | 1345 | // condition is satisfied. | 
|  | 1346 | __ Ret(gt); | 
|  | 1347 |  | 
|  | 1348 | // Check for string | 
|  | 1349 | __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 
|  | 1350 | __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 
|  | 1351 | __ cmp(scratch, Operand(FIRST_NONSTRING_TYPE)); | 
|  | 1352 | // "tos_" is a register and contains a non-zero value. | 
|  | 1353 | // Hence we implicitly return true if the greater than | 
|  | 1354 | // condition is satisfied. | 
|  | 1355 | __ Ret(gt); | 
|  | 1356 |  | 
|  | 1357 | // String value => false iff empty, i.e., length is zero | 
|  | 1358 | __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset)); | 
|  | 1359 | // If length is zero, "tos_" contains zero ==> false. | 
|  | 1360 | // If length is not zero, "tos_" contains a non-zero value ==> true. | 
|  | 1361 | __ Ret(); | 
|  | 1362 |  | 
|  | 1363 | // Return 0 in "tos_" for false. | 
|  | 1364 | __ bind(&false_result); | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 1365 | __ mov(tos_, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1366 | __ Ret(); | 
|  | 1367 | } | 
|  | 1368 |  | 
|  | 1369 |  | 
|  | 1370 | // We fall into this code if the operands were Smis, but the result was | 
|  | 1371 | // not (e.g. overflow).  We branch into this code (to the not_smi label) if | 
|  | 1372 | // the operands were not both Smi.  The operands are in r0 and r1.  In order | 
|  | 1373 | // to call the C-implemented binary fp operation routines we need to end up | 
|  | 1374 | // with the double precision floating point operands in r0 and r1 (for the | 
|  | 1375 | // value in r1) and r2 and r3 (for the value in r0). | 
|  | 1376 | void GenericBinaryOpStub::HandleBinaryOpSlowCases( | 
|  | 1377 | MacroAssembler* masm, | 
|  | 1378 | Label* not_smi, | 
|  | 1379 | Register lhs, | 
|  | 1380 | Register rhs, | 
|  | 1381 | const Builtins::JavaScript& builtin) { | 
|  | 1382 | Label slow, slow_reverse, do_the_call; | 
|  | 1383 | bool use_fp_registers = CpuFeatures::IsSupported(VFP3) && Token::MOD != op_; | 
|  | 1384 |  | 
|  | 1385 | ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); | 
|  | 1386 | Register heap_number_map = r6; | 
|  | 1387 |  | 
|  | 1388 | if (ShouldGenerateSmiCode()) { | 
|  | 1389 | __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 1390 |  | 
|  | 1391 | // Smi-smi case (overflow). | 
|  | 1392 | // Since both are Smis there is no heap number to overwrite, so allocate. | 
|  | 1393 | // The new heap number is in r5.  r3 and r7 are scratch. | 
|  | 1394 | __ AllocateHeapNumber( | 
|  | 1395 | r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow); | 
|  | 1396 |  | 
|  | 1397 | // If we have floating point hardware, inline ADD, SUB, MUL, and DIV, | 
|  | 1398 | // using registers d7 and d6 for the double values. | 
|  | 1399 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 1400 | CpuFeatures::Scope scope(VFP3); | 
|  | 1401 | __ mov(r7, Operand(rhs, ASR, kSmiTagSize)); | 
|  | 1402 | __ vmov(s15, r7); | 
|  | 1403 | __ vcvt_f64_s32(d7, s15); | 
|  | 1404 | __ mov(r7, Operand(lhs, ASR, kSmiTagSize)); | 
|  | 1405 | __ vmov(s13, r7); | 
|  | 1406 | __ vcvt_f64_s32(d6, s13); | 
|  | 1407 | if (!use_fp_registers) { | 
|  | 1408 | __ vmov(r2, r3, d7); | 
|  | 1409 | __ vmov(r0, r1, d6); | 
|  | 1410 | } | 
|  | 1411 | } else { | 
|  | 1412 | // Write Smi from rhs to r3 and r2 in double format.  r9 is scratch. | 
|  | 1413 | __ mov(r7, Operand(rhs)); | 
|  | 1414 | ConvertToDoubleStub stub1(r3, r2, r7, r9); | 
|  | 1415 | __ push(lr); | 
|  | 1416 | __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | 
|  | 1417 | // Write Smi from lhs to r1 and r0 in double format.  r9 is scratch. | 
|  | 1418 | __ mov(r7, Operand(lhs)); | 
|  | 1419 | ConvertToDoubleStub stub2(r1, r0, r7, r9); | 
|  | 1420 | __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 
|  | 1421 | __ pop(lr); | 
|  | 1422 | } | 
|  | 1423 | __ jmp(&do_the_call);  // Tail call.  No return. | 
|  | 1424 | } | 
|  | 1425 |  | 
|  | 1426 | // We branch here if at least one of r0 and r1 is not a Smi. | 
|  | 1427 | __ bind(not_smi); | 
|  | 1428 | __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 1429 |  | 
|  | 1430 | // After this point we have the left hand side in r1 and the right hand side | 
|  | 1431 | // in r0. | 
|  | 1432 | if (lhs.is(r0)) { | 
|  | 1433 | __ Swap(r0, r1, ip); | 
|  | 1434 | } | 
|  | 1435 |  | 
|  | 1436 | // The type transition also calculates the answer. | 
|  | 1437 | bool generate_code_to_calculate_answer = true; | 
|  | 1438 |  | 
|  | 1439 | if (ShouldGenerateFPCode()) { | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 1440 | // DIV has neither SmiSmi fast code nor specialized slow code. | 
|  | 1441 | // So don't try to patch a DIV Stub. | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1442 | if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { | 
|  | 1443 | switch (op_) { | 
|  | 1444 | case Token::ADD: | 
|  | 1445 | case Token::SUB: | 
|  | 1446 | case Token::MUL: | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1447 | GenerateTypeTransition(masm);  // Tail call. | 
|  | 1448 | generate_code_to_calculate_answer = false; | 
|  | 1449 | break; | 
|  | 1450 |  | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 1451 | case Token::DIV: | 
|  | 1452 | // DIV has neither SmiSmi fast code nor specialized slow code. | 
|  | 1453 | // So don't try to patch a DIV Stub. | 
|  | 1454 | break; | 
|  | 1455 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1456 | default: | 
|  | 1457 | break; | 
|  | 1458 | } | 
|  | 1459 | } | 
|  | 1460 |  | 
|  | 1461 | if (generate_code_to_calculate_answer) { | 
|  | 1462 | Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | 
|  | 1463 | if (mode_ == NO_OVERWRITE) { | 
|  | 1464 | // In the case where there is no chance of an overwritable float we may | 
|  | 1465 | // as well do the allocation immediately while r0 and r1 are untouched. | 
|  | 1466 | __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); | 
|  | 1467 | } | 
|  | 1468 |  | 
|  | 1469 | // Move r0 to a double in r2-r3. | 
|  | 1470 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 1471 | __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number. | 
|  | 1472 | __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 1473 | __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 1474 | __ cmp(r4, heap_number_map); | 
|  | 1475 | __ b(ne, &slow); | 
|  | 1476 | if (mode_ == OVERWRITE_RIGHT) { | 
|  | 1477 | __ mov(r5, Operand(r0));  // Overwrite this heap number. | 
|  | 1478 | } | 
|  | 1479 | if (use_fp_registers) { | 
|  | 1480 | CpuFeatures::Scope scope(VFP3); | 
|  | 1481 | // Load the double from tagged HeapNumber r0 to d7. | 
|  | 1482 | __ sub(r7, r0, Operand(kHeapObjectTag)); | 
|  | 1483 | __ vldr(d7, r7, HeapNumber::kValueOffset); | 
|  | 1484 | } else { | 
|  | 1485 | // Calling convention says that second double is in r2 and r3. | 
|  | 1486 | __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 
|  | 1487 | } | 
|  | 1488 | __ jmp(&finished_loading_r0); | 
|  | 1489 | __ bind(&r0_is_smi); | 
|  | 1490 | if (mode_ == OVERWRITE_RIGHT) { | 
|  | 1491 | // We can't overwrite a Smi so get address of new heap number into r5. | 
|  | 1492 | __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 
|  | 1493 | } | 
|  | 1494 |  | 
|  | 1495 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 1496 | CpuFeatures::Scope scope(VFP3); | 
|  | 1497 | // Convert smi in r0 to double in d7. | 
|  | 1498 | __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 
|  | 1499 | __ vmov(s15, r7); | 
|  | 1500 | __ vcvt_f64_s32(d7, s15); | 
|  | 1501 | if (!use_fp_registers) { | 
|  | 1502 | __ vmov(r2, r3, d7); | 
|  | 1503 | } | 
|  | 1504 | } else { | 
|  | 1505 | // Write Smi from r0 to r3 and r2 in double format. | 
|  | 1506 | __ mov(r7, Operand(r0)); | 
|  | 1507 | ConvertToDoubleStub stub3(r3, r2, r7, r4); | 
|  | 1508 | __ push(lr); | 
|  | 1509 | __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 
|  | 1510 | __ pop(lr); | 
|  | 1511 | } | 
|  | 1512 |  | 
|  | 1513 | // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. | 
|  | 1514 | // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. | 
|  | 1515 | Label r1_is_not_smi; | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 1516 | if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) && | 
|  | 1517 | HasSmiSmiFastPath()) { | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1518 | __ tst(r1, Operand(kSmiTagMask)); | 
|  | 1519 | __ b(ne, &r1_is_not_smi); | 
|  | 1520 | GenerateTypeTransition(masm);  // Tail call. | 
|  | 1521 | } | 
|  | 1522 |  | 
|  | 1523 | __ bind(&finished_loading_r0); | 
|  | 1524 |  | 
|  | 1525 | // Move r1 to a double in r0-r1. | 
|  | 1526 | __ tst(r1, Operand(kSmiTagMask)); | 
|  | 1527 | __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number. | 
|  | 1528 | __ bind(&r1_is_not_smi); | 
|  | 1529 | __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); | 
|  | 1530 | __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 1531 | __ cmp(r4, heap_number_map); | 
|  | 1532 | __ b(ne, &slow); | 
|  | 1533 | if (mode_ == OVERWRITE_LEFT) { | 
|  | 1534 | __ mov(r5, Operand(r1));  // Overwrite this heap number. | 
|  | 1535 | } | 
|  | 1536 | if (use_fp_registers) { | 
|  | 1537 | CpuFeatures::Scope scope(VFP3); | 
|  | 1538 | // Load the double from tagged HeapNumber r1 to d6. | 
|  | 1539 | __ sub(r7, r1, Operand(kHeapObjectTag)); | 
|  | 1540 | __ vldr(d6, r7, HeapNumber::kValueOffset); | 
|  | 1541 | } else { | 
|  | 1542 | // Calling convention says that first double is in r0 and r1. | 
|  | 1543 | __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 
|  | 1544 | } | 
|  | 1545 | __ jmp(&finished_loading_r1); | 
|  | 1546 | __ bind(&r1_is_smi); | 
|  | 1547 | if (mode_ == OVERWRITE_LEFT) { | 
|  | 1548 | // We can't overwrite a Smi so get address of new heap number into r5. | 
|  | 1549 | __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 
|  | 1550 | } | 
|  | 1551 |  | 
|  | 1552 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 1553 | CpuFeatures::Scope scope(VFP3); | 
|  | 1554 | // Convert smi in r1 to double in d6. | 
|  | 1555 | __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 
|  | 1556 | __ vmov(s13, r7); | 
|  | 1557 | __ vcvt_f64_s32(d6, s13); | 
|  | 1558 | if (!use_fp_registers) { | 
|  | 1559 | __ vmov(r0, r1, d6); | 
|  | 1560 | } | 
|  | 1561 | } else { | 
|  | 1562 | // Write Smi from r1 to r1 and r0 in double format. | 
|  | 1563 | __ mov(r7, Operand(r1)); | 
|  | 1564 | ConvertToDoubleStub stub4(r1, r0, r7, r9); | 
|  | 1565 | __ push(lr); | 
|  | 1566 | __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 
|  | 1567 | __ pop(lr); | 
|  | 1568 | } | 
|  | 1569 |  | 
|  | 1570 | __ bind(&finished_loading_r1); | 
|  | 1571 | } | 
|  | 1572 |  | 
|  | 1573 | if (generate_code_to_calculate_answer || do_the_call.is_linked()) { | 
|  | 1574 | __ bind(&do_the_call); | 
|  | 1575 | // If we are inlining the operation using VFP3 instructions for | 
|  | 1576 | // add, subtract, multiply, or divide, the arguments are in d6 and d7. | 
|  | 1577 | if (use_fp_registers) { | 
|  | 1578 | CpuFeatures::Scope scope(VFP3); | 
|  | 1579 | // ARMv7 VFP3 instructions to implement | 
|  | 1580 | // double precision, add, subtract, multiply, divide. | 
|  | 1581 |  | 
|  | 1582 | if (Token::MUL == op_) { | 
|  | 1583 | __ vmul(d5, d6, d7); | 
|  | 1584 | } else if (Token::DIV == op_) { | 
|  | 1585 | __ vdiv(d5, d6, d7); | 
|  | 1586 | } else if (Token::ADD == op_) { | 
|  | 1587 | __ vadd(d5, d6, d7); | 
|  | 1588 | } else if (Token::SUB == op_) { | 
|  | 1589 | __ vsub(d5, d6, d7); | 
|  | 1590 | } else { | 
|  | 1591 | UNREACHABLE(); | 
|  | 1592 | } | 
|  | 1593 | __ sub(r0, r5, Operand(kHeapObjectTag)); | 
|  | 1594 | __ vstr(d5, r0, HeapNumber::kValueOffset); | 
|  | 1595 | __ add(r0, r0, Operand(kHeapObjectTag)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 1596 | __ Ret(); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1597 | } else { | 
|  | 1598 | // If we did not inline the operation, then the arguments are in: | 
|  | 1599 | // r0: Left value (least significant part of mantissa). | 
|  | 1600 | // r1: Left value (sign, exponent, top of mantissa). | 
|  | 1601 | // r2: Right value (least significant part of mantissa). | 
|  | 1602 | // r3: Right value (sign, exponent, top of mantissa). | 
|  | 1603 | // r5: Address of heap number for result. | 
|  | 1604 |  | 
|  | 1605 | __ push(lr);   // For later. | 
|  | 1606 | __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments. | 
|  | 1607 | // Call C routine that may not cause GC or other trouble. r5 is callee | 
|  | 1608 | // save. | 
|  | 1609 | __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | 
|  | 1610 | // Store answer in the overwritable heap number. | 
|  | 1611 | #if !defined(USE_ARM_EABI) | 
|  | 1612 | // Double returned in fp coprocessor register 0 and 1, encoded as | 
|  | 1613 | // register cr8.  Offsets must be divisible by 4 for coprocessor so we | 
|  | 1614 | // need to subtract the tag from r5. | 
|  | 1615 | __ sub(r4, r5, Operand(kHeapObjectTag)); | 
|  | 1616 | __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); | 
|  | 1617 | #else | 
|  | 1618 | // Double returned in registers 0 and 1. | 
|  | 1619 | __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); | 
|  | 1620 | #endif | 
|  | 1621 | __ mov(r0, Operand(r5)); | 
|  | 1622 | // And we are done. | 
|  | 1623 | __ pop(pc); | 
|  | 1624 | } | 
|  | 1625 | } | 
|  | 1626 | } | 
|  | 1627 |  | 
|  | 1628 | if (!generate_code_to_calculate_answer && | 
|  | 1629 | !slow_reverse.is_linked() && | 
|  | 1630 | !slow.is_linked()) { | 
|  | 1631 | return; | 
|  | 1632 | } | 
|  | 1633 |  | 
|  | 1634 | if (lhs.is(r0)) { | 
|  | 1635 | __ b(&slow); | 
|  | 1636 | __ bind(&slow_reverse); | 
|  | 1637 | __ Swap(r0, r1, ip); | 
|  | 1638 | } | 
|  | 1639 |  | 
|  | 1640 | heap_number_map = no_reg;  // Don't use this any more from here on. | 
|  | 1641 |  | 
|  | 1642 | // We jump to here if something goes wrong (one param is not a number of any | 
|  | 1643 | // sort or new-space allocation fails). | 
|  | 1644 | __ bind(&slow); | 
|  | 1645 |  | 
|  | 1646 | // Push arguments to the stack | 
|  | 1647 | __ Push(r1, r0); | 
|  | 1648 |  | 
|  | 1649 | if (Token::ADD == op_) { | 
|  | 1650 | // Test for string arguments before calling runtime. | 
|  | 1651 | // r1 : first argument | 
|  | 1652 | // r0 : second argument | 
|  | 1653 | // sp[0] : second argument | 
|  | 1654 | // sp[4] : first argument | 
|  | 1655 |  | 
|  | 1656 | Label not_strings, not_string1, string1, string1_smi2; | 
|  | 1657 | __ tst(r1, Operand(kSmiTagMask)); | 
|  | 1658 | __ b(eq, &not_string1); | 
|  | 1659 | __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); | 
|  | 1660 | __ b(ge, &not_string1); | 
|  | 1661 |  | 
|  | 1662 | // First argument is a string, test second. | 
|  | 1663 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 1664 | __ b(eq, &string1_smi2); | 
|  | 1665 | __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); | 
|  | 1666 | __ b(ge, &string1); | 
|  | 1667 |  | 
|  | 1668 | // First and second argument are strings. | 
|  | 1669 | StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | 
|  | 1670 | __ TailCallStub(&string_add_stub); | 
|  | 1671 |  | 
|  | 1672 | __ bind(&string1_smi2); | 
|  | 1673 | // First argument is a string, second is a smi. Try to lookup the number | 
|  | 1674 | // string for the smi in the number string cache. | 
|  | 1675 | NumberToStringStub::GenerateLookupNumberStringCache( | 
|  | 1676 | masm, r0, r2, r4, r5, r6, true, &string1); | 
|  | 1677 |  | 
|  | 1678 | // Replace second argument on stack and tailcall string add stub to make | 
|  | 1679 | // the result. | 
|  | 1680 | __ str(r2, MemOperand(sp, 0)); | 
|  | 1681 | __ TailCallStub(&string_add_stub); | 
|  | 1682 |  | 
|  | 1683 | // Only first argument is a string. | 
|  | 1684 | __ bind(&string1); | 
|  | 1685 | __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); | 
|  | 1686 |  | 
|  | 1687 | // First argument was not a string, test second. | 
|  | 1688 | __ bind(&not_string1); | 
|  | 1689 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 1690 | __ b(eq, &not_strings); | 
|  | 1691 | __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); | 
|  | 1692 | __ b(ge, &not_strings); | 
|  | 1693 |  | 
|  | 1694 | // Only second argument is a string. | 
|  | 1695 | __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); | 
|  | 1696 |  | 
|  | 1697 | __ bind(&not_strings); | 
|  | 1698 | } | 
|  | 1699 |  | 
|  | 1700 | __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return. | 
|  | 1701 | } | 
|  | 1702 |  | 
|  | 1703 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1704 | // For bitwise ops where the inputs are not both Smis, we try here to determine | 
|  | 1705 | // whether both inputs are either Smis or at least heap numbers that can be | 
|  | 1706 | // represented by a 32 bit signed value.  We truncate towards zero as required | 
|  | 1707 | // by the ES spec.  If this is the case we do the bitwise op and see if the | 
|  | 1708 | // result is a Smi.  If so, great, otherwise we try to find a heap number to | 
|  | 1709 | // write the answer into (either by allocating or by overwriting). | 
|  | 1710 | // On entry the operands are in lhs and rhs.  On exit the answer is in r0. | 
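|  |  | // E.g. in JS, 5.7 | 0 == 5 and -5.7 | 0 == -5: ToInt32 truncates toward | 
|  |  | // zero before the bitwise operation is applied. | 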
|  | 1711 | void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm, | 
|  | 1712 | Register lhs, | 
|  | 1713 | Register rhs) { | 
|  | 1714 | Label slow, result_not_a_smi; | 
|  | 1715 | Label rhs_is_smi, lhs_is_smi; | 
|  | 1716 | Label done_checking_rhs, done_checking_lhs; | 
|  | 1717 |  | 
|  | 1718 | Register heap_number_map = r6; | 
|  | 1719 | __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 1720 |  | 
|  | 1721 | __ tst(lhs, Operand(kSmiTagMask)); | 
|  | 1722 | __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number. | 
|  | 1723 | __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); | 
|  | 1724 | __ cmp(r4, heap_number_map); | 
|  | 1725 | __ b(ne, &slow); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 1726 | __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1727 | __ jmp(&done_checking_lhs); | 
|  | 1728 | __ bind(&lhs_is_smi); | 
|  | 1729 | __ mov(r3, Operand(lhs, ASR, 1)); | 
|  | 1730 | __ bind(&done_checking_lhs); | 
|  | 1731 |  | 
|  | 1732 | __ tst(rhs, Operand(kSmiTagMask)); | 
|  | 1733 | __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number. | 
|  | 1734 | __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); | 
|  | 1735 | __ cmp(r4, heap_number_map); | 
|  | 1736 | __ b(ne, &slow); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 1737 | __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 1738 | __ jmp(&done_checking_rhs); | 
|  | 1739 | __ bind(&rhs_is_smi); | 
|  | 1740 | __ mov(r2, Operand(rhs, ASR, 1)); | 
|  | 1741 | __ bind(&done_checking_rhs); | 
|  | 1742 |  | 
|  | 1743 | ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); | 
|  | 1744 |  | 
|  | 1745 | // r0 and r1: Original operands (Smi or heap numbers). | 
|  | 1746 | // r2 and r3: Signed int32 operands. | 
|  | 1747 | switch (op_) { | 
|  | 1748 | case Token::BIT_OR:  __ orr(r2, r2, Operand(r3)); break; | 
|  | 1749 | case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break; | 
|  | 1750 | case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break; | 
|  | 1751 | case Token::SAR: | 
|  | 1752 | // Use only the 5 least significant bits of the shift count. | 
|  | 1753 | __ and_(r2, r2, Operand(0x1f)); | 
|  | 1754 | __ mov(r2, Operand(r3, ASR, r2)); | 
|  | 1755 | break; | 
|  | 1756 | case Token::SHR: | 
|  | 1757 | // Use only the 5 least significant bits of the shift count. | 
|  | 1758 | __ and_(r2, r2, Operand(0x1f)); | 
|  | 1759 | __ mov(r2, Operand(r3, LSR, r2), SetCC); | 
|  | 1760 | // SHR is special because it is required to produce a positive answer. | 
|  | 1761 | // The code below for writing into heap numbers isn't capable of writing | 
|  | 1762 | // the register as an unsigned int so we go to slow case if we hit this | 
|  | 1763 | // case. | 
|  | 1764 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 1765 | __ b(mi, &result_not_a_smi); | 
|  | 1766 | } else { | 
|  | 1767 | __ b(mi, &slow); | 
|  | 1768 | } | 
|  | 1769 | break; | 
|  | 1770 | case Token::SHL: | 
|  | 1771 | // Use only the 5 least significant bits of the shift count. | 
|  | 1772 | __ and_(r2, r2, Operand(0x1f)); | 
|  | 1773 | __ mov(r2, Operand(r3, LSL, r2)); | 
|  | 1774 | break; | 
|  | 1775 | default: UNREACHABLE(); | 
|  | 1776 | } | 
|  | 1777 | // check that the *signed* result fits in a smi | 
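|  |  | // Adding 0x40000000 sets the N flag exactly when the value lies outside | 
|  |  | // [-2^30, 2^30 - 1], the range representable as a smi. | 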
|  | 1778 | __ add(r3, r2, Operand(0x40000000), SetCC); | 
|  | 1779 | __ b(mi, &result_not_a_smi); | 
|  | 1780 | __ mov(r0, Operand(r2, LSL, kSmiTagSize)); | 
|  | 1781 | __ Ret(); | 
|  | 1782 |  | 
|  | 1783 | Label have_to_allocate, got_a_heap_number; | 
|  | 1784 | __ bind(&result_not_a_smi); | 
|  | 1785 | switch (mode_) { | 
|  | 1786 | case OVERWRITE_RIGHT: { | 
|  | 1787 | __ tst(rhs, Operand(kSmiTagMask)); | 
|  | 1788 | __ b(eq, &have_to_allocate); | 
|  | 1789 | __ mov(r5, Operand(rhs)); | 
|  | 1790 | break; | 
|  | 1791 | } | 
|  | 1792 | case OVERWRITE_LEFT: { | 
|  | 1793 | __ tst(lhs, Operand(kSmiTagMask)); | 
|  | 1794 | __ b(eq, &have_to_allocate); | 
|  | 1795 | __ mov(r5, Operand(lhs)); | 
|  | 1796 | break; | 
|  | 1797 | } | 
|  | 1798 | case NO_OVERWRITE: { | 
|  | 1799 | // Get a new heap number in r5.  r4 and r7 are scratch. | 
|  | 1800 | __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 
|  | 1801 | } | 
|  | 1802 | default: break; | 
|  | 1803 | } | 
|  | 1804 | __ bind(&got_a_heap_number); | 
|  | 1805 | // r2: Answer as signed int32. | 
|  | 1806 | // r5: Heap number to write answer into. | 
|  | 1807 |  | 
|  | 1808 | // Nothing can go wrong now, so move the heap number to r0, which is the | 
|  | 1809 | // result. | 
|  | 1810 | __ mov(r0, Operand(r5)); | 
|  | 1811 |  | 
|  | 1812 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 1813 | // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. | 
|  | 1814 | CpuFeatures::Scope scope(VFP3); | 
|  | 1815 | __ vmov(s0, r2); | 
|  | 1816 | if (op_ == Token::SHR) { | 
|  | 1817 | __ vcvt_f64_u32(d0, s0); | 
|  | 1818 | } else { | 
|  | 1819 | __ vcvt_f64_s32(d0, s0); | 
|  | 1820 | } | 
|  | 1821 | __ sub(r3, r0, Operand(kHeapObjectTag)); | 
|  | 1822 | __ vstr(d0, r3, HeapNumber::kValueOffset); | 
|  | 1823 | __ Ret(); | 
|  | 1824 | } else { | 
|  | 1825 | // Tail call that writes the int32 in r2 to the heap number in r0, using | 
|  | 1826 | // r3 as scratch.  r0 is preserved and returned. | 
|  | 1827 | WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 
|  | 1828 | __ TailCallStub(&stub); | 
|  | 1829 | } | 
|  | 1830 |  | 
|  | 1831 | if (mode_ != NO_OVERWRITE) { | 
|  | 1832 | __ bind(&have_to_allocate); | 
|  | 1833 | // Get a new heap number in r5.  r4 and r7 are scratch. | 
|  | 1834 | __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 
|  | 1835 | __ jmp(&got_a_heap_number); | 
|  | 1836 | } | 
|  | 1837 |  | 
|  | 1838 | // If all else failed then we go to the runtime system. | 
|  | 1839 | __ bind(&slow); | 
|  | 1840 | __ Push(lhs, rhs);  // Restore stack. | 
|  | 1841 | switch (op_) { | 
|  | 1842 | case Token::BIT_OR: | 
|  | 1843 | __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | 
|  | 1844 | break; | 
|  | 1845 | case Token::BIT_AND: | 
|  | 1846 | __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | 
|  | 1847 | break; | 
|  | 1848 | case Token::BIT_XOR: | 
|  | 1849 | __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | 
|  | 1850 | break; | 
|  | 1851 | case Token::SAR: | 
|  | 1852 | __ InvokeBuiltin(Builtins::SAR, JUMP_JS); | 
|  | 1853 | break; | 
|  | 1854 | case Token::SHR: | 
|  | 1855 | __ InvokeBuiltin(Builtins::SHR, JUMP_JS); | 
|  | 1856 | break; | 
|  | 1857 | case Token::SHL: | 
|  | 1858 | __ InvokeBuiltin(Builtins::SHL, JUMP_JS); | 
|  | 1859 | break; | 
|  | 1860 | default: | 
|  | 1861 | UNREACHABLE(); | 
|  | 1862 | } | 
|  | 1863 | } | 
|  | 1864 |  | 
|  | 1865 |  | 
|  | 1866 |  | 
|  | 1867 |  | 
|  | 1868 | // This function multiplies source by known_int using shift-and-add tricks. | 
|  | 1869 | // The known int is also passed Smi-tagged in a register for the cases where | 
|  | 1870 | // no good trick exists, and the result may need shifting by *required_shift. | 
|  | 1871 | static void MultiplyByKnownIntInStub( | 
|  | 1872 | MacroAssembler* masm, | 
|  | 1873 | Register result, | 
|  | 1874 | Register source, | 
|  | 1875 | Register known_int_register,   // Smi tagged. | 
|  | 1876 | int known_int, | 
|  | 1877 | int* required_shift) {  // Including Smi tag shift | 
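|  |  | // E.g. for known_int == 10 this computes 5 * source and reports a shift | 
|  |  | // of 2, since (5 * source) << 2 is the Smi-tagged (doubled) product. | 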
|  | 1878 | switch (known_int) { | 
|  | 1879 | case 3: | 
|  | 1880 | __ add(result, source, Operand(source, LSL, 1)); | 
|  | 1881 | *required_shift = 1; | 
|  | 1882 | break; | 
|  | 1883 | case 5: | 
|  | 1884 | __ add(result, source, Operand(source, LSL, 2)); | 
|  | 1885 | *required_shift = 1; | 
|  | 1886 | break; | 
|  | 1887 | case 6: | 
|  | 1888 | __ add(result, source, Operand(source, LSL, 1)); | 
|  | 1889 | *required_shift = 2; | 
|  | 1890 | break; | 
|  | 1891 | case 7: | 
|  | 1892 | __ rsb(result, source, Operand(source, LSL, 3)); | 
|  | 1893 | *required_shift = 1; | 
|  | 1894 | break; | 
|  | 1895 | case 9: | 
|  | 1896 | __ add(result, source, Operand(source, LSL, 3)); | 
|  | 1897 | *required_shift = 1; | 
|  | 1898 | break; | 
|  | 1899 | case 10: | 
|  | 1900 | __ add(result, source, Operand(source, LSL, 2)); | 
|  | 1901 | *required_shift = 2; | 
|  | 1902 | break; | 
|  | 1903 | default: | 
|  | 1904 | ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient. | 
|  | 1905 | __ mul(result, source, known_int_register); | 
|  | 1906 | *required_shift = 0; | 
|  | 1907 | } | 
|  | 1908 | } | 
|  | 1909 |  | 
|  | 1910 |  | 
|  | 1911 | // This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3 | 
|  | 1912 | // trick.  See http://en.wikipedia.org/wiki/Divisibility_rule | 
|  | 1913 | // Takes the sum of the digits in base (mask + 1) repeatedly until we have a | 
|  | 1914 | // number from 0 to mask.  On exit the 'eq' condition flags are set if the | 
|  | 1915 | // answer is exactly the mask. | 
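|  |  | // E.g. for mask == 3 (mod 3): 11 = b1011 -> 3 + 2 = 5 -> 1 + 1 = 2, and | 
|  |  | // indeed 11 % 3 == 2. | 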
|  | 1916 | void IntegerModStub::DigitSum(MacroAssembler* masm, | 
|  | 1917 | Register lhs, | 
|  | 1918 | int mask, | 
|  | 1919 | int shift, | 
|  | 1920 | Label* entry) { | 
|  | 1921 | ASSERT(mask > 0); | 
|  | 1922 | ASSERT(mask <= 0xff);  // This ensures we don't need ip to use it. | 
|  | 1923 | Label loop; | 
|  | 1924 | __ bind(&loop); | 
|  | 1925 | __ and_(ip, lhs, Operand(mask)); | 
|  | 1926 | __ add(lhs, ip, Operand(lhs, LSR, shift)); | 
|  | 1927 | __ bind(entry); | 
|  | 1928 | __ cmp(lhs, Operand(mask)); | 
|  | 1929 | __ b(gt, &loop); | 
|  | 1930 | } | 
|  | 1931 |  | 
|  | 1932 |  | 
|  | 1933 | void IntegerModStub::DigitSum(MacroAssembler* masm, | 
|  | 1934 | Register lhs, | 
|  | 1935 | Register scratch, | 
|  | 1936 | int mask, | 
|  | 1937 | int shift1, | 
|  | 1938 | int shift2, | 
|  | 1939 | Label* entry) { | 
|  | 1940 | ASSERT(mask > 0); | 
|  | 1941 | ASSERT(mask <= 0xff);  // This ensures we don't need ip to use it. | 
|  | 1942 | Label loop; | 
|  | 1943 | __ bind(&loop); | 
|  | 1944 | __ bic(scratch, lhs, Operand(mask)); | 
|  | 1945 | __ and_(ip, lhs, Operand(mask)); | 
|  | 1946 | __ add(lhs, ip, Operand(lhs, LSR, shift1)); | 
|  | 1947 | __ add(lhs, lhs, Operand(scratch, LSR, shift2)); | 
|  | 1948 | __ bind(entry); | 
|  | 1949 | __ cmp(lhs, Operand(mask)); | 
|  | 1950 | __ b(gt, &loop); | 
|  | 1951 | } | 
|  | 1952 |  | 
|  | 1953 |  | 
|  | 1954 | // Splits the number into two halves (bottom half has shift bits).  The top | 
|  | 1955 | // half is subtracted from the bottom half.  If the result is negative then | 
|  | 1956 | // rhs is added. | 
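|  |  | // This works because the base (1 << shift) is congruent to -1 modulo rhs: | 
|  |  | // writing lhs = a * (1 << shift) + b gives b - a == lhs (mod rhs). | 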
|  | 1957 | void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm, | 
|  | 1958 | Register lhs, | 
|  | 1959 | int shift, | 
|  | 1960 | int rhs) { | 
|  | 1961 | int mask = (1 << shift) - 1; | 
|  | 1962 | __ and_(ip, lhs, Operand(mask)); | 
|  | 1963 | __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC); | 
|  | 1964 | __ add(lhs, lhs, Operand(rhs), LeaveCC, mi); | 
|  | 1965 | } | 
|  | 1966 |  | 
|  | 1967 |  | 
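|  |  | // Reduces lhs, known to be at most max, modulo the denominator by | 
|  |  | // conditionally subtracting decreasing power-of-two multiples of it. | 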
|  | 1968 | void IntegerModStub::ModReduce(MacroAssembler* masm, | 
|  | 1969 | Register lhs, | 
|  | 1970 | int max, | 
|  | 1971 | int denominator) { | 
|  | 1972 | int limit = denominator; | 
|  | 1973 | while (limit * 2 <= max) limit *= 2; | 
|  | 1974 | while (limit >= denominator) { | 
|  | 1975 | __ cmp(lhs, Operand(limit)); | 
|  | 1976 | __ sub(lhs, lhs, Operand(limit), LeaveCC, ge); | 
|  | 1977 | limit >>= 1; | 
|  | 1978 | } | 
|  | 1979 | } | 
|  | 1980 |  | 
|  | 1981 |  | 
|  | 1982 | void IntegerModStub::ModAnswer(MacroAssembler* masm, | 
|  | 1983 | Register result, | 
|  | 1984 | Register shift_distance, | 
|  | 1985 | Register mask_bits, | 
|  | 1986 | Register sum_of_digits) { | 
|  | 1987 | __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance)); | 
|  | 1988 | __ Ret(); | 
|  | 1989 | } | 
|  | 1990 |  | 
|  | 1991 |  | 
|  | 1992 | // See comment for class. | 
|  | 1993 | void IntegerModStub::Generate(MacroAssembler* masm) { | 
|  | 1994 | __ mov(lhs_, Operand(lhs_, LSR, shift_distance_)); | 
|  | 1995 | __ bic(odd_number_, odd_number_, Operand(1)); | 
|  | 1996 | __ mov(odd_number_, Operand(odd_number_, LSL, 1)); | 
|  | 1997 | // We now have (odd_number_ - 1) * 2 in the register. | 
|  | 1998 | // Build a switch out of branches instead of data because it avoids | 
|  | 1999 | // having to teach the assembler about intra-code-object pointers | 
|  | 2000 | // that are not in relative branch instructions. | 
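|  |  | // Consecutive odd denominators differ by 2, so (odd_number_ - 1) * 2 | 
|  |  | // advances by 4 bytes per denominator: exactly one branch instruction. | 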
|  | 2001 | Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19; | 
|  | 2002 | Label mod21, mod23, mod25; | 
|  | 2003 | { Assembler::BlockConstPoolScope block_const_pool(masm); | 
|  | 2004 | __ add(pc, pc, Operand(odd_number_)); | 
|  | 2005 | // When you read pc it is always 8 ahead, but when you write it you always | 
|  | 2006 | // write the actual value.  So we put in two nops to take up the slack. | 
|  | 2007 | __ nop(); | 
|  | 2008 | __ nop(); | 
|  | 2009 | __ b(&mod3); | 
|  | 2010 | __ b(&mod5); | 
|  | 2011 | __ b(&mod7); | 
|  | 2012 | __ b(&mod9); | 
|  | 2013 | __ b(&mod11); | 
|  | 2014 | __ b(&mod13); | 
|  | 2015 | __ b(&mod15); | 
|  | 2016 | __ b(&mod17); | 
|  | 2017 | __ b(&mod19); | 
|  | 2018 | __ b(&mod21); | 
|  | 2019 | __ b(&mod23); | 
|  | 2020 | __ b(&mod25); | 
|  | 2021 | } | 
|  | 2022 |  | 
|  | 2023 | // For each denominator we find a multiple that is almost only ones | 
|  | 2024 | // when expressed in binary.  Then we do the sum-of-digits trick for | 
|  | 2025 | // that number.  If the multiple is not 1 then we have to do a little | 
|  | 2026 | // more work afterwards to get the answer into the 0 to denominator-1 | 
|  | 2027 | // range. | 
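|  |  | // E.g. for 5 the multiple is 15 = b1111: summing base-16 digits preserves | 
|  |  | // the value mod 15, hence also mod 5. | 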
|  | 2028 | DigitSum(masm, lhs_, 3, 2, &mod3);  // 3 = b11. | 
|  | 2029 | __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq); | 
|  | 2030 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2031 |  | 
|  | 2032 | DigitSum(masm, lhs_, 0xf, 4, &mod5);  // 5 * 3 = b1111. | 
|  | 2033 | ModGetInRangeBySubtraction(masm, lhs_, 2, 5); | 
|  | 2034 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2035 |  | 
|  | 2036 | DigitSum(masm, lhs_, 7, 3, &mod7);  // 7 = b111. | 
|  | 2037 | __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq); | 
|  | 2038 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2039 |  | 
|  | 2040 | DigitSum(masm, lhs_, 0x3f, 6, &mod9);  // 7 * 9 = b111111. | 
|  | 2041 | ModGetInRangeBySubtraction(masm, lhs_, 3, 9); | 
|  | 2042 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2043 |  | 
|  | 2044 | DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11);  // 5 * 11 = b110111. | 
|  | 2045 | ModReduce(masm, lhs_, 0x3f, 11); | 
|  | 2046 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2047 |  | 
|  | 2048 | DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13);  // 19 * 13 = b11110111. | 
|  | 2049 | ModReduce(masm, lhs_, 0xff, 13); | 
|  | 2050 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2051 |  | 
|  | 2052 | DigitSum(masm, lhs_, 0xf, 4, &mod15);  // 15 = b1111. | 
|  | 2053 | __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq); | 
|  | 2054 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2055 |  | 
|  | 2056 | DigitSum(masm, lhs_, 0xff, 8, &mod17);  // 15 * 17 = b11111111. | 
|  | 2057 | ModGetInRangeBySubtraction(masm, lhs_, 4, 17); | 
|  | 2058 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2059 |  | 
|  | 2060 | DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19);  // 13 * 19 = b11110111. | 
|  | 2061 | ModReduce(masm, lhs_, 0xff, 19); | 
|  | 2062 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2063 |  | 
|  | 2064 | DigitSum(masm, lhs_, 0x3f, 6, &mod21);  // 3 * 21 = b111111. | 
|  | 2065 | ModReduce(masm, lhs_, 0x3f, 21); | 
|  | 2066 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2067 |  | 
|  | 2068 | DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23);  // 11 * 23 = b11111101. | 
|  | 2069 | ModReduce(masm, lhs_, 0xff, 23); | 
|  | 2070 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2071 |  | 
|  | 2072 | DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25);  // 5 * 25 = b1111101. | 
|  | 2073 | ModReduce(masm, lhs_, 0x7f, 25); | 
|  | 2074 | ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_); | 
|  | 2075 | } | 
|  | 2076 |  | 
|  | 2077 |  | 
|  | 2078 | void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | 
|  | 2079 | // lhs_ : x | 
|  | 2080 | // rhs_ : y | 
|  | 2081 | // r0   : result | 
|  | 2082 |  | 
|  | 2083 | Register result = r0; | 
|  | 2084 | Register lhs = lhs_; | 
|  | 2085 | Register rhs = rhs_; | 
|  | 2086 |  | 
|  | 2087 | // This code can't cope with other register allocations yet. | 
|  | 2088 | ASSERT(result.is(r0) && | 
|  | 2089 | ((lhs.is(r0) && rhs.is(r1)) || | 
|  | 2090 | (lhs.is(r1) && rhs.is(r0)))); | 
|  | 2091 |  | 
|  | 2092 | Register smi_test_reg = r7; | 
|  | 2093 | Register scratch = r9; | 
|  | 2094 |  | 
|  | 2095 | // All ops need to know whether we are dealing with two Smis.  Set up | 
|  | 2096 | // smi_test_reg to tell us that. | 
|  | 2097 | if (ShouldGenerateSmiCode()) { | 
|  | 2098 | __ orr(smi_test_reg, lhs, Operand(rhs)); | 
|  | 2099 | } | 
|  | 2100 |  | 
|  | 2101 | switch (op_) { | 
|  | 2102 | case Token::ADD: { | 
|  | 2103 | Label not_smi; | 
|  | 2104 | // Fast path. | 
|  | 2105 | if (ShouldGenerateSmiCode()) { | 
|  | 2106 | STATIC_ASSERT(kSmiTag == 0);  // Adjust code below. | 
|  | 2107 | __ tst(smi_test_reg, Operand(kSmiTagMask)); | 
|  | 2108 | __ b(ne, &not_smi); | 
|  | 2109 | __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically. | 
|  | 2110 | // Return if no overflow. | 
|  | 2111 | __ Ret(vc); | 
|  | 2112 | __ sub(r0, r0, Operand(r1));  // Revert optimistic add. | 
|  | 2113 | } | 
|  | 2114 | HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD); | 
|  | 2115 | break; | 
|  | 2116 | } | 
|  | 2117 |  | 
|  | 2118 | case Token::SUB: { | 
|  | 2119 | Label not_smi; | 
|  | 2120 | // Fast path. | 
|  | 2121 | if (ShouldGenerateSmiCode()) { | 
|  | 2122 | STATIC_ASSERT(kSmiTag == 0);  // Adjust code below. | 
|  | 2123 | __ tst(smi_test_reg, Operand(kSmiTagMask)); | 
|  | 2124 | __ b(ne, &not_smi); | 
|  | 2125 | if (lhs.is(r1)) { | 
|  | 2126 | __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically. | 
|  | 2127 | // Return if no overflow. | 
|  | 2128 | __ Ret(vc); | 
|  | 2129 | __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract. | 
|  | 2130 | } else { | 
|  | 2131 | __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically. | 
|  | 2132 | // Return if no overflow. | 
|  | 2133 | __ Ret(vc); | 
|  | 2134 | __ add(r0, r0, Operand(r1));  // Revert optimistic subtract. | 
|  | 2135 | } | 
|  | 2136 | } | 
|  | 2137 | HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB); | 
|  | 2138 | break; | 
|  | 2139 | } | 
|  | 2140 |  | 
|  | 2141 | case Token::MUL: { | 
|  | 2142 | Label not_smi, slow; | 
|  | 2143 | if (ShouldGenerateSmiCode()) { | 
|  | 2144 | STATIC_ASSERT(kSmiTag == 0);  // adjust code below | 
|  | 2145 | __ tst(smi_test_reg, Operand(kSmiTagMask)); | 
|  | 2146 | Register scratch2 = smi_test_reg; | 
|  | 2147 | smi_test_reg = no_reg; | 
|  | 2148 | __ b(ne, &not_smi); | 
|  | 2149 | // Remove tag from one operand (but keep sign), so that result is Smi. | 
|  | 2150 | __ mov(ip, Operand(rhs, ASR, kSmiTagSize)); | 
|  | 2151 | // Do multiplication | 
|  | 2152 | // scratch = lower 32 bits of ip * lhs. | 
|  | 2153 | __ smull(scratch, scratch2, lhs, ip); | 
|  | 2154 | // Go slow on overflows (overflow bit is not set). | 
|  | 2155 | __ mov(ip, Operand(scratch, ASR, 31)); | 
|  | 2156 | // No overflow if higher 33 bits are identical. | 
|  | 2157 | __ cmp(ip, Operand(scratch2)); | 
|  | 2158 | __ b(ne, &slow); | 
|  | 2159 | // Go slow on zero result to handle -0. | 
|  | 2160 | __ tst(scratch, Operand(scratch)); | 
|  | 2161 | __ mov(result, Operand(scratch), LeaveCC, ne); | 
|  | 2162 | __ Ret(ne); | 
|  | 2163 | // We need -0 if we were multiplying a negative number with 0 to get 0. | 
|  | 2164 | // We know one of them was zero. | 
|  | 2165 | __ add(scratch2, rhs, Operand(lhs), SetCC); | 
|  | 2166 | __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl); | 
|  | 2167 | __ Ret(pl);  // Return Smi 0 if the non-zero one was positive. | 
|  | 2168 | // Slow case.  We fall through here if we multiplied a negative number | 
|  | 2169 | // with 0, because that would mean we should produce -0. | 
|  | 2170 | __ bind(&slow); | 
|  | 2171 | } | 
|  | 2172 | HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL); | 
|  | 2173 | break; | 
|  | 2174 | } | 
|  | 2175 |  | 
|  | 2176 | case Token::DIV: | 
|  | 2177 | case Token::MOD: { | 
|  | 2178 | Label not_smi; | 
|  | 2179 | if (ShouldGenerateSmiCode() && specialized_on_rhs_) { | 
|  | 2180 | Label lhs_is_unsuitable; | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 2181 | __ JumpIfNotSmi(lhs, &not_smi); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 2182 | if (IsPowerOf2(constant_rhs_)) { | 
|  | 2183 | if (op_ == Token::MOD) { | 
|  | 2184 | __ and_(rhs, | 
|  | 2185 | lhs, | 
|  | 2186 | Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), | 
|  | 2187 | SetCC); | 
|  | 2188 | // We now have the answer, but if the input was negative we also | 
|  | 2189 | // have the sign bit.  Our work is done if the result is | 
|  | 2190 | // positive or zero: | 
|  | 2191 | if (!rhs.is(r0)) { | 
|  | 2192 | __ mov(r0, rhs, LeaveCC, pl); | 
|  | 2193 | } | 
|  | 2194 | __ Ret(pl); | 
|  | 2195 | // A mod of a negative left hand side must return a negative number. | 
|  | 2196 | // Unfortunately if the answer is 0 then we must return -0.  And we | 
|  | 2197 | // already optimistically trashed rhs so we may need to restore it. | 
|  | 2198 | __ eor(rhs, rhs, Operand(0x80000000u), SetCC); | 
|  | 2199 | // Next two instructions are conditional on the answer being -0. | 
|  | 2200 | __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq); | 
|  | 2201 | __ b(eq, &lhs_is_unsuitable); | 
|  | 2202 | // We need to subtract the divisor.  E.g. -3 % 4 == -3. | 
|  | 2203 | __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_))); | 
|  | 2204 | } else { | 
|  | 2205 | ASSERT(op_ == Token::DIV); | 
|  | 2206 | __ tst(lhs, | 
|  | 2207 | Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1))); | 
|  | 2208 | __ b(ne, &lhs_is_unsuitable);  // Go slow on negative or remainder. | 
|  | 2209 | int shift = 0; | 
|  | 2210 | int d = constant_rhs_; | 
|  | 2211 | while ((d & 1) == 0) { | 
|  | 2212 | d >>= 1; | 
|  | 2213 | shift++; | 
|  | 2214 | } | 
|  | 2215 | __ mov(r0, Operand(lhs, LSR, shift)); | 
|  | 2216 | __ bic(r0, r0, Operand(kSmiTagMask)); | 
|  | 2217 | } | 
|  | 2218 | } else { | 
|  | 2219 | // Not a power of 2. | 
|  | 2220 | __ tst(lhs, Operand(0x80000000u)); | 
|  | 2221 | __ b(ne, &lhs_is_unsuitable); | 
|  | 2222 | // Find a fixed point reciprocal of the divisor so we can divide by | 
|  | 2223 | // multiplying. | 
|  | 2224 | double divisor = 1.0 / constant_rhs_; | 
|  | 2225 | int shift = 32; | 
|  | 2226 | double scale = 4294967296.0;  // 1 << 32. | 
|  | 2227 | uint32_t mul; | 
|  | 2228 | // Maximise the precision of the fixed point reciprocal. | 
|  | 2229 | while (true) { | 
|  | 2230 | mul = static_cast<uint32_t>(scale * divisor); | 
|  | 2231 | if (mul >= 0x7fffffff) break; | 
|  | 2232 | scale *= 2.0; | 
|  | 2233 | shift++; | 
|  | 2234 | } | 
|  | 2235 | mul++; | 
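|  |  | // E.g. for constant_rhs_ == 5 the loop above ends with shift == 34 and | 
|  |  | // mul == 0xCCCCCCCD, a fixed-point approximation of 1/5. | 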
|  | 2236 | Register scratch2 = smi_test_reg; | 
|  | 2237 | smi_test_reg = no_reg; | 
|  | 2238 | __ mov(scratch2, Operand(mul)); | 
|  | 2239 | __ umull(scratch, scratch2, scratch2, lhs); | 
|  | 2240 | __ mov(scratch2, Operand(scratch2, LSR, shift - 31)); | 
|  | 2241 | // scratch2 is lhs / rhs.  scratch2 is not Smi tagged. | 
|  | 2242 | // rhs is still the known rhs.  rhs is Smi tagged. | 
|  | 2243 | // lhs is still the unknown lhs.  lhs is Smi tagged. | 
|  | 2244 | int required_scratch_shift = 0;  // Including the Smi tag shift of 1. | 
|  | 2245 | // scratch = scratch2 * rhs. | 
|  | 2246 | MultiplyByKnownIntInStub(masm, | 
|  | 2247 | scratch, | 
|  | 2248 | scratch2, | 
|  | 2249 | rhs, | 
|  | 2250 | constant_rhs_, | 
|  | 2251 | &required_scratch_shift); | 
|  | 2252 | // scratch << required_scratch_shift is now the Smi tagged rhs * | 
|  | 2253 | // (lhs / rhs) where / indicates integer division. | 
|  | 2254 | if (op_ == Token::DIV) { | 
|  | 2255 | __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift)); | 
|  | 2256 | __ b(ne, &lhs_is_unsuitable);  // There was a remainder. | 
|  | 2257 | __ mov(result, Operand(scratch2, LSL, kSmiTagSize)); | 
|  | 2258 | } else { | 
|  | 2259 | ASSERT(op_ == Token::MOD); | 
|  | 2260 | __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift)); | 
|  | 2261 | } | 
|  | 2262 | } | 
|  | 2263 | __ Ret(); | 
|  | 2264 | __ bind(&lhs_is_unsuitable); | 
|  | 2265 | } else if (op_ == Token::MOD && | 
|  | 2266 | runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS && | 
|  | 2267 | runtime_operands_type_ != BinaryOpIC::STRINGS) { | 
|  | 2268 | // Do generate a bit of smi code for modulus even though the default for | 
|  | 2269 | // modulus is not to do it, but as the ARM processor has no coprocessor | 
|  | 2270 | // support for modulus, checking for smis makes sense.  We can handle | 
|  | 2271 | // 1 to 25 times any power of 2.  This covers over half the numbers from | 
|  | 2272 | // 1 to 100 including all of the first 25.  (Actually the constants < 10 | 
|  | 2273 | // are handled above by reciprocal multiplication.  We only get here for | 
|  | 2274 | // those cases if the right hand side is not a constant or for cases | 
|  | 2275 | // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod | 
|  | 2276 | // stub.) | 
|  | 2277 | Label slow; | 
|  | 2278 | Label not_power_of_2; | 
|  | 2279 | ASSERT(!ShouldGenerateSmiCode()); | 
|  | 2280 | STATIC_ASSERT(kSmiTag == 0);  // Adjust code below. | 
|  | 2281 | // Check for two positive smis. | 
|  | 2282 | __ orr(smi_test_reg, lhs, Operand(rhs)); | 
|  | 2283 | __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask)); | 
|  | 2284 | __ b(ne, &slow); | 
|  | 2285 | // Check that rhs is a power of two and not zero. | 
|  | 2286 | Register mask_bits = r3; | 
|  | 2287 | __ sub(scratch, rhs, Operand(1), SetCC); | 
|  | 2288 | __ b(mi, &slow); | 
|  | 2289 | __ and_(mask_bits, rhs, Operand(scratch), SetCC); | 
|  | 2290 | __ b(ne, &not_power_of_2); | 
|  | 2291 | // Calculate power of two modulus. | 
|  | 2292 | __ and_(result, lhs, Operand(scratch)); | 
|  | 2293 | __ Ret(); | 
|  | 2294 |  | 
|  | 2295 | __ bind(&not_power_of_2); | 
|  | 2296 | __ eor(scratch, scratch, Operand(mask_bits)); | 
|  | 2297 | // At least two bits are set in the modulus.  The high one(s) are in | 
|  | 2298 | // mask_bits and the low one is scratch + 1. | 
|  | 2299 | __ and_(mask_bits, scratch, Operand(lhs)); | 
|  | 2300 | Register shift_distance = scratch; | 
|  | 2301 | scratch = no_reg; | 
|  | 2302 |  | 
|  | 2303 | // The rhs consists of a power of 2 multiplied by some odd number. | 
|  | 2304 | // The power-of-2 part we handle by putting the corresponding bits | 
|  | 2305 | // from the lhs in the mask_bits register, and the power in the | 
|  | 2306 | // shift_distance register.  Shift distance is never 0 due to Smi | 
|  | 2307 | // tagging. | 
|  | 2308 | __ CountLeadingZeros(r4, shift_distance, shift_distance); | 
|  | 2309 | __ rsb(shift_distance, r4, Operand(32)); | 
|  | 2310 |  | 
|  | 2311 | // Now we need to find out what the odd number is. The last bit is | 
|  | 2312 | // always 1. | 
|  | 2313 | Register odd_number = r4; | 
|  | 2314 | __ mov(odd_number, Operand(rhs, LSR, shift_distance)); | 
|  | 2315 | __ cmp(odd_number, Operand(25)); | 
|  | 2316 | __ b(gt, &slow); | 
|  | 2317 |  | 
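|  |  | // An illustrative decomposition (a sketch of the property used, not of | 
|  |  | // the register dance above): a smi-tagged rhs is odd * 2^shift with | 
|  |  | // shift >= 1 because of the tag, e.g. untagged 192 = 3 * 2^6 tags to | 
|  |  | // 384 = 3 * 2^7, giving odd_number == 3. | 
|  |  | //   void Decompose(uint32_t rhs, uint32_t* odd, int* shift) { | 
|  |  | //     *shift = 0; | 
|  |  | //     while ((rhs & 1) == 0) { rhs >>= 1; (*shift)++; } | 
|  |  | //     *odd = rhs;  // for 384: odd == 3, shift == 7 | 
|  |  | //   } | 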
|  | 2318 | IntegerModStub stub( | 
|  | 2319 | result, shift_distance, odd_number, mask_bits, lhs, r5); | 
|  | 2320 | __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);  // Tail call. | 
|  | 2321 |  | 
|  | 2322 | __ bind(&slow); | 
|  | 2323 | } | 
|  | 2324 | HandleBinaryOpSlowCases( | 
|  | 2325 | masm, | 
|  | 2326 | &not_smi, | 
|  | 2327 | lhs, | 
|  | 2328 | rhs, | 
|  | 2329 | op_ == Token::MOD ? Builtins::MOD : Builtins::DIV); | 
|  | 2330 | break; | 
|  | 2331 | } | 
|  | 2332 |  | 
|  | 2333 | case Token::BIT_OR: | 
|  | 2334 | case Token::BIT_AND: | 
|  | 2335 | case Token::BIT_XOR: | 
|  | 2336 | case Token::SAR: | 
|  | 2337 | case Token::SHR: | 
|  | 2338 | case Token::SHL: { | 
|  | 2339 | Label slow; | 
|  | 2340 | STATIC_ASSERT(kSmiTag == 0);  // adjust code below | 
|  | 2341 | __ tst(smi_test_reg, Operand(kSmiTagMask)); | 
|  | 2342 | __ b(ne, &slow); | 
|  | 2343 | Register scratch2 = smi_test_reg; | 
|  | 2344 | smi_test_reg = no_reg; | 
|  | 2345 | switch (op_) { | 
|  | 2346 | case Token::BIT_OR:  __ orr(result, rhs, Operand(lhs)); break; | 
|  | 2347 | case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break; | 
|  | 2348 | case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break; | 
|  | 2349 | case Token::SAR: | 
|  | 2350 | // Remove tags from right operand. | 
|  | 2351 | __ GetLeastBitsFromSmi(scratch2, rhs, 5); | 
|  | 2352 | __ mov(result, Operand(lhs, ASR, scratch2)); | 
|  | 2353 | // Smi tag result. | 
|  | 2354 | __ bic(result, result, Operand(kSmiTagMask)); | 
|  | 2355 | break; | 
|  | 2356 | case Token::SHR: | 
|  | 2357 | // Remove tags from operands.  We can't do this on a 31 bit number | 
|  | 2358 | // because then the 0s get shifted into bit 30 instead of bit 31. | 
|  | 2359 | __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x | 
|  | 2360 | __ GetLeastBitsFromSmi(scratch2, rhs, 5); | 
|  | 2361 | __ mov(scratch, Operand(scratch, LSR, scratch2)); | 
|  | 2362 | // Unsigned shift is not allowed to produce a negative number, so | 
|  | 2363 | // check the sign bit and the sign bit after Smi tagging. | 
|  | 2364 | __ tst(scratch, Operand(0xc0000000)); | 
|  | 2365 | __ b(ne, &slow); | 
|  | 2366 | // Smi tag result. | 
|  | 2367 | __ mov(result, Operand(scratch, LSL, kSmiTagSize)); | 
|  | 2368 | break; | 
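|  |  | // The 0xc0000000 test above amounts to this sketch: bit 31 must be | 
|  |  | // clear (non-negative) and bit 30 must be clear so the sign survives | 
|  |  | // the one-bit smi tag shift. | 
|  |  | //   bool FitsPositiveSmi(uint32_t x) { return (x & 0xc0000000u) == 0; } | 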
|  | 2369 | case Token::SHL: | 
|  | 2370 | // Remove tags from operands. | 
|  | 2371 | __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x | 
|  | 2372 | __ GetLeastBitsFromSmi(scratch2, rhs, 5); | 
|  | 2373 | __ mov(scratch, Operand(scratch, LSL, scratch2)); | 
|  | 2374 | // Check that the signed result fits in a Smi. | 
|  | 2375 | __ add(scratch2, scratch, Operand(0x40000000), SetCC); | 
|  | 2376 | __ b(mi, &slow); | 
|  | 2377 | __ mov(result, Operand(scratch, LSL, kSmiTagSize)); | 
|  | 2378 | break; | 
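|  |  | // Likewise the add-0x40000000 trick: the sum is negative (mi) exactly | 
|  |  | // when the value lies outside the signed 31-bit range a smi can hold. | 
|  |  | //   bool FitsSmi(int32_t x) { | 
|  |  | //     return static_cast<int32_t>(static_cast<uint32_t>(x) | 
|  |  | //                                 + 0x40000000u) >= 0; | 
|  |  | //   } | 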
|  | 2379 | default: UNREACHABLE(); | 
|  | 2380 | } | 
|  | 2381 | __ Ret(); | 
|  | 2382 | __ bind(&slow); | 
|  | 2383 | HandleNonSmiBitwiseOp(masm, lhs, rhs); | 
|  | 2384 | break; | 
|  | 2385 | } | 
|  | 2386 |  | 
|  | 2387 | default: UNREACHABLE(); | 
|  | 2388 | } | 
|  | 2389 | // This code should be unreachable. | 
|  | 2390 | __ stop("Unreachable"); | 
|  | 2391 |  | 
|  | 2392 | // Generate an unreachable reference to the DEFAULT stub so that it can be | 
|  | 2393 | // found at the end of this stub when clearing ICs at GC. | 
|  | 2394 | // TODO(kaznacheev): Check performance impact and get rid of this. | 
|  | 2395 | if (runtime_operands_type_ != BinaryOpIC::DEFAULT) { | 
|  | 2396 | GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT); | 
|  | 2397 | __ CallStub(&uninit); | 
|  | 2398 | } | 
|  | 2399 | } | 
|  | 2400 |  | 
|  | 2401 |  | 
|  | 2402 | void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 
|  | 2403 | Label get_result; | 
|  | 2404 |  | 
|  | 2405 | __ Push(r1, r0); | 
|  | 2406 |  | 
|  | 2407 | __ mov(r2, Operand(Smi::FromInt(MinorKey()))); | 
|  | 2408 | __ mov(r1, Operand(Smi::FromInt(op_))); | 
|  | 2409 | __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); | 
|  | 2410 | __ Push(r2, r1, r0); | 
|  | 2411 |  | 
|  | 2412 | __ TailCallExternalReference( | 
|  | 2413 | ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), | 
|  | 2414 | 5, | 
|  | 2415 | 1); | 
|  | 2416 | } | 
|  | 2417 |  | 
|  | 2418 |  | 
|  | 2419 | Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | 
|  | 2420 | GenericBinaryOpStub stub(key, type_info); | 
|  | 2421 | return stub.GetCode(); | 
|  | 2422 | } | 
|  | 2423 |  | 
|  | 2424 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 2425 | Handle<Code> GetTypeRecordingBinaryOpStub(int key, | 
|  | 2426 | TRBinaryOpIC::TypeInfo type_info, | 
|  | 2427 | TRBinaryOpIC::TypeInfo result_type_info) { | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 2428 | TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); | 
|  | 2429 | return stub.GetCode(); | 
|  | 2430 | } | 
|  | 2431 |  | 
|  | 2432 |  | 
|  | 2433 | void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 
|  | 2434 | Label get_result; | 
|  | 2435 |  | 
|  | 2436 | __ Push(r1, r0); | 
|  | 2437 |  | 
|  | 2438 | __ mov(r2, Operand(Smi::FromInt(MinorKey()))); | 
|  | 2439 | __ mov(r1, Operand(Smi::FromInt(op_))); | 
|  | 2440 | __ mov(r0, Operand(Smi::FromInt(operands_type_))); | 
|  | 2441 | __ Push(r2, r1, r0); | 
|  | 2442 |  | 
|  | 2443 | __ TailCallExternalReference( | 
|  | 2444 | ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), | 
|  | 2445 | 5, | 
|  | 2446 | 1); | 
|  | 2447 | } | 
|  | 2448 |  | 
|  | 2449 |  | 
|  | 2450 | void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( | 
|  | 2451 | MacroAssembler* masm) { | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 2452 | UNIMPLEMENTED(); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 2453 | } | 
|  | 2454 |  | 
|  | 2455 |  | 
|  | 2456 | void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { | 
|  | 2457 | switch (operands_type_) { | 
|  | 2458 | case TRBinaryOpIC::UNINITIALIZED: | 
|  | 2459 | GenerateTypeTransition(masm); | 
|  | 2460 | break; | 
|  | 2461 | case TRBinaryOpIC::SMI: | 
|  | 2462 | GenerateSmiStub(masm); | 
|  | 2463 | break; | 
|  | 2464 | case TRBinaryOpIC::INT32: | 
|  | 2465 | GenerateInt32Stub(masm); | 
|  | 2466 | break; | 
|  | 2467 | case TRBinaryOpIC::HEAP_NUMBER: | 
|  | 2468 | GenerateHeapNumberStub(masm); | 
|  | 2469 | break; | 
|  | 2470 | case TRBinaryOpIC::STRING: | 
|  | 2471 | GenerateStringStub(masm); | 
|  | 2472 | break; | 
|  | 2473 | case TRBinaryOpIC::GENERIC: | 
|  | 2474 | GenerateGeneric(masm); | 
|  | 2475 | break; | 
|  | 2476 | default: | 
|  | 2477 | UNREACHABLE(); | 
|  | 2478 | } | 
|  | 2479 | } | 
|  | 2480 |  | 
|  | 2481 |  | 
|  | 2482 | const char* TypeRecordingBinaryOpStub::GetName() { | 
|  | 2483 | if (name_ != NULL) return name_; | 
|  | 2484 | const int kMaxNameLength = 100; | 
|  | 2485 | name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); | 
|  | 2486 | if (name_ == NULL) return "OOM"; | 
|  | 2487 | const char* op_name = Token::Name(op_); | 
|  | 2488 | const char* overwrite_name; | 
|  | 2489 | switch (mode_) { | 
|  | 2490 | case NO_OVERWRITE: overwrite_name = "Alloc"; break; | 
|  | 2491 | case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; | 
|  | 2492 | case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; | 
|  | 2493 | default: overwrite_name = "UnknownOverwrite"; break; | 
|  | 2494 | } | 
|  | 2495 |  | 
|  | 2496 | OS::SNPrintF(Vector<char>(name_, kMaxNameLength), | 
|  | 2497 | "TypeRecordingBinaryOpStub_%s_%s_%s", | 
|  | 2498 | op_name, | 
|  | 2499 | overwrite_name, | 
|  | 2500 | TRBinaryOpIC::GetName(operands_type_)); | 
|  | 2501 | return name_; | 
|  | 2502 | } | 
|  | 2503 |  | 
|  | 2504 |  | 
|  | 2505 | void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation( | 
|  | 2506 | MacroAssembler* masm) { | 
|  | 2507 | Register left = r1; | 
|  | 2508 | Register right = r0; | 
|  | 2509 | Register scratch1 = r7; | 
|  | 2510 | Register scratch2 = r9; | 
|  | 2511 |  | 
|  | 2512 | ASSERT(right.is(r0)); | 
|  | 2513 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 2514 |  | 
|  | 2515 | Label not_smi_result; | 
|  | 2516 | switch (op_) { | 
|  | 2517 | case Token::ADD: | 
|  | 2518 | __ add(right, left, Operand(right), SetCC);  // Add optimistically. | 
|  | 2519 | __ Ret(vc); | 
|  | 2520 | __ sub(right, right, Operand(left));  // Revert optimistic add. | 
|  | 2521 | break; | 
|  | 2522 | case Token::SUB: | 
|  | 2523 | __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically. | 
|  | 2524 | __ Ret(vc); | 
|  | 2525 | __ sub(right, left, Operand(right));  // Revert optimistic subtract. | 
|  | 2526 | break; | 
|  | 2527 | case Token::MUL: | 
|  | 2528 | // Remove tag from one of the operands. This way the multiplication result | 
|  | 2529 | // will be a smi if it fits the smi range. | 
|  | 2530 | __ SmiUntag(ip, right); | 
|  | 2531 | // Do the multiplication: | 
|  | 2532 | // scratch1 = lower 32 bits of ip * left. | 
|  | 2533 | // scratch2 = higher 32 bits of ip * left. | 
|  | 2534 | __ smull(scratch1, scratch2, left, ip); | 
|  | 2535 | // Check for overflowing the smi range - no overflow if higher 33 bits of | 
|  | 2536 | // the result are identical. | 
|  | 2537 | __ mov(ip, Operand(scratch1, ASR, 31)); | 
|  | 2538 | __ cmp(ip, Operand(scratch2)); | 
|  | 2539 | __ b(ne, &not_smi_result); | 
|  | 2540 | // Go slow on zero result to handle -0. | 
|  | 2541 | __ tst(scratch1, Operand(scratch1)); | 
|  | 2542 | __ mov(right, Operand(scratch1), LeaveCC, ne); | 
|  | 2543 | __ Ret(ne); | 
|  | 2544 | // We need -0 if we multiplied a negative number by 0 to get 0. | 
|  | 2545 | // We know one of them was zero. | 
|  | 2546 | __ add(scratch2, right, Operand(left), SetCC); | 
|  | 2547 | __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); | 
|  | 2548 | __ Ret(pl);  // Return smi 0 if the non-zero one was positive. | 
|  | 2549 | // We fall through here if we multiplied a negative number by 0, because | 
|  | 2550 | // that would mean we should produce -0. | 
|  | 2551 | break; | 
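|  |  | // A host-side sketch of the overflow check: the 64-bit product fits in | 
|  |  | // 32 signed bits iff the high word equals the low word's sign extension | 
|  |  | // (arithmetic >> assumed, as on ARM). | 
|  |  | //   bool MulFitsInt32(int32_t a, int32_t b) { | 
|  |  | //     int64_t p = static_cast<int64_t>(a) * b;   // smull | 
|  |  | //     int32_t lo = static_cast<int32_t>(p); | 
|  |  | //     int32_t hi = static_cast<int32_t>(p >> 32); | 
|  |  | //     return hi == (lo >> 31);                   // the ASR 31 compare | 
|  |  | //   } | 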
|  | 2552 | case Token::DIV: | 
|  | 2553 | // Check for power of two on the right hand side. | 
|  | 2554 | __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result); | 
|  | 2555 | // Check for positive and no remainder (scratch1 contains right - 1). | 
|  | 2556 | __ orr(scratch2, scratch1, Operand(0x80000000u)); | 
|  | 2557 | __ tst(left, scratch2); | 
|  | 2558 | __ b(ne, &not_smi_result); | 
|  | 2559 |  | 
|  | 2560 | // Perform division by shifting. | 
|  | 2561 | __ CountLeadingZeros(scratch1, scratch1, scratch2); | 
|  | 2562 | __ rsb(scratch1, scratch1, Operand(31)); | 
|  | 2563 | __ mov(right, Operand(left, LSR, scratch1)); | 
|  | 2564 | __ Ret(); | 
|  | 2565 | break; | 
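|  |  | // Sketch of the shift computed above (using GCC's __builtin_clz for | 
|  |  | // illustration): for a tagged power-of-two divisor, 31 - clz(right - 1) | 
|  |  | // is log2 of the untagged divisor, and shifting the tagged left by it | 
|  |  | // keeps the tag. | 
|  |  | //   uint32_t DivTaggedByPowerOf2(uint32_t left, uint32_t right) { | 
|  |  | //     int shift = 31 - __builtin_clz(right - 1);  // CountLeadingZeros | 
|  |  | //     return left >> shift;                       // tagged quotient | 
|  |  | //   } | 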
|  | 2566 | case Token::MOD: | 
|  | 2567 | // Check for two positive smis. | 
|  | 2568 | __ orr(scratch1, left, Operand(right)); | 
|  | 2569 | __ tst(scratch1, Operand(0x80000000u | kSmiTagMask)); | 
|  | 2570 | __ b(ne, &not_smi_result); | 
|  | 2571 |  | 
|  | 2572 | // Check for power of two on the right hand side. | 
|  | 2573 | __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result); | 
|  | 2574 |  | 
|  | 2575 | // Perform modulus by masking. | 
|  | 2576 | __ and_(right, left, Operand(scratch1)); | 
|  | 2577 | __ Ret(); | 
|  | 2578 | break; | 
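|  |  | // Power-of-two modulus by masking, as a one-line analogue: with | 
|  |  | // scratch1 == right - 1, x mod 2^k == x & (2^k - 1), and masking a | 
|  |  | // tagged value keeps the tag bit intact. | 
|  |  | //   uint32_t ModTaggedByPowerOf2(uint32_t left, uint32_t right) { | 
|  |  | //     return left & (right - 1);  // tagged remainder | 
|  |  | //   } | 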
|  | 2579 | case Token::BIT_OR: | 
|  | 2580 | __ orr(right, left, Operand(right)); | 
|  | 2581 | __ Ret(); | 
|  | 2582 | break; | 
|  | 2583 | case Token::BIT_AND: | 
|  | 2584 | __ and_(right, left, Operand(right)); | 
|  | 2585 | __ Ret(); | 
|  | 2586 | break; | 
|  | 2587 | case Token::BIT_XOR: | 
|  | 2588 | __ eor(right, left, Operand(right)); | 
|  | 2589 | __ Ret(); | 
|  | 2590 | break; | 
|  | 2591 | case Token::SAR: | 
|  | 2592 | // Remove tags from right operand. | 
|  | 2593 | __ GetLeastBitsFromSmi(scratch1, right, 5); | 
|  | 2594 | __ mov(right, Operand(left, ASR, scratch1)); | 
|  | 2595 | // Smi tag result. | 
|  | 2596 | __ bic(right, right, Operand(kSmiTagMask)); | 
|  | 2597 | __ Ret(); | 
|  | 2598 | break; | 
|  | 2599 | case Token::SHR: | 
|  | 2600 | // Remove tags from operands. We can't do this on a 31 bit number | 
|  | 2601 | // because then the 0s get shifted into bit 30 instead of bit 31. | 
|  | 2602 | __ SmiUntag(scratch1, left); | 
|  | 2603 | __ GetLeastBitsFromSmi(scratch2, right, 5); | 
|  | 2604 | __ mov(scratch1, Operand(scratch1, LSR, scratch2)); | 
|  | 2605 | // Unsigned shift is not allowed to produce a negative number, so | 
|  | 2606 | // check the sign bit and the sign bit after Smi tagging. | 
|  | 2607 | __ tst(scratch1, Operand(0xc0000000)); | 
|  | 2608 | __ b(ne, &not_smi_result); | 
|  | 2609 | // Smi tag result. | 
|  | 2610 | __ SmiTag(right, scratch1); | 
|  | 2611 | __ Ret(); | 
|  | 2612 | break; | 
|  | 2613 | case Token::SHL: | 
|  | 2614 | // Remove tags from operands. | 
|  | 2615 | __ SmiUntag(scratch1, left); | 
|  | 2616 | __ GetLeastBitsFromSmi(scratch2, right, 5); | 
|  | 2617 | __ mov(scratch1, Operand(scratch1, LSL, scratch2)); | 
|  | 2618 | // Check that the signed result fits in a Smi. | 
|  | 2619 | __ add(scratch2, scratch1, Operand(0x40000000), SetCC); | 
|  | 2620 | __ b(mi, &not_smi_result); | 
|  | 2621 | __ SmiTag(right, scratch1); | 
|  | 2622 | __ Ret(); | 
|  | 2623 | break; | 
|  | 2624 | default: | 
|  | 2625 | UNREACHABLE(); | 
|  | 2626 | } | 
|  | 2627 | __ bind(&not_smi_result); | 
|  | 2628 | } | 
|  | 2629 |  | 
|  | 2630 |  | 
|  | 2631 | void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, | 
|  | 2632 | bool smi_operands, | 
|  | 2633 | Label* not_numbers, | 
|  | 2634 | Label* gc_required) { | 
|  | 2635 | Register left = r1; | 
|  | 2636 | Register right = r0; | 
|  | 2637 | Register scratch1 = r7; | 
|  | 2638 | Register scratch2 = r9; | 
|  | 2639 |  | 
|  | 2640 | ASSERT(smi_operands || (not_numbers != NULL)); | 
|  | 2641 | if (smi_operands && FLAG_debug_code) { | 
|  | 2642 | __ AbortIfNotSmi(left); | 
|  | 2643 | __ AbortIfNotSmi(right); | 
|  | 2644 | } | 
|  | 2645 |  | 
|  | 2646 | Register heap_number_map = r6; | 
|  | 2647 | __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 2648 |  | 
|  | 2649 | switch (op_) { | 
|  | 2650 | case Token::ADD: | 
|  | 2651 | case Token::SUB: | 
|  | 2652 | case Token::MUL: | 
|  | 2653 | case Token::DIV: | 
|  | 2654 | case Token::MOD: { | 
|  | 2655 | // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 | 
|  | 2656 | // depending on whether VFP3 is available or not. | 
|  | 2657 | FloatingPointHelper::Destination destination = | 
|  | 2658 | CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? | 
|  | 2659 | FloatingPointHelper::kVFPRegisters : | 
|  | 2660 | FloatingPointHelper::kCoreRegisters; | 
|  | 2661 |  | 
|  | 2662 | // Allocate new heap number for result. | 
|  | 2663 | Register result = r5; | 
|  | 2664 | __ AllocateHeapNumber( | 
|  | 2665 | result, scratch1, scratch2, heap_number_map, gc_required); | 
|  | 2666 |  | 
|  | 2667 | // Load the operands. | 
|  | 2668 | if (smi_operands) { | 
|  | 2669 | FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); | 
|  | 2670 | } else { | 
|  | 2671 | FloatingPointHelper::LoadOperands(masm, | 
|  | 2672 | destination, | 
|  | 2673 | heap_number_map, | 
|  | 2674 | scratch1, | 
|  | 2675 | scratch2, | 
|  | 2676 | not_numbers); | 
|  | 2677 | } | 
|  | 2678 |  | 
|  | 2679 | // Calculate the result. | 
|  | 2680 | if (destination == FloatingPointHelper::kVFPRegisters) { | 
|  | 2681 | // Using VFP registers: | 
|  | 2682 | // d6: Left value | 
|  | 2683 | // d7: Right value | 
|  | 2684 | CpuFeatures::Scope scope(VFP3); | 
|  | 2685 | switch (op_) { | 
|  | 2686 | case Token::ADD: | 
|  | 2687 | __ vadd(d5, d6, d7); | 
|  | 2688 | break; | 
|  | 2689 | case Token::SUB: | 
|  | 2690 | __ vsub(d5, d6, d7); | 
|  | 2691 | break; | 
|  | 2692 | case Token::MUL: | 
|  | 2693 | __ vmul(d5, d6, d7); | 
|  | 2694 | break; | 
|  | 2695 | case Token::DIV: | 
|  | 2696 | __ vdiv(d5, d6, d7); | 
|  | 2697 | break; | 
|  | 2698 | default: | 
|  | 2699 | UNREACHABLE(); | 
|  | 2700 | } | 
|  | 2701 |  | 
|  | 2702 | __ sub(r0, result, Operand(kHeapObjectTag)); | 
|  | 2703 | __ vstr(d5, r0, HeapNumber::kValueOffset); | 
|  | 2704 | __ add(r0, r0, Operand(kHeapObjectTag)); | 
|  | 2705 | __ Ret(); | 
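|  |  | // The sub/vstr/add dance above exists because VFP store offsets must be | 
|  |  | // word-aligned, so the tagged pointer (low bit set) is untagged first. | 
|  |  | // In effect the store targets (a sketch, result_addr being the tagged | 
|  |  | // pointer value): | 
|  |  | //   double* slot = reinterpret_cast<double*>( | 
|  |  | //       result_addr - kHeapObjectTag + HeapNumber::kValueOffset); | 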
|  | 2706 | } else { | 
|  | 2707 | // Using core registers: | 
|  | 2708 | // r0: Left value (least significant part of mantissa). | 
|  | 2709 | // r1: Left value (sign, exponent, top of mantissa). | 
|  | 2710 | // r2: Right value (least significant part of mantissa). | 
|  | 2711 | // r3: Right value (sign, exponent, top of mantissa). | 
|  | 2712 |  | 
|  | 2713 | // Push the current return address before the C call. Return will be | 
|  | 2714 | // through pop(pc) below. | 
|  | 2715 | __ push(lr); | 
|  | 2716 | __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments. | 
|  | 2717 | // Call C routine that may not cause GC or other trouble. r5 is | 
|  | 2718 | // callee-saved. | 
|  | 2719 | __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | 
|  | 2720 | // Store answer in the overwritable heap number. | 
|  | 2721 | #if !defined(USE_ARM_EABI) | 
|  | 2722 | // Double returned in fp coprocessor registers 0 and 1, encoded as | 
|  | 2723 | // register cr8.  Offsets must be divisible by 4 for the coprocessor, so | 
|  | 2724 | // we need to subtract the tag from r5. | 
|  | 2725 | __ sub(scratch1, result, Operand(kHeapObjectTag)); | 
|  | 2726 | __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); | 
|  | 2727 | #else | 
|  | 2728 | // Double returned in registers 0 and 1. | 
|  | 2729 | __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); | 
|  | 2730 | #endif | 
|  | 2731 | // Place result in r0 and return to the pushed return address. | 
|  | 2732 | __ mov(r0, Operand(result)); | 
|  | 2733 | __ pop(pc); | 
|  | 2734 | } | 
|  | 2735 | break; | 
|  | 2736 | } | 
|  | 2737 | case Token::BIT_OR: | 
|  | 2738 | case Token::BIT_XOR: | 
|  | 2739 | case Token::BIT_AND: | 
|  | 2740 | case Token::SAR: | 
|  | 2741 | case Token::SHR: | 
|  | 2742 | case Token::SHL: { | 
|  | 2743 | if (smi_operands) { | 
|  | 2744 | __ SmiUntag(r3, left); | 
|  | 2745 | __ SmiUntag(r2, right); | 
|  | 2746 | } else { | 
|  | 2747 | // Convert operands to 32-bit integers. Right in r2 and left in r3. | 
|  | 2748 | FloatingPointHelper::LoadNumberAsInteger(masm, | 
|  | 2749 | left, | 
|  | 2750 | r3, | 
|  | 2751 | heap_number_map, | 
|  | 2752 | scratch1, | 
|  | 2753 | scratch2, | 
|  | 2754 | d0, | 
|  | 2755 | not_numbers); | 
|  | 2756 | FloatingPointHelper::LoadNumberAsInteger(masm, | 
|  | 2757 | right, | 
|  | 2758 | r2, | 
|  | 2759 | heap_number_map, | 
|  | 2760 | scratch1, | 
|  | 2761 | scratch2, | 
|  | 2762 | d0, | 
|  | 2763 | not_numbers); | 
|  | 2764 | } | 
|  | 2765 |  | 
|  | 2766 | Label result_not_a_smi; | 
|  | 2767 | switch (op_) { | 
|  | 2768 | case Token::BIT_OR: | 
|  | 2769 | __ orr(r2, r3, Operand(r2)); | 
|  | 2770 | break; | 
|  | 2771 | case Token::BIT_XOR: | 
|  | 2772 | __ eor(r2, r3, Operand(r2)); | 
|  | 2773 | break; | 
|  | 2774 | case Token::BIT_AND: | 
|  | 2775 | __ and_(r2, r3, Operand(r2)); | 
|  | 2776 | break; | 
|  | 2777 | case Token::SAR: | 
|  | 2778 | // Use only the 5 least significant bits of the shift count. | 
|  | 2780 | __ GetLeastBitsFromInt32(r2, r2, 5); | 
|  | 2781 | __ mov(r2, Operand(r3, ASR, r2)); | 
|  | 2782 | break; | 
|  | 2783 | case Token::SHR: | 
|  | 2784 | // Use only the 5 least significant bits of the shift count. | 
|  | 2785 | __ GetLeastBitsFromInt32(r2, r2, 5); | 
|  | 2786 | __ mov(r2, Operand(r3, LSR, r2), SetCC); | 
|  | 2787 | // SHR is special because it is required to produce a positive answer. | 
|  | 2788 | // The code below for writing into heap numbers isn't capable of | 
|  | 2789 | // writing the register as an unsigned int so we go to slow case if we | 
|  | 2790 | // hit this case. | 
|  | 2791 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 2792 | __ b(mi, &result_not_a_smi); | 
|  | 2793 | } else { | 
|  | 2794 | __ b(mi, not_numbers); | 
|  | 2795 | } | 
|  | 2796 | break; | 
|  | 2797 | case Token::SHL: | 
|  | 2798 | // Use only the 5 least significant bits of the shift count. | 
|  | 2799 | __ GetLeastBitsFromInt32(r2, r2, 5); | 
|  | 2800 | __ mov(r2, Operand(r3, LSL, r2)); | 
|  | 2801 | break; | 
|  | 2802 | default: | 
|  | 2803 | UNREACHABLE(); | 
|  | 2804 | } | 
|  | 2805 |  | 
|  | 2806 | // Check that the *signed* result fits in a smi. | 
|  | 2807 | __ add(r3, r2, Operand(0x40000000), SetCC); | 
|  | 2808 | __ b(mi, &result_not_a_smi); | 
|  | 2809 | __ SmiTag(r0, r2); | 
|  | 2810 | __ Ret(); | 
|  | 2811 |  | 
|  | 2812 | // Allocate new heap number for result. | 
|  | 2813 | __ bind(&result_not_a_smi); | 
|  | 2814 | __ AllocateHeapNumber( | 
|  | 2815 | r5, scratch1, scratch2, heap_number_map, gc_required); | 
|  | 2816 |  | 
|  | 2817 | // r2: Answer as signed int32. | 
|  | 2818 | // r5: Heap number to write answer into. | 
|  | 2819 |  | 
|  | 2820 | // Nothing can go wrong now, so move the heap number to r0, which is the | 
|  | 2821 | // result. | 
|  | 2822 | __ mov(r0, Operand(r5)); | 
|  | 2823 |  | 
|  | 2824 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 2825 | // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 
|  | 2826 | // mentioned above SHR needs to always produce a positive result. | 
|  | 2827 | CpuFeatures::Scope scope(VFP3); | 
|  | 2828 | __ vmov(s0, r2); | 
|  | 2829 | if (op_ == Token::SHR) { | 
|  | 2830 | __ vcvt_f64_u32(d0, s0); | 
|  | 2831 | } else { | 
|  | 2832 | __ vcvt_f64_s32(d0, s0); | 
|  | 2833 | } | 
|  | 2834 | __ sub(r3, r0, Operand(kHeapObjectTag)); | 
|  | 2835 | __ vstr(d0, r3, HeapNumber::kValueOffset); | 
|  | 2836 | __ Ret(); | 
|  | 2837 | } else { | 
|  | 2838 | // Tail call that writes the int32 in r2 to the heap number in r0, using | 
|  | 2839 | // r3 as scratch. r0 is preserved and returned. | 
|  | 2840 | WriteInt32ToHeapNumberStub stub(r2, r0, r3); | 
|  | 2841 | __ TailCallStub(&stub); | 
|  | 2842 | } | 
|  | 2843 | break; | 
|  | 2844 | } | 
|  | 2845 | default: | 
|  | 2846 | UNREACHABLE(); | 
|  | 2847 | } | 
|  | 2848 | } | 
|  | 2849 |  | 
|  | 2850 |  | 
|  | 2851 | // Generate the smi code. If the operation on smis is successful, a return is | 
|  | 2852 | // generated. If the result is not a smi and heap number allocation is not | 
|  | 2853 | // requested, the code falls through. If number allocation is requested but a | 
|  | 2854 | // heap number cannot be allocated, the code jumps to the label gc_required. | 
|  | 2855 | void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, | 
|  | 2856 | Label* gc_required, | 
|  | 2857 | SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { | 
|  | 2858 | Label not_smis; | 
|  | 2859 |  | 
|  | 2860 | Register left = r1; | 
|  | 2861 | Register right = r0; | 
|  | 2862 | Register scratch1 = r7; | 
|  | 2863 | Register scratch2 = r9; | 
|  | 2864 |  | 
|  | 2865 | // Perform combined smi check on both operands. | 
|  | 2866 | __ orr(scratch1, left, Operand(right)); | 
|  | 2867 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 2868 | __ tst(scratch1, Operand(kSmiTagMask)); | 
|  | 2869 | __ b(ne, &not_smis); | 
|  | 2870 |  | 
|  | 2871 | // If the smi-smi operation results in a smi, a return is generated. | 
|  | 2872 | GenerateSmiSmiOperation(masm); | 
|  | 2873 |  | 
|  | 2874 | // If heap number results are possible generate the result in an allocated | 
|  | 2875 | // heap number. | 
|  | 2876 | if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { | 
|  | 2877 | GenerateFPOperation(masm, true, NULL, gc_required); | 
|  | 2878 | } | 
|  | 2879 | __ bind(&not_smis); | 
|  | 2880 | } | 
|  | 2881 |  | 
|  | 2882 |  | 
|  | 2883 | void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 
|  | 2884 | Label not_smis, call_runtime; | 
|  | 2885 |  | 
|  | 2886 | if (result_type_ == TRBinaryOpIC::UNINITIALIZED || | 
|  | 2887 | result_type_ == TRBinaryOpIC::SMI) { | 
|  | 2888 | // Only allow smi results. | 
|  | 2889 | GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); | 
|  | 2890 | } else { | 
|  | 2891 | // Allow heap number result and don't make a transition if a heap number | 
|  | 2892 | // cannot be allocated. | 
|  | 2893 | GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 
|  | 2894 | } | 
|  | 2895 |  | 
|  | 2896 | // Code falls through if the result is not returned as either a smi or heap | 
|  | 2897 | // number. | 
|  | 2898 | GenerateTypeTransition(masm); | 
|  | 2899 |  | 
|  | 2900 | __ bind(&call_runtime); | 
|  | 2901 | GenerateCallRuntime(masm); | 
|  | 2902 | } | 
|  | 2903 |  | 
|  | 2904 |  | 
|  | 2905 | void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { | 
|  | 2906 | ASSERT(operands_type_ == TRBinaryOpIC::STRING); | 
|  | 2907 | ASSERT(op_ == Token::ADD); | 
|  | 2908 | // Try to add arguments as strings; otherwise, transition to the generic | 
|  | 2909 | // TRBinaryOpIC type. | 
|  | 2910 | GenerateAddStrings(masm); | 
|  | 2911 | GenerateTypeTransition(masm); | 
|  | 2912 | } | 
|  | 2913 |  | 
|  | 2914 |  | 
|  | 2915 | void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 
|  | 2916 | ASSERT(operands_type_ == TRBinaryOpIC::INT32); | 
|  | 2917 |  | 
|  | 2918 | GenerateTypeTransition(masm); | 
|  | 2919 | } | 
|  | 2920 |  | 
|  | 2921 |  | 
|  | 2922 | void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { | 
|  | 2923 | Label not_numbers, call_runtime; | 
|  | 2924 | ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); | 
|  | 2925 |  | 
|  | 2926 | GenerateFPOperation(masm, false, &not_numbers, &call_runtime); | 
|  | 2927 |  | 
|  | 2928 | __ bind(&not_numbers); | 
|  | 2929 | GenerateTypeTransition(masm); | 
|  | 2930 |  | 
|  | 2931 | __ bind(&call_runtime); | 
|  | 2932 | GenerateCallRuntime(masm); | 
|  | 2933 | } | 
|  | 2934 |  | 
|  | 2935 |  | 
|  | 2936 | void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 
|  | 2937 | Label call_runtime; | 
|  | 2938 |  | 
|  | 2939 | GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); | 
|  | 2940 |  | 
|  | 2941 | // If all else fails, use the runtime system to get the correct | 
|  | 2942 | // result. | 
|  | 2943 | __ bind(&call_runtime); | 
|  | 2944 |  | 
|  | 2945 | // Try to add strings before calling runtime. | 
|  | 2946 | if (op_ == Token::ADD) { | 
|  | 2947 | GenerateAddStrings(masm); | 
|  | 2948 | } | 
|  | 2949 |  | 
|  | 2950 | GenericBinaryOpStub stub(op_, mode_, r1, r0); | 
|  | 2951 | __ TailCallStub(&stub); | 
|  | 2952 | } | 
|  | 2953 |  | 
|  | 2954 |  | 
|  | 2955 | void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { | 
|  | 2956 | ASSERT(op_ == Token::ADD); | 
|  | 2957 |  | 
|  | 2958 | Register left = r1; | 
|  | 2959 | Register right = r0; | 
|  | 2960 | Label call_runtime; | 
|  | 2961 |  | 
|  | 2962 | // Check if first argument is a string. | 
|  | 2963 | __ JumpIfSmi(left, &call_runtime); | 
|  | 2964 | __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); | 
|  | 2965 | __ b(ge, &call_runtime); | 
|  | 2966 |  | 
|  | 2967 | // First argument is a string; test the second. | 
|  | 2968 | __ JumpIfSmi(right, &call_runtime); | 
|  | 2969 | __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); | 
|  | 2970 | __ b(ge, &call_runtime); | 
|  | 2971 |  | 
|  | 2972 | // First and second argument are strings. | 
|  | 2973 | StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | 
|  | 2974 | GenerateRegisterArgsPush(masm); | 
|  | 2975 | __ TailCallStub(&string_add_stub); | 
|  | 2976 |  | 
|  | 2977 | // At least one argument is not a string. | 
|  | 2978 | __ bind(&call_runtime); | 
|  | 2979 | } | 
|  | 2980 |  | 
|  | 2981 |  | 
|  | 2982 | void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { | 
|  | 2983 | GenerateRegisterArgsPush(masm); | 
|  | 2984 | switch (op_) { | 
|  | 2985 | case Token::ADD: | 
|  | 2986 | __ InvokeBuiltin(Builtins::ADD, JUMP_JS); | 
|  | 2987 | break; | 
|  | 2988 | case Token::SUB: | 
|  | 2989 | __ InvokeBuiltin(Builtins::SUB, JUMP_JS); | 
|  | 2990 | break; | 
|  | 2991 | case Token::MUL: | 
|  | 2992 | __ InvokeBuiltin(Builtins::MUL, JUMP_JS); | 
|  | 2993 | break; | 
|  | 2994 | case Token::DIV: | 
|  | 2995 | __ InvokeBuiltin(Builtins::DIV, JUMP_JS); | 
|  | 2996 | break; | 
|  | 2997 | case Token::MOD: | 
|  | 2998 | __ InvokeBuiltin(Builtins::MOD, JUMP_JS); | 
|  | 2999 | break; | 
|  | 3000 | case Token::BIT_OR: | 
|  | 3001 | __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | 
|  | 3002 | break; | 
|  | 3003 | case Token::BIT_AND: | 
|  | 3004 | __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | 
|  | 3005 | break; | 
|  | 3006 | case Token::BIT_XOR: | 
|  | 3007 | __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | 
|  | 3008 | break; | 
|  | 3009 | case Token::SAR: | 
|  | 3010 | __ InvokeBuiltin(Builtins::SAR, JUMP_JS); | 
|  | 3011 | break; | 
|  | 3012 | case Token::SHR: | 
|  | 3013 | __ InvokeBuiltin(Builtins::SHR, JUMP_JS); | 
|  | 3014 | break; | 
|  | 3015 | case Token::SHL: | 
|  | 3016 | __ InvokeBuiltin(Builtins::SHL, JUMP_JS); | 
|  | 3017 | break; | 
|  | 3018 | default: | 
|  | 3019 | UNREACHABLE(); | 
|  | 3020 | } | 
|  | 3021 | } | 
|  | 3022 |  | 
|  | 3023 |  | 
|  | 3024 | void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( | 
|  | 3025 | MacroAssembler* masm, | 
|  | 3026 | Register result, | 
|  | 3027 | Register heap_number_map, | 
|  | 3028 | Register scratch1, | 
|  | 3029 | Register scratch2, | 
|  | 3030 | Label* gc_required) { | 
|  | 3031 |  | 
|  | 3032 | // The code below will clobber result if allocation fails. To keep both | 
|  | 3033 | // arguments intact for the runtime call, result cannot be either of them. | 
|  | 3034 | ASSERT(!result.is(r0) && !result.is(r1)); | 
|  | 3035 |  | 
|  | 3036 | if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { | 
|  | 3037 | Label skip_allocation, allocated; | 
|  | 3038 | Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; | 
|  | 3039 | // If the overwritable operand is already an object, we skip the | 
|  | 3040 | // allocation of a heap number. | 
|  | 3041 | __ JumpIfNotSmi(overwritable_operand, &skip_allocation); | 
|  | 3042 | // Allocate a heap number for the result. | 
|  | 3043 | __ AllocateHeapNumber( | 
|  | 3044 | result, scratch1, scratch2, heap_number_map, gc_required); | 
|  | 3045 | __ b(&allocated); | 
|  | 3046 | __ bind(&skip_allocation); | 
|  | 3047 | // Use object holding the overwritable operand for result. | 
|  | 3048 | __ mov(result, Operand(overwritable_operand)); | 
|  | 3049 | __ bind(&allocated); | 
|  | 3050 | } else { | 
|  | 3051 | ASSERT(mode_ == NO_OVERWRITE); | 
|  | 3052 | __ AllocateHeapNumber( | 
|  | 3053 | result, scratch1, scratch2, heap_number_map, gc_required); | 
|  | 3054 | } | 
|  | 3055 | } | 
|  | 3056 |  | 
|  | 3057 |  | 
|  | 3058 | void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 
|  | 3059 | __ Push(r1, r0); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3060 | } | 
|  | 3061 |  | 
|  | 3062 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3063 | void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 
|  | 3064 | // The argument is a number; it is on the stack and in r0. | 
|  | 3065 | Label runtime_call; | 
|  | 3066 | Label input_not_smi; | 
|  | 3067 | Label loaded; | 
|  | 3068 |  | 
|  | 3069 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 3070 | // Load argument and check if it is a smi. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3071 | __ JumpIfNotSmi(r0, &input_not_smi); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3072 |  | 
|  | 3073 | CpuFeatures::Scope scope(VFP3); | 
|  | 3074 | // Input is a smi. Convert to double and load the low and high words | 
|  | 3075 | // of the double into r2, r3. | 
|  | 3076 | __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 
|  | 3077 | __ b(&loaded); | 
|  | 3078 |  | 
|  | 3079 | __ bind(&input_not_smi); | 
|  | 3080 | // Check if input is a HeapNumber. | 
|  | 3081 | __ CheckMap(r0, | 
|  | 3082 | r1, | 
|  | 3083 | Heap::kHeapNumberMapRootIndex, | 
|  | 3084 | &runtime_call, | 
|  | 3085 | true); | 
|  | 3086 | // Input is a HeapNumber. Load it to a double register and store the | 
|  | 3087 | // low and high words into r2, r3. | 
|  | 3088 | __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 
|  | 3089 |  | 
|  | 3090 | __ bind(&loaded); | 
|  | 3091 | // r2 = low 32 bits of double value | 
|  | 3092 | // r3 = high 32 bits of double value | 
|  | 3093 | // Compute hash (the shifts are arithmetic): | 
|  | 3094 | //   h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | 
|  | 3095 | __ eor(r1, r2, Operand(r3)); | 
|  | 3096 | __ eor(r1, r1, Operand(r1, ASR, 16)); | 
|  | 3097 | __ eor(r1, r1, Operand(r1, ASR, 8)); | 
|  | 3098 | ASSERT(IsPowerOf2(TranscendentalCache::kCacheSize)); | 
|  | 3099 | __ And(r1, r1, Operand(TranscendentalCache::kCacheSize - 1)); | 
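|  |  | // The same hash as a C++ sketch (signed >> to match the arithmetic ASR | 
|  |  | // operands above; kCacheSize is a power of two, so the final mask is a | 
|  |  | // cheap modulo): | 
|  |  | //   uint32_t Hash(uint32_t lo, uint32_t hi) { | 
|  |  | //     int32_t h = static_cast<int32_t>(lo ^ hi); | 
|  |  | //     h ^= h >> 16; | 
|  |  | //     h ^= h >> 8; | 
|  |  | //     return static_cast<uint32_t>(h) & | 
|  |  | //            (TranscendentalCache::kCacheSize - 1); | 
|  |  | //   } | 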
|  | 3100 |  | 
|  | 3101 | // r2 = low 32 bits of double value. | 
|  | 3102 | // r3 = high 32 bits of double value. | 
|  | 3103 | // r1 = TranscendentalCache::hash(double value). | 
|  | 3104 | __ mov(r0, | 
|  | 3105 | Operand(ExternalReference::transcendental_cache_array_address())); | 
|  | 3106 | // r0 points to cache array. | 
|  | 3107 | __ ldr(r0, MemOperand(r0, type_ * sizeof(TranscendentalCache::caches_[0]))); | 
|  | 3108 | // r0 points to the cache for the type type_. | 
|  | 3109 | // If NULL, the cache hasn't been initialized yet, so go through runtime. | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 3110 | __ cmp(r0, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3111 | __ b(eq, &runtime_call); | 
|  | 3112 |  | 
|  | 3113 | #ifdef DEBUG | 
|  | 3114 | // Check that the layout of cache elements matches expectations. | 
|  | 3115 | { TranscendentalCache::Element test_elem[2]; | 
|  | 3116 | char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | 
|  | 3117 | char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | 
|  | 3118 | char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | 
|  | 3119 | char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | 
|  | 3120 | char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | 
|  | 3121 | CHECK_EQ(12, elem2_start - elem_start);  // Two uint_32's and a pointer. | 
|  | 3122 | CHECK_EQ(0, elem_in0 - elem_start); | 
|  | 3123 | CHECK_EQ(kIntSize, elem_in1 - elem_start); | 
|  | 3124 | CHECK_EQ(2 * kIntSize, elem_out - elem_start); | 
|  | 3125 | } | 
|  | 3126 | #endif | 
|  | 3127 |  | 
|  | 3128 | // Find the address of the r1'th entry in the cache, i.e., &r0[r1*12]. | 
|  | 3129 | __ add(r1, r1, Operand(r1, LSL, 1)); | 
|  | 3130 | __ add(r0, r0, Operand(r1, LSL, 2)); | 
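|  |  | // Spelled out: each Element is 12 bytes (two uint32_t inputs plus a | 
|  |  | // pointer, per the layout check above), so the address arithmetic is | 
|  |  | //   entry = base + index * 12 | 
|  |  | //         = base + ((index + index * 2) << 2),  // add LSL 1, add LSL 2 | 
|  |  | // which is exactly what the two adds compute. | 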
|  | 3131 | // Check if cache matches: Double value is stored in uint32_t[2] array. | 
|  | 3132 | __ ldm(ia, r0, r4.bit()| r5.bit() | r6.bit()); | 
|  | 3133 | __ cmp(r2, r4); | 
|  | 3134 | __ b(ne, &runtime_call); | 
|  | 3135 | __ cmp(r3, r5); | 
|  | 3136 | __ b(ne, &runtime_call); | 
|  | 3137 | // Cache hit. Load result, pop argument and return. | 
|  | 3138 | __ mov(r0, Operand(r6)); | 
|  | 3139 | __ pop(); | 
|  | 3140 | __ Ret(); | 
|  | 3141 | } | 
|  | 3142 |  | 
|  | 3143 | __ bind(&runtime_call); | 
|  | 3144 | __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1); | 
|  | 3145 | } | 
|  | 3146 |  | 
|  | 3147 |  | 
|  | 3148 | Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() { | 
|  | 3149 | switch (type_) { | 
|  | 3150 | // Add more cases when necessary. | 
|  | 3151 | case TranscendentalCache::SIN: return Runtime::kMath_sin; | 
|  | 3152 | case TranscendentalCache::COS: return Runtime::kMath_cos; | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3153 | case TranscendentalCache::LOG: return Runtime::kMath_log; | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3154 | default: | 
|  | 3155 | UNIMPLEMENTED(); | 
|  | 3156 | return Runtime::kAbort; | 
|  | 3157 | } | 
|  | 3158 | } | 
|  | 3159 |  | 
|  | 3160 |  | 
|  | 3161 | void StackCheckStub::Generate(MacroAssembler* masm) { | 
| Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 3162 | __ TailCallRuntime(Runtime::kStackGuard, 0, 1); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3163 | } | 
|  | 3164 |  | 
|  | 3165 |  | 
|  | 3166 | void GenericUnaryOpStub::Generate(MacroAssembler* masm) { | 
|  | 3167 | Label slow, done; | 
|  | 3168 |  | 
|  | 3169 | Register heap_number_map = r6; | 
|  | 3170 | __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 3171 |  | 
|  | 3172 | if (op_ == Token::SUB) { | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 3173 | if (include_smi_code_) { | 
|  | 3174 | // Check whether the value is a smi. | 
|  | 3175 | Label try_float; | 
|  | 3176 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 3177 | __ b(ne, &try_float); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3178 |  | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 3179 | // Go slow case if the value of the expression is zero | 
|  | 3180 | // to make sure that we switch between 0 and -0. | 
|  | 3181 | if (negative_zero_ == kStrictNegativeZero) { | 
|  | 3182 | // If we have to check for zero, then we can check for the max negative | 
|  | 3183 | // smi while we are at it. | 
|  | 3184 | __ bic(ip, r0, Operand(0x80000000), SetCC); | 
|  | 3185 | __ b(eq, &slow); | 
|  | 3186 | __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); | 
|  | 3187 | __ Ret(); | 
|  | 3188 | } else { | 
|  | 3189 | // The value of the expression is a smi and 0 is OK for -0.  Try | 
|  | 3190 | // optimistic subtraction '0 - value'. | 
|  | 3191 | __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC); | 
|  | 3192 | __ Ret(vc); | 
|  | 3193 | // We don't have to reverse the optimistic neg since the only case | 
|  | 3194 | // where we fall through is the minimum negative Smi, which is the case | 
|  | 3195 | // where the neg leaves the register unchanged. | 
|  | 3196 | __ jmp(&slow);  // Go slow on max negative Smi. | 
|  | 3197 | } | 
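|  |  | // Illustration of the edge case: the tagged minimum smi is 0x80000000 | 
|  |  | // (2 * -2^30), and 0 - 0x80000000 overflows back to 0x80000000, so the | 
|  |  | // fall-through really does leave r0 unchanged. | 
|  |  | //   uint32_t NegateTagged(uint32_t t) { return 0u - t; }  // the rsb | 
|  |  | //   // NegateTagged(0x80000000u) == 0x80000000u | 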
|  | 3198 | __ bind(&try_float); | 
|  | 3199 | } else if (FLAG_debug_code) { | 
|  | 3200 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 3201 | __ Assert(ne, "Unexpected smi operand."); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3202 | } | 
|  | 3203 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3204 | __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 3205 | __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 3206 | __ cmp(r1, heap_number_map); | 
|  | 3207 | __ b(ne, &slow); | 
|  | 3208 | // r0 is a heap number.  Get a new heap number in r1. | 
|  | 3209 | if (overwrite_ == UNARY_OVERWRITE) { | 
|  | 3210 | __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 
|  | 3211 | __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign. | 
|  | 3212 | __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 
|  | 3213 | } else { | 
|  | 3214 | __ AllocateHeapNumber(r1, r2, r3, r6, &slow); | 
|  | 3215 | __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 
|  | 3216 | __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 
|  | 3217 | __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | 
|  | 3218 | __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign. | 
|  | 3219 | __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | 
|  | 3220 | __ mov(r0, Operand(r1)); | 
|  | 3221 | } | 
|  | 3222 | } else if (op_ == Token::BIT_NOT) { | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 3223 | if (include_smi_code_) { | 
|  | 3224 | Label non_smi; | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3225 | __ JumpIfNotSmi(r0, &non_smi); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 3226 | __ mvn(r0, Operand(r0)); | 
|  | 3227 | // Bit-clear inverted smi-tag. | 
|  | 3228 | __ bic(r0, r0, Operand(kSmiTagMask)); | 
|  | 3229 | __ Ret(); | 
|  | 3230 | __ bind(&non_smi); | 
|  | 3231 | } else if (FLAG_debug_code) { | 
|  | 3232 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 3233 | __ Assert(ne, "Unexpected smi operand."); | 
|  | 3234 | } | 
|  | 3235 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3236 | // Check if the operand is a heap number. | 
|  | 3237 | __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 3238 | __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 
|  | 3239 | __ cmp(r1, heap_number_map); | 
|  | 3240 | __ b(ne, &slow); | 
|  | 3241 |  | 
|  | 3242 | // Convert the heap number in r0 to an untagged integer in r1. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3243 | __ ConvertToInt32(r0, r1, r2, r3, d0, &slow); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3244 |  | 
|  | 3245 | // Do the bitwise operation (move negated) and check if the result | 
|  | 3246 | // fits in a smi. | 
|  | 3247 | Label try_float; | 
|  | 3248 | __ mvn(r1, Operand(r1)); | 
|  | 3249 | __ add(r2, r1, Operand(0x40000000), SetCC); | 
|  | 3250 | __ b(mi, &try_float); | 
|  | 3251 | __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | 
|  | 3252 | __ b(&done); | 
|  | 3253 |  | 
|  | 3254 | __ bind(&try_float); | 
|  | 3255 | if (overwrite_ != UNARY_OVERWRITE) { | 
|  | 3256 | // Allocate a fresh heap number, but don't overwrite r0 until | 
|  | 3257 | // we're sure we can do it without going through the slow case | 
|  | 3258 | // that needs the value in r0. | 
|  | 3259 | __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | 
|  | 3260 | __ mov(r0, Operand(r2)); | 
|  | 3261 | } | 
|  | 3262 |  | 
|  | 3263 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 3264 | // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. | 
|  | 3265 | CpuFeatures::Scope scope(VFP3); | 
|  | 3266 | __ vmov(s0, r1); | 
|  | 3267 | __ vcvt_f64_s32(d0, s0); | 
|  | 3268 | __ sub(r2, r0, Operand(kHeapObjectTag)); | 
|  | 3269 | __ vstr(d0, r2, HeapNumber::kValueOffset); | 
|  | 3270 | } else { | 
|  | 3271 | // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | 
|  | 3272 | // have to set up a frame. | 
|  | 3273 | WriteInt32ToHeapNumberStub stub(r1, r0, r2); | 
|  | 3274 | __ push(lr); | 
|  | 3275 | __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); | 
|  | 3276 | __ pop(lr); | 
|  | 3277 | } | 
|  | 3278 | } else { | 
|  | 3279 | UNIMPLEMENTED(); | 
|  | 3280 | } | 
|  | 3281 |  | 
|  | 3282 | __ bind(&done); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 3283 | __ Ret(); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3284 |  | 
|  | 3285 | // Handle the slow case by jumping to the JavaScript builtin. | 
|  | 3286 | __ bind(&slow); | 
|  | 3287 | __ push(r0); | 
|  | 3288 | switch (op_) { | 
|  | 3289 | case Token::SUB: | 
|  | 3290 | __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); | 
|  | 3291 | break; | 
|  | 3292 | case Token::BIT_NOT: | 
|  | 3293 | __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); | 
|  | 3294 | break; | 
|  | 3295 | default: | 
|  | 3296 | UNREACHABLE(); | 
|  | 3297 | } | 
|  | 3298 | } | 
|  | 3299 |  | 
|  | 3300 |  | 
|  | 3301 | void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | 
|  | 3302 | // r0 holds the exception. | 
|  | 3303 |  | 
|  | 3304 | // Adjust this code if not the case. | 
|  | 3305 | STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | 
|  | 3306 |  | 
|  | 3307 | // Drop the sp to the top of the handler. | 
|  | 3308 | __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | 
|  | 3309 | __ ldr(sp, MemOperand(r3)); | 
|  | 3310 |  | 
|  | 3311 | // Restore the next handler and frame pointer, discard handler state. | 
|  | 3312 | STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 
|  | 3313 | __ pop(r2); | 
|  | 3314 | __ str(r2, MemOperand(r3)); | 
|  | 3315 | STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | 
|  | 3316 | __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state. | 
|  | 3317 |  | 
|  | 3318 | // Before returning we restore the context from the frame pointer if | 
|  | 3319 | // not NULL.  The frame pointer is NULL in the exception handler of a | 
|  | 3320 | // JS entry frame. | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 3321 | __ cmp(fp, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3322 | // Set cp to NULL if fp is NULL. | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 3323 | __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3324 | // Restore cp otherwise. | 
|  | 3325 | __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | 
|  | 3326 | #ifdef DEBUG | 
|  | 3327 | if (FLAG_debug_code) { | 
|  | 3328 | __ mov(lr, Operand(pc)); | 
|  | 3329 | } | 
|  | 3330 | #endif | 
|  | 3331 | STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | 
|  | 3332 | __ pop(pc); | 
|  | 3333 | } | 
|  | 3334 |  | 
|  | 3335 |  | 
|  | 3336 | void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, | 
|  | 3337 | UncatchableExceptionType type) { | 
|  | 3338 | // Adjust this code if not the case. | 
|  | 3339 | STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | 
|  | 3340 |  | 
|  | 3341 | // Drop sp to the top stack handler. | 
|  | 3342 | __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | 
|  | 3343 | __ ldr(sp, MemOperand(r3)); | 
|  | 3344 |  | 
|  | 3345 | // Unwind the handlers until the ENTRY handler is found. | 
|  | 3346 | Label loop, done; | 
|  | 3347 | __ bind(&loop); | 
|  | 3348 | // Load the type of the current stack handler. | 
|  | 3349 | const int kStateOffset = StackHandlerConstants::kStateOffset; | 
|  | 3350 | __ ldr(r2, MemOperand(sp, kStateOffset)); | 
|  | 3351 | __ cmp(r2, Operand(StackHandler::ENTRY)); | 
|  | 3352 | __ b(eq, &done); | 
|  | 3353 | // Fetch the next handler in the list. | 
|  | 3354 | const int kNextOffset = StackHandlerConstants::kNextOffset; | 
|  | 3355 | __ ldr(sp, MemOperand(sp, kNextOffset)); | 
|  | 3356 | __ jmp(&loop); | 
|  | 3357 | __ bind(&done); | 
|  | 3358 |  | 
|  | 3359 | // Set the top handler address to the next handler past the ENTRY handler. | 
|  | 3360 | STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 
|  | 3361 | __ pop(r2); | 
|  | 3362 | __ str(r2, MemOperand(r3)); | 
|  | 3363 |  | 
|  | 3364 | if (type == OUT_OF_MEMORY) { | 
|  | 3365 | // Set external caught exception to false. | 
|  | 3366 | ExternalReference external_caught(Top::k_external_caught_exception_address); | 
| Ben Murdoch | b8e0da2 | 2011-05-16 14:20:40 +0100 | [diff] [blame] | 3367 | __ mov(r0, Operand(false, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3368 | __ mov(r2, Operand(external_caught)); | 
|  | 3369 | __ str(r0, MemOperand(r2)); | 
|  | 3370 |  | 
|  | 3371 | // Set pending exception and r0 to out of memory exception. | 
|  | 3372 | Failure* out_of_memory = Failure::OutOfMemoryException(); | 
|  | 3373 | __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | 
|  | 3374 | __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); | 
|  | 3375 | __ str(r0, MemOperand(r2)); | 
|  | 3376 | } | 
|  | 3377 |  | 
|  | 3378 | // Stack layout at this point. See also StackHandlerConstants. | 
|  | 3379 | // sp ->   state (ENTRY) | 
|  | 3380 | //         fp | 
|  | 3381 | //         lr | 
|  | 3382 |  | 
|  | 3383 | // Discard handler state (r2 is not used) and restore frame pointer. | 
|  | 3384 | STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | 
|  | 3385 | __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state. | 
|  | 3386 | // Before returning we restore the context from the frame pointer if | 
|  | 3387 | // not NULL.  The frame pointer is NULL in the exception handler of a | 
|  | 3388 | // JS entry frame. | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 3389 | __ cmp(fp, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3390 | // Set cp to NULL if fp is NULL. | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 3391 | __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3392 | // Restore cp otherwise. | 
|  | 3393 | __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | 
|  | 3394 | #ifdef DEBUG | 
|  | 3395 | if (FLAG_debug_code) { | 
|  | 3396 | __ mov(lr, Operand(pc)); | 
|  | 3397 | } | 
|  | 3398 | #endif | 
|  | 3399 | STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | 
|  | 3400 | __ pop(pc); | 
|  | 3401 | } | 
|  | 3402 |  | 
|  | 3403 |  | 
|  | 3404 | void CEntryStub::GenerateCore(MacroAssembler* masm, | 
|  | 3405 | Label* throw_normal_exception, | 
|  | 3406 | Label* throw_termination_exception, | 
|  | 3407 | Label* throw_out_of_memory_exception, | 
|  | 3408 | bool do_gc, | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3409 | bool always_allocate) { | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3410 | // r0: result parameter for PerformGC, if any | 
|  | 3411 | // r4: number of arguments including receiver  (C callee-saved) | 
|  | 3412 | // r5: pointer to builtin function  (C callee-saved) | 
|  | 3413 | // r6: pointer to the first argument (C callee-saved) | 
|  | 3414 |  | 
|  | 3415 | if (do_gc) { | 
|  | 3416 | // Passing r0. | 
|  | 3417 | __ PrepareCallCFunction(1, r1); | 
|  | 3418 | __ CallCFunction(ExternalReference::perform_gc_function(), 1); | 
|  | 3419 | } | 
|  | 3420 |  | 
|  | 3421 | ExternalReference scope_depth = | 
|  | 3422 | ExternalReference::heap_always_allocate_scope_depth(); | 
|  | 3423 | if (always_allocate) { | 
|  | 3424 | __ mov(r0, Operand(scope_depth)); | 
|  | 3425 | __ ldr(r1, MemOperand(r0)); | 
|  | 3426 | __ add(r1, r1, Operand(1)); | 
|  | 3427 | __ str(r1, MemOperand(r0)); | 
|  | 3428 | } | 
|  | 3429 |  | 
|  | 3430 | // Call C built-in. | 
|  | 3431 | // r0 = argc, r1 = argv | 
|  | 3432 | __ mov(r0, Operand(r4)); | 
|  | 3433 | __ mov(r1, Operand(r6)); | 
|  | 3434 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3435 | #if defined(V8_HOST_ARCH_ARM) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3436 | int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 
|  | 3437 | int frame_alignment_mask = frame_alignment - 1; | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3438 | if (FLAG_debug_code) { | 
|  | 3439 | if (frame_alignment > kPointerSize) { | 
|  | 3440 | Label alignment_as_expected; | 
|  | 3441 | ASSERT(IsPowerOf2(frame_alignment)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3442 | __ tst(sp, Operand(frame_alignment_mask)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3443 | __ b(eq, &alignment_as_expected); | 
|  | 3444 | // Don't use Check here, as it will call Runtime_Abort re-entering here. | 
|  | 3445 | __ stop("Unexpected alignment"); | 
|  | 3446 | __ bind(&alignment_as_expected); | 
|  | 3447 | } | 
|  | 3448 | } | 
|  | 3449 | #endif | 
|  | 3450 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3451 | // TODO(1242173): To let the GC traverse the return address of the exit | 
|  | 3452 | // frames, we need to know where the return address is. Right now, | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3453 | // we store it on the stack to be able to find it again, but we never | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3454 | // restore from it in case of changes, which makes it impossible to | 
|  | 3455 | // support moving the C entry code stub. This should be fixed, but currently | 
|  | 3456 | // this is OK because the CEntryStub gets generated so early in the V8 boot | 
|  | 3457 | // sequence that it is never moved. | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3458 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3459 | // Compute the return address in lr to return to after the jump below. The | 
|  | 3460 | // pc is already at '+ 8' from the current instruction, but the return is | 
|  | 3461 | // after three instructions, so add another 4 to pc to get the return address. | 
|  | 3462 | masm->add(lr, pc, Operand(4)); | 
|  | 3463 | __ str(lr, MemOperand(sp, 0)); | 
|  | 3464 | masm->Jump(r5); | 
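|  |  | // Worked out: when the add executes, pc reads as (addr of add) + 8, so | 
|  |  | // lr = (addr of add) + 12, the instruction right after the add/str/Jump | 
|  |  | // triple (3 * 4 bytes) -- exactly the intended return point. | 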
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3465 |  | 
|  | 3466 | if (always_allocate) { | 
|  | 3467 | // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 | 
|  | 3468 | // though (they contain the result). | 
|  | 3469 | __ mov(r2, Operand(scope_depth)); | 
|  | 3470 | __ ldr(r3, MemOperand(r2)); | 
|  | 3471 | __ sub(r3, r3, Operand(1)); | 
|  | 3472 | __ str(r3, MemOperand(r2)); | 
|  | 3473 | } | 
|  | 3474 |  | 
|  | 3475 | // check for failure result | 
|  | 3476 | Label failure_returned; | 
|  | 3477 | STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); | 
|  | 3478 | // Lower 2 bits of r2 are 0 iff r0 has failure tag. | 
|  | 3479 | __ add(r2, r0, Operand(1)); | 
|  | 3480 | __ tst(r2, Operand(kFailureTagMask)); | 
|  | 3481 | __ b(eq, &failure_returned); | 
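|  |  | // A sketch of the tag test, relying only on the STATIC_ASSERT above | 
|  |  | // that (kFailureTag + 1) & kFailureTagMask == 0: | 
|  |  | //   bool LooksLikeFailure(uint32_t obj) { | 
|  |  | //     return ((obj + 1) & kFailureTagMask) == 0;  // low tag bits all set | 
|  |  | //   } | 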
|  | 3482 |  | 
|  | 3483 | // Exit C frame and return. | 
|  | 3484 | // r0:r1: result | 
|  | 3485 | // sp: stack pointer | 
|  | 3486 | // fp: frame pointer | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3487 | __ LeaveExitFrame(save_doubles_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3488 |  | 
|  | 3489 | // check if we should retry or throw exception | 
|  | 3490 | Label retry; | 
|  | 3491 | __ bind(&failure_returned); | 
|  | 3492 | STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); | 
|  | 3493 | __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); | 
|  | 3494 | __ b(eq, &retry); | 
|  | 3495 |  | 
|  | 3496 | // Special handling of out of memory exceptions. | 
|  | 3497 | Failure* out_of_memory = Failure::OutOfMemoryException(); | 
|  | 3498 | __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | 
|  | 3499 | __ b(eq, throw_out_of_memory_exception); | 
|  | 3500 |  | 
|  | 3501 | // Retrieve the pending exception and clear the variable. | 
|  | 3502 | __ mov(ip, Operand(ExternalReference::the_hole_value_location())); | 
|  | 3503 | __ ldr(r3, MemOperand(ip)); | 
|  | 3504 | __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | 
|  | 3505 | __ ldr(r0, MemOperand(ip)); | 
|  | 3506 | __ str(r3, MemOperand(ip)); | 
|  | 3507 |  | 
|  | 3508 | // Special handling of termination exceptions which are uncatchable | 
|  | 3509 | // by JavaScript code. | 
|  | 3510 | __ cmp(r0, Operand(Factory::termination_exception())); | 
|  | 3511 | __ b(eq, throw_termination_exception); | 
|  | 3512 |  | 
|  | 3513 | // Handle normal exception. | 
|  | 3514 | __ jmp(throw_normal_exception); | 
|  | 3515 |  | 
|  | 3516 | __ bind(&retry);  // Pass the last failure (r0) as the parameter (r0) when retrying. | 
|  | 3517 | } | 
|  | 3518 |  | 
|  | 3519 |  | 
|  | 3520 | void CEntryStub::Generate(MacroAssembler* masm) { | 
|  | 3521 | // Called from JavaScript; parameters are on the stack as if calling a JS function. | 
|  | 3522 | // r0: number of arguments including receiver | 
|  | 3523 | // r1: pointer to builtin function | 
|  | 3524 | // fp: frame pointer  (restored after C call) | 
|  | 3525 | // sp: stack pointer  (restored as callee's sp after C call) | 
|  | 3526 | // cp: current context  (C callee-saved) | 
|  | 3527 |  | 
|  | 3528 | // Result returned in r0 or r0+r1 by default. | 
|  | 3529 |  | 
|  | 3530 | // NOTE: Invocations of builtins may return failure objects | 
|  | 3531 | // instead of a proper result. The builtin entry handles | 
|  | 3532 | // this by performing a garbage collection and retrying the | 
|  | 3533 | // builtin once. | 
|  | 3534 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3535 | // Compute the argv pointer in a callee-saved register. | 
|  | 3536 | __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); | 
|  | 3537 | __ sub(r6, r6, Operand(kPointerSize)); | 
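|  |  | // r6 = sp + argc * kPointerSize - kPointerSize: the address of the first | 
|  |  | // argument, which is the highest addressed argument slot. | 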
|  | 3538 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3539 | // Enter the exit frame that transitions from JavaScript to C++. | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3540 | __ EnterExitFrame(save_doubles_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3541 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3542 | // Set up argc and the builtin function in callee-saved registers. | 
|  | 3543 | __ mov(r4, Operand(r0)); | 
|  | 3544 | __ mov(r5, Operand(r1)); | 
|  | 3545 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3546 | // r4: number of arguments (C callee-saved) | 
|  | 3547 | // r5: pointer to builtin function (C callee-saved) | 
|  | 3548 | // r6: pointer to first argument (C callee-saved) | 
|  | 3549 |  | 
|  | 3550 | Label throw_normal_exception; | 
|  | 3551 | Label throw_termination_exception; | 
|  | 3552 | Label throw_out_of_memory_exception; | 
|  | 3553 |  | 
|  | 3554 | // Call into the runtime system. | 
|  | 3555 | GenerateCore(masm, | 
|  | 3556 | &throw_normal_exception, | 
|  | 3557 | &throw_termination_exception, | 
|  | 3558 | &throw_out_of_memory_exception, | 
|  | 3559 | false, | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3560 | false); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3561 |  | 
|  | 3562 | // Do space-specific GC and retry runtime call. | 
|  | 3563 | GenerateCore(masm, | 
|  | 3564 | &throw_normal_exception, | 
|  | 3565 | &throw_termination_exception, | 
|  | 3566 | &throw_out_of_memory_exception, | 
|  | 3567 | true, | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3568 | false); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3569 |  | 
|  | 3570 | // Do full GC and retry runtime call one final time. | 
|  | 3571 | Failure* failure = Failure::InternalError(); | 
|  | 3572 | __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); | 
|  | 3573 | GenerateCore(masm, | 
|  | 3574 | &throw_normal_exception, | 
|  | 3575 | &throw_termination_exception, | 
|  | 3576 | &throw_out_of_memory_exception, | 
|  | 3577 | true, | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3578 | true); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3579 |  | 
|  | 3580 | __ bind(&throw_out_of_memory_exception); | 
|  | 3581 | GenerateThrowUncatchable(masm, OUT_OF_MEMORY); | 
|  | 3582 |  | 
|  | 3583 | __ bind(&throw_termination_exception); | 
|  | 3584 | GenerateThrowUncatchable(masm, TERMINATION); | 
|  | 3585 |  | 
|  | 3586 | __ bind(&throw_normal_exception); | 
|  | 3587 | GenerateThrowTOS(masm); | 
|  | 3588 | } | 
|  | 3589 |  | 
|  | 3590 |  | 
|  | 3591 | void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 
|  | 3592 | // r0: code entry | 
|  | 3593 | // r1: function | 
|  | 3594 | // r2: receiver | 
|  | 3595 | // r3: argc | 
|  | 3596 | // [sp+0]: argv | 
|  | 3597 |  | 
|  | 3598 | Label invoke, exit; | 
|  | 3599 |  | 
|  | 3600 | // Called from C, so do not pop argc and args on exit (preserve sp). | 
|  | 3601 | // No need to save register-passed args. | 
|  | 3602 | // Save callee-saved registers (incl. cp and fp), sp, and lr. | 
|  | 3603 | __ stm(db_w, sp, kCalleeSaved | lr.bit()); | 
|  | 3604 |  | 
|  | 3605 | // Get the address of argv; see the stm above. | 
|  | 3606 | // r0: code entry | 
|  | 3607 | // r1: function | 
|  | 3608 | // r2: receiver | 
|  | 3609 | // r3: argc | 
|  | 3610 | __ ldr(r4, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize));  // argv | 
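|  |  | // argv sits just above the kNumCalleeSaved registers and lr saved by the | 
|  |  | // stm above, hence the (kNumCalleeSaved + 1) * kPointerSize displacement. | 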
|  | 3611 |  | 
|  | 3612 | // Push a frame with special values setup to mark it as an entry frame. | 
|  | 3613 | // r0: code entry | 
|  | 3614 | // r1: function | 
|  | 3615 | // r2: receiver | 
|  | 3616 | // r3: argc | 
|  | 3617 | // r4: argv | 
|  | 3618 | __ mov(r8, Operand(-1));  // Push a bad frame pointer to fail if it is used. | 
|  | 3619 | int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 
|  | 3620 | __ mov(r7, Operand(Smi::FromInt(marker))); | 
|  | 3621 | __ mov(r6, Operand(Smi::FromInt(marker))); | 
|  | 3622 | __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address))); | 
|  | 3623 | __ ldr(r5, MemOperand(r5)); | 
|  | 3624 | __ Push(r8, r7, r6, r5); | 
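|  |  | // After this Push: sp[0] = saved c_entry_fp (r5), sp[4] and sp[8] = the | 
|  |  | // frame marker (r6, r7), sp[12] = the bad frame pointer (r8). | 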
|  | 3625 |  | 
|  | 3626 | // Set up the frame pointer for the frame to be pushed. | 
|  | 3627 | __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 
|  | 3628 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3629 | #ifdef ENABLE_LOGGING_AND_PROFILING | 
|  | 3630 | // If this is the outermost JS call, set js_entry_sp value. | 
|  | 3631 | ExternalReference js_entry_sp(Top::k_js_entry_sp_address); | 
|  | 3632 | __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 
|  | 3633 | __ ldr(r6, MemOperand(r5)); | 
|  | 3634 | __ cmp(r6, Operand(0, RelocInfo::NONE)); | 
|  | 3635 | __ str(fp, MemOperand(r5), eq); | 
|  | 3636 | #endif | 
|  | 3637 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3638 | // Call a faked try-block that does the invoke. | 
|  | 3639 | __ bl(&invoke); | 
|  | 3640 |  | 
|  | 3641 | // Caught exception: Store result (exception) in the pending | 
|  | 3642 | // exception field in the JSEnv and return a failure sentinel. | 
|  | 3643 | // Coming in here, the fp will be invalid because the PushTryHandler below | 
|  | 3644 | // sets it to 0 to signal the existence of the JSEntry frame. | 
|  | 3645 | __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | 
|  | 3646 | __ str(r0, MemOperand(ip)); | 
|  | 3647 | __ mov(r0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); | 
|  | 3648 | __ b(&exit); | 
|  | 3649 |  | 
|  | 3650 | // Invoke: Link this frame into the handler chain. | 
|  | 3651 | __ bind(&invoke); | 
|  | 3652 | // Must preserve r0-r4, r5-r7 are available. | 
|  | 3653 | __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); | 
|  | 3654 | // If an exception not caught by another handler occurs, this handler | 
|  | 3655 | // returns control to the code after the bl(&invoke) above, which | 
|  | 3656 | // restores all kCalleeSaved registers (including cp and fp) to their | 
|  | 3657 | // saved values before returning a failure to C. | 
|  | 3658 |  | 
|  | 3659 | // Clear any pending exceptions. | 
|  | 3660 | __ mov(ip, Operand(ExternalReference::the_hole_value_location())); | 
|  | 3661 | __ ldr(r5, MemOperand(ip)); | 
|  | 3662 | __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | 
|  | 3663 | __ str(r5, MemOperand(ip)); | 
|  | 3664 |  | 
|  | 3665 | // Invoke the function by calling through JS entry trampoline builtin. | 
|  | 3666 | // Notice that we cannot store a reference to the trampoline code directly in | 
|  | 3667 | // this stub, because runtime stubs are not traversed when doing GC. | 
|  | 3668 |  | 
|  | 3669 | // Expected registers by Builtins::JSEntryTrampoline | 
|  | 3670 | // r0: code entry | 
|  | 3671 | // r1: function | 
|  | 3672 | // r2: receiver | 
|  | 3673 | // r3: argc | 
|  | 3674 | // r4: argv | 
|  | 3675 | if (is_construct) { | 
|  | 3676 | ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); | 
|  | 3677 | __ mov(ip, Operand(construct_entry)); | 
|  | 3678 | } else { | 
|  | 3679 | ExternalReference entry(Builtins::JSEntryTrampoline); | 
|  | 3680 | __ mov(ip, Operand(entry)); | 
|  | 3681 | } | 
|  | 3682 | __ ldr(ip, MemOperand(ip));  // deref address | 
|  | 3683 |  | 
|  | 3684 | // Branch and link to JSEntryTrampoline.  We don't use the double underscore | 
|  | 3685 | // macro for the add instruction because we don't want the coverage tool | 
|  | 3686 | // inserting instructions here after we read the pc. | 
|  | 3687 | __ mov(lr, Operand(pc)); | 
|  | 3688 | masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
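|  |  | // mov lr, pc reads pc as the mov's own address + 8, i.e. the instruction | 
|  |  | // after the add, so lr holds the correct return address for this call. | 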
|  | 3689 |  | 
|  | 3690 | // Unlink this frame from the handler chain. When reading the | 
|  | 3691 | // address of the next handler, there is no need to use the address | 
|  | 3692 | // displacement since the current stack pointer (sp) points directly | 
|  | 3693 | // to the stack handler. | 
|  | 3694 | __ ldr(r3, MemOperand(sp, StackHandlerConstants::kNextOffset)); | 
|  | 3695 | __ mov(ip, Operand(ExternalReference(Top::k_handler_address))); | 
|  | 3696 | __ str(r3, MemOperand(ip)); | 
|  | 3697 | // No need to restore registers | 
|  | 3698 | __ add(sp, sp, Operand(StackHandlerConstants::kSize)); | 
|  | 3699 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3700 | #ifdef ENABLE_LOGGING_AND_PROFILING | 
|  | 3701 | // If current FP value is the same as js_entry_sp value, it means that | 
|  | 3702 | // the current function is the outermost. | 
|  | 3703 | __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 
|  | 3704 | __ ldr(r6, MemOperand(r5)); | 
|  | 3705 | __ cmp(fp, Operand(r6)); | 
|  | 3706 | __ mov(r6, Operand(0, RelocInfo::NONE), LeaveCC, eq); | 
|  | 3707 | __ str(r6, MemOperand(r5), eq); | 
|  | 3708 | #endif | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3709 |  | 
|  | 3710 | __ bind(&exit);  // r0 holds result | 
|  | 3711 | // Restore the top frame descriptors from the stack. | 
|  | 3712 | __ pop(r3); | 
|  | 3713 | __ mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); | 
|  | 3714 | __ str(r3, MemOperand(ip)); | 
|  | 3715 |  | 
|  | 3716 | // Reset the stack to the callee-saved registers. | 
|  | 3717 | __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 
|  | 3718 |  | 
|  | 3719 | // Restore callee-saved registers and return. | 
|  | 3720 | #ifdef DEBUG | 
|  | 3721 | if (FLAG_debug_code) { | 
|  | 3722 | __ mov(lr, Operand(pc)); | 
|  | 3723 | } | 
|  | 3724 | #endif | 
|  | 3725 | __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); | 
|  | 3726 | } | 
|  | 3727 |  | 
|  | 3728 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3729 | // Uses registers r0 to r4. | 
|  | 3730 | // Expected input (depending on whether args are in registers or on the stack): | 
|  | 3731 | // * object: r0 or at sp + 1 * kPointerSize. | 
|  | 3732 | // * function: r1 or at sp. | 
|  | 3733 | // | 
|  | 3734 | // An inlined call site may have been generated before calling this stub. | 
|  | 3735 | // In this case the offset to the inline site to patch is passed on the stack, | 
|  | 3736 | // in the safepoint slot for register r4. | 
|  | 3737 | // (See LCodeGen::DoInstanceOfKnownGlobal) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3738 | void InstanceofStub::Generate(MacroAssembler* masm) { | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3739 | // Call site inlining and patching implies arguments in registers. | 
|  | 3740 | ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); | 
|  | 3741 | // ReturnTrueFalse is only implemented for inlined call sites. | 
|  | 3742 | ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); | 
|  | 3743 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3744 | // Fixed register usage throughout the stub: | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 3745 | const Register object = r0;  // Object (lhs). | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3746 | Register map = r3;  // Map of the object. | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 3747 | const Register function = r1;  // Function (rhs). | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3748 | const Register prototype = r4;  // Prototype of the function. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3749 | const Register inline_site = r9; | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3750 | const Register scratch = r2; | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3751 |  | 
|  | 3752 | const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize; | 
|  | 3753 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3754 | Label slow, loop, is_instance, is_not_instance, not_js_object; | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3755 |  | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3756 | if (!HasArgsInRegisters()) { | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 3757 | __ ldr(object, MemOperand(sp, 1 * kPointerSize)); | 
|  | 3758 | __ ldr(function, MemOperand(sp, 0)); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3759 | } | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3760 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3761 | // Check that the left hand is a JS object and load map. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3762 | __ JumpIfSmi(object, ¬_js_object); | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 3763 | __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3764 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3765 | // If there is a call site cache, don't look in the global cache, but do the | 
|  | 3766 | // real lookup and update the call site cache. | 
|  | 3767 | if (!HasCallSiteInlineCheck()) { | 
|  | 3768 | Label miss; | 
|  | 3769 | __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); | 
|  | 3770 | __ cmp(function, ip); | 
|  | 3771 | __ b(ne, &miss); | 
|  | 3772 | __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); | 
|  | 3773 | __ cmp(map, ip); | 
|  | 3774 | __ b(ne, &miss); | 
|  | 3775 | __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 
|  | 3776 | __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3777 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3778 | __ bind(&miss); | 
|  | 3779 | } | 
|  | 3780 |  | 
|  | 3781 | // Get the prototype of the function. | 
| Steve Block | 9fac840 | 2011-05-12 15:51:54 +0100 | [diff] [blame] | 3782 | __ TryGetFunctionPrototype(function, prototype, scratch, &slow); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3783 |  | 
|  | 3784 | // Check that the function prototype is a JS object. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3785 | __ JumpIfSmi(prototype, &slow); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3786 | __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3787 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3788 | // Update the global instanceof cache or the inlined call site cache with the | 
|  | 3789 | // current map and function. The cached answer will be set when it is known below. | 
|  | 3790 | if (!HasCallSiteInlineCheck()) { | 
|  | 3791 | __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 
|  | 3792 | __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 
|  | 3793 | } else { | 
|  | 3794 | ASSERT(HasArgsInRegisters()); | 
|  | 3795 | // Patch the (relocated) inlined map check. | 
|  | 3796 |  | 
|  | 3797 | // The offset was stored in r4 safepoint slot. | 
|  | 3798 | // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) | 
|  | 3799 | __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); | 
|  | 3800 | __ sub(inline_site, lr, scratch); | 
|  | 3801 | // Get the map location in scratch and patch it. | 
|  | 3802 | __ GetRelocatedValueLocation(inline_site, scratch); | 
|  | 3803 | __ str(map, MemOperand(scratch)); | 
|  | 3804 | } | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3805 |  | 
|  | 3806 | // Register mapping: r3 is object map and r4 is function prototype. | 
|  | 3807 | // Get prototype of object into r2. | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3808 | __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3809 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3810 | // We don't need map any more. Use it as a scratch register. | 
|  | 3811 | Register scratch2 = map; | 
|  | 3812 | map = no_reg; | 
|  | 3813 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3814 | // Loop through the prototype chain looking for the function prototype. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3815 | __ LoadRoot(scratch2, Heap::kNullValueRootIndex); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3816 | __ bind(&loop); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3817 | __ cmp(scratch, Operand(prototype)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3818 | __ b(eq, &is_instance); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3819 | __ cmp(scratch, scratch2); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3820 | __ b(eq, &is_not_instance); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3821 | __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 
|  | 3822 | __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3823 | __ jmp(&loop); | 
|  | 3824 |  | 
|  | 3825 | __ bind(&is_instance); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3826 | if (!HasCallSiteInlineCheck()) { | 
|  | 3827 | __ mov(r0, Operand(Smi::FromInt(0))); | 
|  | 3828 | __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 
|  | 3829 | } else { | 
|  | 3830 | // Patch the call site to return true. | 
|  | 3831 | __ LoadRoot(r0, Heap::kTrueValueRootIndex); | 
|  | 3832 | __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 
|  | 3833 | // Get the boolean result location in scratch and patch it. | 
|  | 3834 | __ GetRelocatedValueLocation(inline_site, scratch); | 
|  | 3835 | __ str(r0, MemOperand(scratch)); | 
|  | 3836 |  | 
|  | 3837 | if (!ReturnTrueFalseObject()) { | 
|  | 3838 | __ mov(r0, Operand(Smi::FromInt(0))); | 
|  | 3839 | } | 
|  | 3840 | } | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3841 | __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3842 |  | 
|  | 3843 | __ bind(&is_not_instance); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3844 | if (!HasCallSiteInlineCheck()) { | 
|  | 3845 | __ mov(r0, Operand(Smi::FromInt(1))); | 
|  | 3846 | __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 
|  | 3847 | } else { | 
|  | 3848 | // Patch the call site to return false. | 
|  | 3849 | __ LoadRoot(r0, Heap::kFalseValueRootIndex); | 
|  | 3850 | __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 
|  | 3851 | // Get the boolean result location in scratch and patch it. | 
|  | 3852 | __ GetRelocatedValueLocation(inline_site, scratch); | 
|  | 3853 | __ str(r0, MemOperand(scratch)); | 
|  | 3854 |  | 
|  | 3855 | if (!ReturnTrueFalseObject()) { | 
|  | 3856 | __ mov(r0, Operand(Smi::FromInt(1))); | 
|  | 3857 | } | 
|  | 3858 | } | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3859 | __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3860 |  | 
|  | 3861 | Label object_not_null, object_not_null_or_smi; | 
|  | 3862 | __ bind(¬_js_object); | 
|  | 3863 | // Before the null, smi and string value checks, check that the rhs is a | 
|  | 3864 | // function, since for a non-function rhs an exception needs to be thrown. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3865 | __ JumpIfSmi(function, &slow); | 
|  | 3866 | __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3867 | __ b(ne, &slow); | 
|  | 3868 |  | 
|  | 3869 | // Null is not an instance of anything. Compare the lhs object itself; | 
|  |  | // scratch only holds the function's instance type at this point. | 
|  | 3870 | __ cmp(object, Operand(Factory::null_value())); | 
|  | 3871 | __ b(ne, &object_not_null); | 
|  | 3872 | __ mov(r0, Operand(Smi::FromInt(1))); | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3873 | __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3874 |  | 
|  | 3875 | __ bind(&object_not_null); | 
|  | 3876 | // Smi values are not instances of anything. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3877 | __ JumpIfNotSmi(object, &object_not_null_or_smi); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3878 | __ mov(r0, Operand(Smi::FromInt(1))); | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3879 | __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 3880 |  | 
|  | 3881 | __ bind(&object_not_null_or_smi); | 
|  | 3882 | // String values are not instances of anything. | 
|  | 3883 | __ IsObjectJSStringType(object, scratch, &slow); | 
|  | 3884 | __ mov(r0, Operand(Smi::FromInt(1))); | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3885 | __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3886 |  | 
|  | 3887 | // Slow-case.  Tail call builtin. | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3888 | __ bind(&slow); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3889 | if (!ReturnTrueFalseObject()) { | 
|  | 3890 | if (HasArgsInRegisters()) { | 
|  | 3891 | __ Push(r0, r1); | 
|  | 3892 | } | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3893 | __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3894 | } else { | 
|  | 3895 | __ EnterInternalFrame(); | 
|  | 3896 | __ Push(r0, r1); | 
|  | 3897 | __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS); | 
|  | 3898 | __ LeaveInternalFrame(); | 
|  | 3899 | __ cmp(r0, Operand(0)); | 
|  | 3900 | __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); | 
|  | 3901 | __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); | 
|  | 3902 | __ Ret(HasArgsInRegisters() ? 0 : 2); | 
|  | 3903 | } | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3904 | } | 
|  | 3905 |  | 
|  | 3906 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3907 | Register InstanceofStub::left() { return r0; } | 
|  | 3908 |  | 
|  | 3909 |  | 
|  | 3910 | Register InstanceofStub::right() { return r1; } | 
|  | 3911 |  | 
|  | 3912 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3913 | void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 
|  | 3914 | // The displacement is the offset of the last parameter (if any) | 
|  | 3915 | // relative to the frame pointer. | 
|  | 3916 | static const int kDisplacement = | 
|  | 3917 | StandardFrameConstants::kCallerSPOffset - kPointerSize; | 
|  | 3918 |  | 
|  | 3919 | // Check that the key is a smi. | 
|  | 3920 | Label slow; | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 3921 | __ JumpIfNotSmi(r1, &slow); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3922 |  | 
|  | 3923 | // Check if the calling frame is an arguments adaptor frame. | 
|  | 3924 | Label adaptor; | 
|  | 3925 | __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
|  | 3926 | __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | 
|  | 3927 | __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
|  | 3928 | __ b(eq, &adaptor); | 
|  | 3929 |  | 
|  | 3930 | // Check the index against the formal parameter count limit passed in | 
|  | 3931 | // through register r0. Use unsigned comparison to get negative | 
|  | 3932 | // check for free. | 
|  | 3933 | __ cmp(r1, r0); | 
| Ben Murdoch | 086aeea | 2011-05-13 15:57:08 +0100 | [diff] [blame] | 3934 | __ b(hs, &slow); | 
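|  |  | // Both values are smis; a negative key, viewed as unsigned, is larger than | 
|  |  | // any argument count, so hs also routes negative indices to the slow case. | 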
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3935 |  | 
|  | 3936 | // Read the argument from the stack and return it. | 
|  | 3937 | __ sub(r3, r0, r1); | 
|  | 3938 | __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 
|  | 3939 | __ ldr(r0, MemOperand(r3, kDisplacement)); | 
|  | 3940 | __ Jump(lr); | 
|  | 3941 |  | 
|  | 3942 | // Arguments adaptor case: Check index against actual arguments | 
|  | 3943 | // limit found in the arguments adaptor frame. Use unsigned | 
|  | 3944 | // comparison to get negative check for free. | 
|  | 3945 | __ bind(&adaptor); | 
|  | 3946 | __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
|  | 3947 | __ cmp(r1, r0); | 
|  | 3948 | __ b(hs, &slow); | 
|  | 3949 |  | 
|  | 3950 | // Read the argument from the adaptor frame and return it. | 
|  | 3951 | __ sub(r3, r0, r1); | 
|  | 3952 | __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 
|  | 3953 | __ ldr(r0, MemOperand(r3, kDisplacement)); | 
|  | 3954 | __ Jump(lr); | 
|  | 3955 |  | 
|  | 3956 | // Slow-case: Handle non-smi or out-of-bounds access to arguments | 
|  | 3957 | // by calling the runtime system. | 
|  | 3958 | __ bind(&slow); | 
|  | 3959 | __ push(r1); | 
|  | 3960 | __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 
|  | 3961 | } | 
|  | 3962 |  | 
|  | 3963 |  | 
|  | 3964 | void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { | 
|  | 3965 | // sp[0] : number of parameters | 
|  | 3966 | // sp[4] : receiver displacement | 
|  | 3967 | // sp[8] : function | 
|  | 3968 |  | 
|  | 3969 | // Check if the calling frame is an arguments adaptor frame. | 
|  | 3970 | Label adaptor_frame, try_allocate, runtime; | 
|  | 3971 | __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
|  | 3972 | __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | 
|  | 3973 | __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
|  | 3974 | __ b(eq, &adaptor_frame); | 
|  | 3975 |  | 
|  | 3976 | // Get the length from the frame. | 
|  | 3977 | __ ldr(r1, MemOperand(sp, 0)); | 
|  | 3978 | __ b(&try_allocate); | 
|  | 3979 |  | 
|  | 3980 | // Patch the arguments.length and the parameters pointer. | 
|  | 3981 | __ bind(&adaptor_frame); | 
|  | 3982 | __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
|  | 3983 | __ str(r1, MemOperand(sp, 0)); | 
|  | 3984 | __ add(r3, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize)); | 
|  | 3985 | __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 
|  | 3986 | __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 
|  | 3987 |  | 
|  | 3988 | // Try the new space allocation. Start out with computing the size | 
|  | 3989 | // of the arguments object and the elements array in words. | 
|  | 3990 | Label add_arguments_object; | 
|  | 3991 | __ bind(&try_allocate); | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 3992 | __ cmp(r1, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 3993 | __ b(eq, &add_arguments_object); | 
|  | 3994 | __ mov(r1, Operand(r1, LSR, kSmiTagSize)); | 
|  | 3995 | __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); | 
|  | 3996 | __ bind(&add_arguments_object); | 
|  | 3997 | __ add(r1, r1, Operand(Heap::kArgumentsObjectSize / kPointerSize)); | 
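|  |  | // r1 now holds the total allocation size in words: the arguments object | 
|  |  | // plus, if there are arguments, a FixedArray header and one word per | 
|  |  | // argument. | 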
|  | 3998 |  | 
|  | 3999 | // Do the allocation of both objects in one go. | 
|  | 4000 | __ AllocateInNewSpace( | 
|  | 4001 | r1, | 
|  | 4002 | r0, | 
|  | 4003 | r2, | 
|  | 4004 | r3, | 
|  | 4005 | &runtime, | 
|  | 4006 | static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 
|  | 4007 |  | 
|  | 4008 | // Get the arguments boilerplate from the current (global) context. | 
|  | 4009 | int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX); | 
|  | 4010 | __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); | 
|  | 4011 | __ ldr(r4, FieldMemOperand(r4, GlobalObject::kGlobalContextOffset)); | 
|  | 4012 | __ ldr(r4, MemOperand(r4, offset)); | 
|  | 4013 |  | 
|  | 4014 | // Copy the JS object part. | 
|  | 4015 | __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize); | 
|  | 4016 |  | 
|  | 4017 | // Set up the callee in-object property. | 
|  | 4018 | STATIC_ASSERT(Heap::arguments_callee_index == 0); | 
|  | 4019 | __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); | 
|  | 4020 | __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize)); | 
|  | 4021 |  | 
|  | 4022 | // Get the length (smi tagged) and set that as an in-object property too. | 
|  | 4023 | STATIC_ASSERT(Heap::arguments_length_index == 1); | 
|  | 4024 | __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); | 
|  | 4025 | __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize)); | 
|  | 4026 |  | 
|  | 4027 | // If there are no actual arguments, we're done. | 
|  | 4028 | Label done; | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 4029 | __ cmp(r1, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4030 | __ b(eq, &done); | 
|  | 4031 |  | 
|  | 4032 | // Get the parameters pointer from the stack. | 
|  | 4033 | __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); | 
|  | 4034 |  | 
|  | 4035 | // Set up the elements pointer in the allocated arguments object and | 
|  | 4036 | // initialize the header in the elements fixed array. | 
|  | 4037 | __ add(r4, r0, Operand(Heap::kArgumentsObjectSize)); | 
|  | 4038 | __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); | 
|  | 4039 | __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); | 
|  | 4040 | __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); | 
|  | 4041 | __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); | 
|  | 4042 | __ mov(r1, Operand(r1, LSR, kSmiTagSize));  // Untag the length for the loop. | 
|  | 4043 |  | 
|  | 4044 | // Copy the fixed array slots. | 
|  | 4045 | Label loop; | 
|  | 4046 | // Set up r4 to point to the first array slot. | 
|  | 4047 | __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
|  | 4048 | __ bind(&loop); | 
|  | 4049 | // Pre-decrement r2 with kPointerSize on each iteration. | 
|  | 4050 | // Pre-decrement in order to skip receiver. | 
|  | 4051 | __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); | 
|  | 4052 | // Post-increment r4 with kPointerSize on each iteration. | 
|  | 4053 | __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); | 
|  | 4054 | __ sub(r1, r1, Operand(1)); | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 4055 | __ cmp(r1, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4056 | __ b(ne, &loop); | 
|  | 4057 |  | 
|  | 4058 | // Return and remove the on-stack parameters. | 
|  | 4059 | __ bind(&done); | 
|  | 4060 | __ add(sp, sp, Operand(3 * kPointerSize)); | 
|  | 4061 | __ Ret(); | 
|  | 4062 |  | 
|  | 4063 | // Do the runtime call to allocate the arguments object. | 
|  | 4064 | __ bind(&runtime); | 
|  | 4065 | __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 
|  | 4066 | } | 
|  | 4067 |  | 
|  | 4068 |  | 
|  | 4069 | void RegExpExecStub::Generate(MacroAssembler* masm) { | 
|  | 4070 | // Just jump directly to the runtime if native RegExp is not selected at | 
|  | 4071 | // compile time (V8_INTERPRETED_REGEXP) or if entry into generated regexp | 
|  | 4072 | // code is turned off by the runtime switch. | 
|  | 4073 | #ifdef V8_INTERPRETED_REGEXP | 
|  | 4074 | __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 
|  | 4075 | #else  // V8_INTERPRETED_REGEXP | 
|  | 4076 | if (!FLAG_regexp_entry_native) { | 
|  | 4077 | __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 
|  | 4078 | return; | 
|  | 4079 | } | 
|  | 4080 |  | 
|  | 4081 | // Stack frame on entry. | 
|  | 4082 | //  sp[0]: last_match_info (expected JSArray) | 
|  | 4083 | //  sp[4]: previous index | 
|  | 4084 | //  sp[8]: subject string | 
|  | 4085 | //  sp[12]: JSRegExp object | 
|  | 4086 |  | 
|  | 4087 | static const int kLastMatchInfoOffset = 0 * kPointerSize; | 
|  | 4088 | static const int kPreviousIndexOffset = 1 * kPointerSize; | 
|  | 4089 | static const int kSubjectOffset = 2 * kPointerSize; | 
|  | 4090 | static const int kJSRegExpOffset = 3 * kPointerSize; | 
|  | 4091 |  | 
|  | 4092 | Label runtime, invoke_regexp; | 
|  | 4093 |  | 
|  | 4094 | // Allocation of registers for this function. These are in callee-saved | 
|  | 4095 | // registers and will be preserved by the call to the native RegExp code, as | 
|  | 4096 | // this code is called using the normal C calling convention. When calling | 
|  | 4097 | // directly from generated code the native RegExp code will not do a GC and | 
|  | 4098 | // therefore the contents of these registers are safe to use after the call. | 
|  | 4099 | Register subject = r4; | 
|  | 4100 | Register regexp_data = r5; | 
|  | 4101 | Register last_match_info_elements = r6; | 
|  | 4102 |  | 
|  | 4103 | // Ensure that a RegExp stack is allocated. | 
|  | 4104 | ExternalReference address_of_regexp_stack_memory_address = | 
|  | 4105 | ExternalReference::address_of_regexp_stack_memory_address(); | 
|  | 4106 | ExternalReference address_of_regexp_stack_memory_size = | 
|  | 4107 | ExternalReference::address_of_regexp_stack_memory_size(); | 
|  | 4108 | __ mov(r0, Operand(address_of_regexp_stack_memory_size)); | 
|  | 4109 | __ ldr(r0, MemOperand(r0, 0)); | 
|  | 4110 | __ tst(r0, Operand(r0)); | 
|  | 4111 | __ b(eq, &runtime); | 
|  | 4112 |  | 
|  | 4113 | // Check that the first argument is a JSRegExp object. | 
|  | 4114 | __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); | 
|  | 4115 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 4116 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 4117 | __ b(eq, &runtime); | 
|  | 4118 | __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); | 
|  | 4119 | __ b(ne, &runtime); | 
|  | 4120 |  | 
|  | 4121 | // Check that the RegExp has been compiled (data contains a fixed array). | 
|  | 4122 | __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); | 
|  | 4123 | if (FLAG_debug_code) { | 
|  | 4124 | __ tst(regexp_data, Operand(kSmiTagMask)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4125 | __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4126 | __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); | 
|  | 4127 | __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); | 
|  | 4128 | } | 
|  | 4129 |  | 
|  | 4130 | // regexp_data: RegExp data (FixedArray) | 
|  | 4131 | // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 
|  | 4132 | __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 
|  | 4133 | __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | 
|  | 4134 | __ b(ne, &runtime); | 
|  | 4135 |  | 
|  | 4136 | // regexp_data: RegExp data (FixedArray) | 
|  | 4137 | // Check that the number of captures fit in the static offsets vector buffer. | 
|  | 4138 | __ ldr(r2, | 
|  | 4139 | FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 
|  | 4140 | // Calculate number of capture registers (number_of_captures + 1) * 2. This | 
|  | 4141 | // uses the assumption that smis are 2 * their untagged value. | 
|  | 4142 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 4143 | STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 
|  | 4144 | __ add(r2, r2, Operand(2));  // r2 was a smi. | 
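|  |  | // r2 held the capture count as a smi, which is already twice its value, | 
|  |  | // so adding 2 yields (number_of_captures + 1) * 2 without untagging. | 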
|  | 4145 | // Check that the static offsets vector buffer is large enough. | 
|  | 4146 | __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize)); | 
|  | 4147 | __ b(hi, &runtime); | 
|  | 4148 |  | 
|  | 4149 | // r2: Number of capture registers | 
|  | 4150 | // regexp_data: RegExp data (FixedArray) | 
|  | 4151 | // Check that the second argument is a string. | 
|  | 4152 | __ ldr(subject, MemOperand(sp, kSubjectOffset)); | 
|  | 4153 | __ tst(subject, Operand(kSmiTagMask)); | 
|  | 4154 | __ b(eq, &runtime); | 
|  | 4155 | Condition is_string = masm->IsObjectStringType(subject, r0); | 
|  | 4156 | __ b(NegateCondition(is_string), &runtime); | 
|  | 4157 | // Get the length of the string to r3. | 
|  | 4158 | __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); | 
|  | 4159 |  | 
|  | 4160 | // r2: Number of capture registers | 
|  | 4161 | // r3: Length of subject string as a smi | 
|  | 4162 | // subject: Subject string | 
|  | 4163 | // regexp_data: RegExp data (FixedArray) | 
|  | 4164 | // Check that the third argument is a positive smi less than the subject | 
|  | 4165 | // string length. A negative value will be greater (unsigned comparison). | 
|  | 4166 | __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); | 
|  | 4167 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 4168 | __ b(ne, &runtime); | 
|  | 4169 | __ cmp(r3, Operand(r0)); | 
|  | 4170 | __ b(ls, &runtime); | 
|  | 4171 |  | 
|  | 4172 | // r2: Number of capture registers | 
|  | 4173 | // subject: Subject string | 
|  | 4174 | // regexp_data: RegExp data (FixedArray) | 
|  | 4175 | // Check that the fourth object is a JSArray object. | 
|  | 4176 | __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 
|  | 4177 | __ tst(r0, Operand(kSmiTagMask)); | 
|  | 4178 | __ b(eq, &runtime); | 
|  | 4179 | __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); | 
|  | 4180 | __ b(ne, &runtime); | 
|  | 4181 | // Check that the JSArray is in fast case. | 
|  | 4182 | __ ldr(last_match_info_elements, | 
|  | 4183 | FieldMemOperand(r0, JSArray::kElementsOffset)); | 
|  | 4184 | __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 
|  | 4185 | __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 
|  | 4186 | __ cmp(r0, ip); | 
|  | 4187 | __ b(ne, &runtime); | 
|  | 4188 | // Check that the last match info has space for the capture registers and the | 
|  | 4189 | // additional information. | 
|  | 4190 | __ ldr(r0, | 
|  | 4191 | FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 
|  | 4192 | __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); | 
|  | 4193 | __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); | 
|  | 4194 | __ b(gt, &runtime); | 
|  | 4195 |  | 
|  | 4196 | // subject: Subject string | 
|  | 4197 | // regexp_data: RegExp data (FixedArray) | 
|  | 4198 | // Check the representation and encoding of the subject string. | 
|  | 4199 | Label seq_string; | 
|  | 4200 | __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
|  | 4201 | __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 
|  | 4202 | // First check for flat string. | 
|  | 4203 | __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask)); | 
|  | 4204 | STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | 
|  | 4205 | __ b(eq, &seq_string); | 
|  | 4206 |  | 
|  | 4207 | // subject: Subject string | 
|  | 4208 | // regexp_data: RegExp data (FixedArray) | 
|  | 4209 | // Check for flat cons string. | 
|  | 4210 | // A flat cons string is a cons string where the second part is the empty | 
|  | 4211 | // string. In that case the subject string is just the first part of the cons | 
|  | 4212 | // string. Also in this case the first part of the cons string is known to be | 
|  | 4213 | // a sequential string or an external string. | 
|  | 4214 | STATIC_ASSERT(kExternalStringTag != 0); | 
|  | 4215 | STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0); | 
|  | 4216 | __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag)); | 
|  | 4217 | __ b(ne, &runtime); | 
|  | 4218 | __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); | 
|  | 4219 | __ LoadRoot(r1, Heap::kEmptyStringRootIndex); | 
|  | 4220 | __ cmp(r0, r1); | 
|  | 4221 | __ b(ne, &runtime); | 
|  | 4222 | __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 
|  | 4223 | __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
|  | 4224 | __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 
|  | 4225 | // Is first part a flat string? | 
|  | 4226 | STATIC_ASSERT(kSeqStringTag == 0); | 
|  | 4227 | __ tst(r0, Operand(kStringRepresentationMask)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4228 | __ b(ne, &runtime); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4229 |  | 
|  | 4230 | __ bind(&seq_string); | 
|  | 4231 | // subject: Subject string | 
|  | 4232 | // regexp_data: RegExp data (FixedArray) | 
|  | 4233 | // r0: Instance type of subject string | 
|  | 4234 | STATIC_ASSERT(4 == kAsciiStringTag); | 
|  | 4235 | STATIC_ASSERT(kTwoByteStringTag == 0); | 
|  | 4236 | // Find the code object based on the assumptions above. | 
|  | 4237 | __ and_(r0, r0, Operand(kStringEncodingMask)); | 
|  | 4238 | __ mov(r3, Operand(r0, ASR, 2), SetCC); | 
|  | 4239 | __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); | 
|  | 4240 | __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); | 
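|  |  | // r3 is now 1 for ASCII, 0 for two-byte; SetCC above lets the conditional | 
|  |  | // loads pick the matching code object (ne = ASCII, eq = two-byte). | 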
|  | 4241 |  | 
|  | 4242 | // Check that the irregexp code has been generated for the actual string | 
|  | 4243 | // encoding. If it has, the field contains a code object; otherwise it contains | 
|  | 4244 | // the hole. | 
|  | 4245 | __ CompareObjectType(r7, r0, r0, CODE_TYPE); | 
|  | 4246 | __ b(ne, &runtime); | 
|  | 4247 |  | 
|  | 4248 | // r3: encoding of subject string (1 if ascii, 0 if two_byte); | 
|  | 4249 | // r7: code | 
|  | 4250 | // subject: Subject string | 
|  | 4251 | // regexp_data: RegExp data (FixedArray) | 
|  | 4252 | // Load used arguments before starting to push arguments for call to native | 
|  | 4253 | // RegExp code to avoid handling changing stack height. | 
|  | 4254 | __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); | 
|  | 4255 | __ mov(r1, Operand(r1, ASR, kSmiTagSize)); | 
|  | 4256 |  | 
|  | 4257 | // r1: previous index | 
|  | 4258 | // r3: encoding of subject string (1 if ascii, 0 if two_byte); | 
|  | 4259 | // r7: code | 
|  | 4260 | // subject: Subject string | 
|  | 4261 | // regexp_data: RegExp data (FixedArray) | 
|  | 4262 | // All checks done. Now push arguments for native regexp code. | 
|  | 4263 | __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); | 
|  | 4264 |  | 
|  | 4265 | static const int kRegExpExecuteArguments = 7; | 
|  | 4266 | __ push(lr); | 
|  | 4267 | __ PrepareCallCFunction(kRegExpExecuteArguments, r0); | 
|  | 4268 |  | 
|  | 4269 | // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. | 
|  | 4270 | __ mov(r0, Operand(1)); | 
|  | 4271 | __ str(r0, MemOperand(sp, 2 * kPointerSize)); | 
|  | 4272 |  | 
|  | 4273 | // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. | 
|  | 4274 | __ mov(r0, Operand(address_of_regexp_stack_memory_address)); | 
|  | 4275 | __ ldr(r0, MemOperand(r0, 0)); | 
|  | 4276 | __ mov(r2, Operand(address_of_regexp_stack_memory_size)); | 
|  | 4277 | __ ldr(r2, MemOperand(r2, 0)); | 
|  | 4278 | __ add(r0, r0, Operand(r2)); | 
|  | 4279 | __ str(r0, MemOperand(sp, 1 * kPointerSize)); | 
|  | 4280 |  | 
|  | 4281 | // Argument 5 (sp[0]): static offsets vector buffer. | 
|  | 4282 | __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); | 
|  | 4283 | __ str(r0, MemOperand(sp, 0 * kPointerSize)); | 
|  | 4284 |  | 
|  | 4285 | // For arguments 4 and 3, get the string length, calculate the start of the | 
|  | 4286 | // string data and the shift of the index (0 for ASCII, 1 for two-byte). | 
|  | 4287 | __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); | 
|  | 4288 | __ mov(r0, Operand(r0, ASR, kSmiTagSize)); | 
|  | 4289 | STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 
|  | 4290 | __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
|  | 4291 | __ eor(r3, r3, Operand(1)); | 
|  | 4292 | // Argument 4 (r3): End of string data | 
|  | 4293 | // Argument 3 (r2): Start of string data | 
|  | 4294 | __ add(r2, r9, Operand(r1, LSL, r3)); | 
|  | 4295 | __ add(r3, r9, Operand(r0, LSL, r3)); | 
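|  |  | // r1 (previous index) and r0 (length) are character counts; shifting them | 
|  |  | // left by r3 converts them to byte offsets from the string data at r9. | 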
|  | 4296 |  | 
|  | 4297 | // Argument 2 (r1): Previous index. | 
|  | 4298 | // Already there | 
|  | 4299 |  | 
|  | 4300 | // Argument 1 (r0): Subject string. | 
|  | 4301 | __ mov(r0, subject); | 
|  | 4302 |  | 
|  | 4303 | // Locate the code entry and call it. | 
|  | 4304 | __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
|  | 4305 | __ CallCFunction(r7, kRegExpExecuteArguments); | 
|  | 4306 | __ pop(lr); | 
|  | 4307 |  | 
|  | 4308 | // r0: result | 
|  | 4309 | // subject: subject string (callee saved) | 
|  | 4310 | // regexp_data: RegExp data (callee saved) | 
|  | 4311 | // last_match_info_elements: Last match info elements (callee saved) | 
|  | 4312 |  | 
|  | 4313 | // Check the result. | 
|  | 4314 | Label success; | 
|  | 4315 | __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); | 
|  | 4316 | __ b(eq, &success); | 
|  | 4317 | Label failure; | 
|  | 4318 | __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); | 
|  | 4319 | __ b(eq, &failure); | 
|  | 4320 | __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 
|  | 4321 | // If not exception it can only be retry. Handle that in the runtime system. | 
|  | 4322 | __ b(ne, &runtime); | 
|  | 4323 | // The result must now be exception. If there is no pending exception already, | 
|  | 4324 | // a stack overflow (on the backtrack stack) was detected in RegExp code, but | 
|  | 4325 | // the exception has not been created yet. Handle that in the runtime system. | 
|  | 4326 | // TODO(592): Rerun the RegExp to get the stack overflow exception. | 
|  | 4327 | __ mov(r0, Operand(ExternalReference::the_hole_value_location())); | 
|  | 4328 | __ ldr(r0, MemOperand(r0, 0)); | 
|  | 4329 | __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); | 
|  | 4330 | __ ldr(r1, MemOperand(r1, 0)); | 
|  | 4331 | __ cmp(r0, r1); | 
|  | 4332 | __ b(eq, &runtime); | 
|  | 4333 | __ bind(&failure); | 
|  | 4334 | // For failure and exception return null. | 
|  | 4335 | __ mov(r0, Operand(Factory::null_value())); | 
|  | 4336 | __ add(sp, sp, Operand(4 * kPointerSize)); | 
|  | 4337 | __ Ret(); | 
|  | 4338 |  | 
|  | 4339 | // Process the result from the native regexp code. | 
|  | 4340 | __ bind(&success); | 
|  | 4341 | __ ldr(r1, | 
|  | 4342 | FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 
|  | 4343 | // Calculate number of capture registers (number_of_captures + 1) * 2. | 
|  | 4344 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 4345 | STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 
|  | 4346 | __ add(r1, r1, Operand(2));  // r1 was a smi. | 
|  | 4347 |  | 
|  | 4348 | // r1: number of capture registers | 
|  | 4349 | // r4: subject string | 
|  | 4350 | // Store the capture count. | 
|  | 4351 | __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi. | 
|  | 4352 | __ str(r2, FieldMemOperand(last_match_info_elements, | 
|  | 4353 | RegExpImpl::kLastCaptureCountOffset)); | 
|  | 4354 | // Store last subject and last input. | 
|  | 4355 | __ mov(r3, last_match_info_elements);  // Moved up to reduce latency. | 
|  | 4356 | __ str(subject, | 
|  | 4357 | FieldMemOperand(last_match_info_elements, | 
|  | 4358 | RegExpImpl::kLastSubjectOffset)); | 
|  | 4359 | __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7); | 
|  | 4360 | __ str(subject, | 
|  | 4361 | FieldMemOperand(last_match_info_elements, | 
|  | 4362 | RegExpImpl::kLastInputOffset)); | 
|  | 4363 | __ mov(r3, last_match_info_elements); | 
|  | 4364 | __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7); | 
|  | 4365 |  | 
|  | 4366 | // Get the static offsets vector filled by the native regexp code. | 
|  | 4367 | ExternalReference address_of_static_offsets_vector = | 
|  | 4368 | ExternalReference::address_of_static_offsets_vector(); | 
|  | 4369 | __ mov(r2, Operand(address_of_static_offsets_vector)); | 
|  | 4370 |  | 
|  | 4371 | // r1: number of capture registers | 
|  | 4372 | // r2: offsets vector | 
|  | 4373 | Label next_capture, done; | 
|  | 4374 | // The capture register counter starts from the number of capture registers | 
|  | 4375 | // and counts down until wrapping after zero. | 
|  | 4376 | __ add(r0, | 
|  | 4377 | last_match_info_elements, | 
|  | 4378 | Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); | 
|  | 4379 | __ bind(&next_capture); | 
|  | 4380 | __ sub(r1, r1, Operand(1), SetCC); | 
|  | 4381 | __ b(mi, &done); | 
|  | 4382 | // Read the value from the static offsets vector buffer. | 
|  | 4383 | __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); | 
|  | 4384 | // Store the smi value in the last match info. | 
|  | 4385 | __ mov(r3, Operand(r3, LSL, kSmiTagSize)); | 
|  | 4386 | __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); | 
|  | 4387 | __ jmp(&next_capture); | 
|  | 4388 | __ bind(&done); | 
|  | 4389 |  | 
|  | 4390 | // Return last match info. | 
|  | 4391 | __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 
|  | 4392 | __ add(sp, sp, Operand(4 * kPointerSize)); | 
|  | 4393 | __ Ret(); | 
|  | 4394 |  | 
|  | 4395 | // Do the runtime call to execute the regexp. | 
|  | 4396 | __ bind(&runtime); | 
|  | 4397 | __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 
|  | 4398 | #endif  // V8_INTERPRETED_REGEXP | 
|  | 4399 | } | 
|  | 4400 |  | 
|  | 4401 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 4402 | void RegExpConstructResultStub::Generate(MacroAssembler* masm) { | 
|  | 4403 | const int kMaxInlineLength = 100; | 
|  | 4404 | Label slowcase; | 
|  | 4405 | Label done; | 
|  | 4406 | __ ldr(r1, MemOperand(sp, kPointerSize * 2)); | 
|  | 4407 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 4408 | STATIC_ASSERT(kSmiTagSize == 1); | 
|  | 4409 | __ tst(r1, Operand(kSmiTagMask)); | 
|  | 4410 | __ b(ne, &slowcase); | 
|  | 4411 | __ cmp(r1, Operand(Smi::FromInt(kMaxInlineLength))); | 
|  | 4412 | __ b(hi, &slowcase); | 
|  | 4413 | // Smi-tagging is equivalent to multiplying by 2. | 
|  | 4414 | // Allocate RegExpResult followed by FixedArray with size in r2. | 
|  | 4415 | // JSArray:   [Map][empty properties][Elements][Length-smi][index][input] | 
|  | 4416 | // Elements:  [Map][Length][..elements..] | 
|  | 4417 | // Size of JSArray with two in-object properties and the header of a | 
|  | 4418 | // FixedArray. | 
|  | 4419 | int objects_size = | 
|  | 4420 | (JSRegExpResult::kSize + FixedArray::kHeaderSize) / kPointerSize; | 
|  | 4421 | __ mov(r5, Operand(r1, LSR, kSmiTagSize + kSmiShiftSize)); | 
|  | 4422 | __ add(r2, r5, Operand(objects_size)); | 
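|  |  | // r5 = number of elements (untagged); r2 = total size in words: the fixed | 
|  |  | // JSRegExpResult plus FixedArray header plus one word per element. | 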
|  | 4423 | __ AllocateInNewSpace( | 
|  | 4424 | r2,  // In: Size, in words. | 
|  | 4425 | r0,  // Out: Start of allocation (tagged). | 
|  | 4426 | r3,  // Scratch register. | 
|  | 4427 | r4,  // Scratch register. | 
|  | 4428 | &slowcase, | 
|  | 4429 | static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 
|  | 4430 | // r0: Start of allocated area, object-tagged. | 
|  | 4431 | // r1: Number of elements in array, as smi. | 
|  | 4432 | // r5: Number of elements, untagged. | 
|  | 4433 |  | 
|  | 4434 | // Set JSArray map to global.regexp_result_map(). | 
|  | 4435 | // Set empty properties FixedArray. | 
|  | 4436 | // Set elements to point to FixedArray allocated right after the JSArray. | 
|  | 4437 | // Interleave operations for better latency. | 
|  | 4438 | __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX)); | 
|  | 4439 | __ add(r3, r0, Operand(JSRegExpResult::kSize)); | 
|  | 4440 | __ mov(r4, Operand(Factory::empty_fixed_array())); | 
|  | 4441 | __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset)); | 
|  | 4442 | __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); | 
|  | 4443 | __ ldr(r2, ContextOperand(r2, Context::REGEXP_RESULT_MAP_INDEX)); | 
|  | 4444 | __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | 
|  | 4445 | __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 4446 |  | 
|  | 4447 | // Set input, index and length fields from arguments. | 
|  | 4448 | __ ldr(r1, MemOperand(sp, kPointerSize * 0)); | 
|  | 4449 | __ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset)); | 
|  | 4450 | __ ldr(r1, MemOperand(sp, kPointerSize * 1)); | 
|  | 4451 | __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset)); | 
|  | 4452 | __ ldr(r1, MemOperand(sp, kPointerSize * 2)); | 
|  | 4453 | __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset)); | 
|  | 4454 |  | 
|  | 4455 | // Fill out the elements FixedArray. | 
|  | 4456 | // r0: JSArray, tagged. | 
|  | 4457 | // r3: FixedArray, tagged. | 
|  | 4458 | // r5: Number of elements in array, untagged. | 
|  | 4459 |  | 
|  | 4460 | // Set map. | 
|  | 4461 | __ mov(r2, Operand(Factory::fixed_array_map())); | 
|  | 4462 | __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 
|  | 4463 | // Set FixedArray length. | 
|  | 4464 | __ mov(r6, Operand(r5, LSL, kSmiTagSize)); | 
|  | 4465 | __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 
|  | 4466 | // Fill the contents of the FixedArray with the hole value. | 
|  | 4467 | __ mov(r2, Operand(Factory::the_hole_value())); | 
|  | 4468 | __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
|  | 4469 | // Fill fixed array elements with hole. | 
|  | 4470 | // r0: JSArray, tagged. | 
|  | 4471 | // r2: the hole. | 
|  | 4472 | // r3: Start of elements in FixedArray. | 
|  | 4473 | // r5: Number of elements to fill. | 
|  | 4474 | Label loop; | 
|  | 4475 | __ tst(r5, Operand(r5)); | 
|  | 4476 | __ bind(&loop); | 
|  | 4477 | __ b(le, &done);  // Jump if r5 is negative or zero. | 
|  | 4478 | __ sub(r5, r5, Operand(1), SetCC); | 
|  | 4479 | __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)); | 
|  | 4480 | __ jmp(&loop); | 
|  | 4481 |  | 
|  | 4482 | __ bind(&done); | 
|  | 4483 | __ add(sp, sp, Operand(3 * kPointerSize)); | 
|  | 4484 | __ Ret(); | 
|  | 4485 |  | 
|  | 4486 | __ bind(&slowcase); | 
|  | 4487 | __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1); | 
|  | 4488 | } | 
|  | 4489 |  | 
|  | 4490 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4491 | void CallFunctionStub::Generate(MacroAssembler* masm) { | 
|  | 4492 | Label slow; | 
|  | 4493 |  | 
|  | 4494 | // If the receiver might be a value (string, number or boolean), check for this | 
|  | 4495 | // and box it if it is. | 
|  | 4496 | if (ReceiverMightBeValue()) { | 
|  | 4497 | // Get the receiver from the stack. | 
|  | 4498 | // function, receiver [, arguments] | 
|  | 4499 | Label receiver_is_value, receiver_is_js_object; | 
|  | 4500 | __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); | 
|  | 4501 |  | 
|  | 4502 | // Check if receiver is a smi (which is a number value). | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4503 | __ JumpIfSmi(r1, &receiver_is_value); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4504 |  | 
|  | 4505 | // Check if the receiver is a valid JS object. | 
|  | 4506 | __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); | 
|  | 4507 | __ b(ge, &receiver_is_js_object); | 
|  | 4508 |  | 
|  | 4509 | // Call the runtime to box the value. | 
|  | 4510 | __ bind(&receiver_is_value); | 
|  | 4511 | __ EnterInternalFrame(); | 
|  | 4512 | __ push(r1); | 
|  | 4513 | __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); | 
|  | 4514 | __ LeaveInternalFrame(); | 
|  | 4515 | __ str(r0, MemOperand(sp, argc_ * kPointerSize)); | 
|  | 4516 |  | 
|  | 4517 | __ bind(&receiver_is_js_object); | 
|  | 4518 | } | 
|  | 4519 |  | 
|  | 4520 | // Get the function to call from the stack. | 
|  | 4521 | // function, receiver [, arguments] | 
|  | 4522 | __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); | 
|  | 4523 |  | 
|  | 4524 | // Check that the function is really a JavaScript function. | 
|  | 4525 | // r1: pushed function (to be verified) | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4526 | __ JumpIfSmi(r1, &slow); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4527 | // Get the map of the function object. | 
|  | 4528 | __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); | 
|  | 4529 | __ b(ne, &slow); | 
|  | 4530 |  | 
|  | 4531 | // Fast-case: Invoke the function now. | 
|  | 4532 | // r1: pushed function | 
|  | 4533 | ParameterCount actual(argc_); | 
|  | 4534 | __ InvokeFunction(r1, actual, JUMP_FUNCTION); | 
|  | 4535 |  | 
|  | 4536 | // Slow-case: Non-function called. | 
|  | 4537 | __ bind(&slow); | 
|  | 4538 | // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | 
|  | 4539 | // of the original receiver from the call site). | 
|  | 4540 | __ str(r1, MemOperand(sp, argc_ * kPointerSize)); | 
|  | 4541 | __ mov(r0, Operand(argc_));  // Set up the number of arguments. | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 4542 | __ mov(r2, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4543 | __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | 
|  | 4544 | __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | 
|  | 4545 | RelocInfo::CODE_TARGET); | 
|  | 4546 | } | 
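// In outline, the stub above implements the generic call sequence (a sketch
// in pseudo-C++, not V8's actual runtime API):
//
//   if (receiver_might_be_value) {
//     if (!IsSmi(receiver) && IsJSObject(receiver)) { /* use it as is */ }
//     else receiver = ToObject(receiver);  // box string/number/boolean
//   }
//   if (IsSmi(function) || !IsJSFunction(function))
//     return CallNonFunction(function, argc);  // via the adaptor trampoline
//   return InvokeFunction(function, argc);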
|  | 4547 |  | 
|  | 4548 |  | 
|  | 4549 | // Unfortunately you have to run without snapshots to see most of these | 
|  | 4550 | // names in the profile since most compare stubs end up in the snapshot. | 
|  | 4551 | const char* CompareStub::GetName() { | 
|  | 4552 | ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | 
|  | 4553 | (lhs_.is(r1) && rhs_.is(r0))); | 
|  | 4554 |  | 
|  | 4555 | if (name_ != NULL) return name_; | 
|  | 4556 | const int kMaxNameLength = 100; | 
|  | 4557 | name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); | 
|  | 4558 | if (name_ == NULL) return "OOM"; | 
|  | 4559 |  | 
|  | 4560 | const char* cc_name; | 
|  | 4561 | switch (cc_) { | 
|  | 4562 | case lt: cc_name = "LT"; break; | 
|  | 4563 | case gt: cc_name = "GT"; break; | 
|  | 4564 | case le: cc_name = "LE"; break; | 
|  | 4565 | case ge: cc_name = "GE"; break; | 
|  | 4566 | case eq: cc_name = "EQ"; break; | 
|  | 4567 | case ne: cc_name = "NE"; break; | 
|  | 4568 | default: cc_name = "UnknownCondition"; break; | 
|  | 4569 | } | 
|  | 4570 |  | 
|  | 4571 | const char* lhs_name = lhs_.is(r0) ? "_r0" : "_r1"; | 
|  | 4572 | const char* rhs_name = rhs_.is(r0) ? "_r0" : "_r1"; | 
|  | 4573 |  | 
|  | 4574 | const char* strict_name = ""; | 
|  | 4575 | if (strict_ && (cc_ == eq || cc_ == ne)) { | 
|  | 4576 | strict_name = "_STRICT"; | 
|  | 4577 | } | 
|  | 4578 |  | 
|  | 4579 | const char* never_nan_nan_name = ""; | 
|  | 4580 | if (never_nan_nan_ && (cc_ == eq || cc_ == ne)) { | 
|  | 4581 | never_nan_nan_name = "_NO_NAN"; | 
|  | 4582 | } | 
|  | 4583 |  | 
|  | 4584 | const char* include_number_compare_name = ""; | 
|  | 4585 | if (!include_number_compare_) { | 
|  | 4586 | include_number_compare_name = "_NO_NUMBER"; | 
|  | 4587 | } | 
|  | 4588 |  | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 4589 | const char* include_smi_compare_name = ""; | 
|  | 4590 | if (!include_smi_compare_) { | 
|  | 4591 | include_smi_compare_name = "_NO_SMI"; | 
|  | 4592 | } | 
|  | 4593 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4594 | OS::SNPrintF(Vector<char>(name_, kMaxNameLength), | 
|  | 4595 | "CompareStub_%s%s%s%s%s%s", | 
|  | 4596 | cc_name, | 
|  | 4597 | lhs_name, | 
|  | 4598 | rhs_name, | 
|  | 4599 | strict_name, | 
|  | 4600 | never_nan_nan_name, | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 4601 | include_number_compare_name, | 
|  | 4602 | include_smi_compare_name); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4603 | return name_; | 
|  | 4604 | } | 
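// For example, given the format string above, a strict equality stub with
// lhs in r1 and NaN handling omitted would be named
//   CompareStub_EQ_r1_r0_STRICT_NO_NAN
// while a plain less-than stub with lhs in r0 and no optional suffixes is
//   CompareStub_LT_r0_r1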
|  | 4605 |  | 
|  | 4606 |  | 
|  | 4607 | int CompareStub::MinorKey() { | 
|  | 4608 | // Encode the stub parameters in a unique 16 bit value. To avoid duplicate | 
|  | 4609 | // stubs the never NaN NaN condition is only taken into account if the | 
|  | 4610 | // condition is equals. | 
|  | 4611 | ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12)); | 
|  | 4612 | ASSERT((lhs_.is(r0) && rhs_.is(r1)) || | 
|  | 4613 | (lhs_.is(r1) && rhs_.is(r0))); | 
|  | 4614 | return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) | 
|  | 4615 | | RegisterField::encode(lhs_.is(r0)) | 
|  | 4616 | | StrictField::encode(strict_) | 
|  | 4617 | | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 4618 | | IncludeNumberCompareField::encode(include_number_compare_) | 
|  | 4619 | | IncludeSmiCompareField::encode(include_smi_compare_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4620 | } | 
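// Roughly what the BitField encoding above amounts to: pack each parameter
// into its own slice of one small integer so that every distinct stub
// configuration gets a distinct key. Field positions and widths below are
// illustrative, not V8's actual BitField layout.

#include <cstdint>

uint32_t EncodeCompareStubKey(uint32_t condition, bool lhs_is_r0, bool strict,
                              bool never_nan_nan, bool include_number_compare,
                              bool include_smi_compare) {
  uint32_t key = condition & 0xF;  // ARM condition code bits (cc_ >> 28)
  key |= static_cast<uint32_t>(lhs_is_r0) << 4;
  key |= static_cast<uint32_t>(strict) << 5;
  key |= static_cast<uint32_t>(never_nan_nan) << 6;
  key |= static_cast<uint32_t>(include_number_compare) << 7;
  key |= static_cast<uint32_t>(include_smi_compare) << 8;
  return key;
}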
|  | 4621 |  | 
|  | 4622 |  | 
|  | 4623 | // StringCharCodeAtGenerator | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4624 | void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 
|  | 4625 | Label flat_string; | 
|  | 4626 | Label ascii_string; | 
|  | 4627 | Label got_char_code; | 
|  | 4628 |  | 
|  | 4629 | // If the receiver is a smi trigger the non-string case. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4630 | __ JumpIfSmi(object_, receiver_not_string_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4631 |  | 
|  | 4632 | // Fetch the instance type of the receiver into result register. | 
|  | 4633 | __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
|  | 4634 | __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
|  | 4635 | // If the receiver is not a string trigger the non-string case. | 
|  | 4636 | __ tst(result_, Operand(kIsNotStringMask)); | 
|  | 4637 | __ b(ne, receiver_not_string_); | 
|  | 4638 |  | 
|  | 4639 | // If the index is non-smi trigger the non-smi case. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4640 | __ JumpIfNotSmi(index_, &index_not_smi_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4641 |  | 
|  | 4642 | // Put smi-tagged index into scratch register. | 
|  | 4643 | __ mov(scratch_, index_); | 
|  | 4644 | __ bind(&got_smi_index_); | 
|  | 4645 |  | 
|  | 4646 | // Check for index out of range. | 
|  | 4647 | __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); | 
|  | 4648 | __ cmp(ip, Operand(scratch_)); | 
|  | 4649 | __ b(ls, index_out_of_range_); | 
|  | 4650 |  | 
|  | 4651 | // We need special handling for non-flat strings. | 
|  | 4652 | STATIC_ASSERT(kSeqStringTag == 0); | 
|  | 4653 | __ tst(result_, Operand(kStringRepresentationMask)); | 
|  | 4654 | __ b(eq, &flat_string); | 
|  | 4655 |  | 
|  | 4656 | // Handle non-flat strings. | 
|  | 4657 | __ tst(result_, Operand(kIsConsStringMask)); | 
|  | 4658 | __ b(eq, &call_runtime_); | 
|  | 4659 |  | 
|  | 4660 | // ConsString. | 
|  | 4661 | // Check whether the right hand side is the empty string (i.e. if | 
|  | 4662 | // this is really a flat string in a cons string). If that is not | 
|  | 4663 | // the case we would rather go to the runtime system now to flatten | 
|  | 4664 | // the string. | 
|  | 4665 | __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset)); | 
|  | 4666 | __ LoadRoot(ip, Heap::kEmptyStringRootIndex); | 
|  | 4667 | __ cmp(result_, Operand(ip)); | 
|  | 4668 | __ b(ne, &call_runtime_); | 
|  | 4669 | // Get the first of the two strings and load its instance type. | 
|  | 4670 | __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); | 
|  | 4671 | __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
|  | 4672 | __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
|  | 4673 | // If the first cons component is also non-flat, then go to runtime. | 
|  | 4674 | STATIC_ASSERT(kSeqStringTag == 0); | 
|  | 4675 | __ tst(result_, Operand(kStringRepresentationMask)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4676 | __ b(ne, &call_runtime_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4677 |  | 
|  | 4678 | // Check for 1-byte or 2-byte string. | 
|  | 4679 | __ bind(&flat_string); | 
|  | 4680 | STATIC_ASSERT(kAsciiStringTag != 0); | 
|  | 4681 | __ tst(result_, Operand(kStringEncodingMask)); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4682 | __ b(ne, &ascii_string); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4683 |  | 
|  | 4684 | // 2-byte string. | 
|  | 4685 | // Load the 2-byte character code into the result register. We can | 
|  | 4686 | // add without shifting since the smi tag size is the log2 of the | 
|  | 4687 | // number of bytes in a two-byte character. | 
|  | 4688 | STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); | 
|  | 4689 | __ add(scratch_, object_, Operand(scratch_)); | 
|  | 4690 | __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); | 
|  | 4691 | __ jmp(&got_char_code); | 
|  | 4692 |  | 
|  | 4693 | // ASCII string. | 
|  | 4694 | // Load the byte into the result register. | 
|  | 4695 | __ bind(&ascii_string); | 
|  | 4696 | __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize)); | 
|  | 4697 | __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize)); | 
|  | 4698 |  | 
|  | 4699 | __ bind(&got_char_code); | 
|  | 4700 | __ mov(result_, Operand(result_, LSL, kSmiTagSize)); | 
|  | 4701 | __ bind(&exit_); | 
|  | 4702 | } | 
|  | 4703 |  | 
|  | 4704 |  | 
|  | 4705 | void StringCharCodeAtGenerator::GenerateSlow( | 
|  | 4706 | MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 
|  | 4707 | __ Abort("Unexpected fallthrough to CharCodeAt slow case"); | 
|  | 4708 |  | 
|  | 4709 | // Index is not a smi. | 
|  | 4710 | __ bind(&index_not_smi_); | 
|  | 4711 | // If index is a heap number, try converting it to an integer. | 
|  | 4712 | __ CheckMap(index_, | 
|  | 4713 | scratch_, | 
|  | 4714 | Heap::kHeapNumberMapRootIndex, | 
|  | 4715 | index_not_number_, | 
|  | 4716 | true); | 
|  | 4717 | call_helper.BeforeCall(masm); | 
|  | 4718 | __ Push(object_, index_); | 
|  | 4719 | __ push(index_);  // Consumed by runtime conversion function. | 
|  | 4720 | if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 
|  | 4721 | __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | 
|  | 4722 | } else { | 
|  | 4723 | ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 
|  | 4724 | // NumberToSmi discards numbers that are not exact integers. | 
|  | 4725 | __ CallRuntime(Runtime::kNumberToSmi, 1); | 
|  | 4726 | } | 
|  | 4727 | // Save the conversion result before the pop instructions below | 
|  | 4728 | // have a chance to overwrite it. | 
|  | 4729 | __ Move(scratch_, r0); | 
|  | 4730 | __ pop(index_); | 
|  | 4731 | __ pop(object_); | 
|  | 4732 | // Reload the instance type. | 
|  | 4733 | __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
|  | 4734 | __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
|  | 4735 | call_helper.AfterCall(masm); | 
|  | 4736 | // If index is still not a smi, it must be out of range. | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4737 | __ JumpIfNotSmi(scratch_, index_out_of_range_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4738 | // Otherwise, return to the fast path. | 
|  | 4739 | __ jmp(&got_smi_index_); | 
|  | 4740 |  | 
|  | 4741 | // Call runtime. We get here when the receiver is a string and the | 
|  | 4742 | // index is a number, but the code of getting the actual character | 
|  | 4743 | // is too complex (e.g., when the string needs to be flattened). | 
|  | 4744 | __ bind(&call_runtime_); | 
|  | 4745 | call_helper.BeforeCall(masm); | 
|  | 4746 | __ Push(object_, index_); | 
|  | 4747 | __ CallRuntime(Runtime::kStringCharCodeAt, 2); | 
|  | 4748 | __ Move(result_, r0); | 
|  | 4749 | call_helper.AfterCall(masm); | 
|  | 4750 | __ jmp(&exit_); | 
|  | 4751 |  | 
|  | 4752 | __ Abort("Unexpected fallthrough from CharCodeAt slow case"); | 
|  | 4753 | } | 
|  | 4754 |  | 
|  | 4755 |  | 
|  | 4756 | // ------------------------------------------------------------------------- | 
|  | 4757 | // StringCharFromCodeGenerator | 
|  | 4758 |  | 
|  | 4759 | void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 
|  | 4760 | // Fast case of Heap::LookupSingleCharacterStringFromCode. | 
|  | 4761 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 4762 | STATIC_ASSERT(kSmiShiftSize == 0); | 
|  | 4763 | ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); | 
|  | 4764 | __ tst(code_, | 
|  | 4765 | Operand(kSmiTagMask | | 
|  | 4766 | ((~String::kMaxAsciiCharCode) << kSmiTagSize))); | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 4767 | __ b(ne, &slow_case_); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4768 |  | 
|  | 4769 | __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 
|  | 4770 | // At this point the code register contains a smi-tagged ascii char code. | 
|  | 4771 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 4772 | __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); | 
|  | 4773 | __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 
|  | 4774 | __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 
|  | 4775 | __ cmp(result_, Operand(ip)); | 
|  | 4776 | __ b(eq, &slow_case_); | 
|  | 4777 | __ bind(&exit_); | 
|  | 4778 | } | 
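// The single tst above performs two checks at once: the value must be a smi
// (low tag bit clear) and the untagged char code must fit in the cached
// ascii range. A C++ rendering of the combined mask test, assuming the
// 1-bit smi tag used here:

#include <cstdint>

bool IsCacheableCharCode(uint32_t tagged_value) {
  const uint32_t kSmiTagMask = 1;          // low bit is the smi tag
  const uint32_t kMaxAsciiCharCode = 127;  // the cache covers 0..127
  // Any set bit outside the smi-tagged ascii range fails the check.
  return (tagged_value & (kSmiTagMask | (~kMaxAsciiCharCode << 1))) == 0;
}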
|  | 4779 |  | 
|  | 4780 |  | 
|  | 4781 | void StringCharFromCodeGenerator::GenerateSlow( | 
|  | 4782 | MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 
|  | 4783 | __ Abort("Unexpected fallthrough to CharFromCode slow case"); | 
|  | 4784 |  | 
|  | 4785 | __ bind(&slow_case_); | 
|  | 4786 | call_helper.BeforeCall(masm); | 
|  | 4787 | __ push(code_); | 
|  | 4788 | __ CallRuntime(Runtime::kCharFromCode, 1); | 
|  | 4789 | __ Move(result_, r0); | 
|  | 4790 | call_helper.AfterCall(masm); | 
|  | 4791 | __ jmp(&exit_); | 
|  | 4792 |  | 
|  | 4793 | __ Abort("Unexpected fallthrough from CharFromCode slow case"); | 
|  | 4794 | } | 
|  | 4795 |  | 
|  | 4796 |  | 
|  | 4797 | // ------------------------------------------------------------------------- | 
|  | 4798 | // StringCharAtGenerator | 
|  | 4799 |  | 
|  | 4800 | void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { | 
|  | 4801 | char_code_at_generator_.GenerateFast(masm); | 
|  | 4802 | char_from_code_generator_.GenerateFast(masm); | 
|  | 4803 | } | 
|  | 4804 |  | 
|  | 4805 |  | 
|  | 4806 | void StringCharAtGenerator::GenerateSlow( | 
|  | 4807 | MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 
|  | 4808 | char_code_at_generator_.GenerateSlow(masm, call_helper); | 
|  | 4809 | char_from_code_generator_.GenerateSlow(masm, call_helper); | 
|  | 4810 | } | 
|  | 4811 |  | 
|  | 4812 |  | 
|  | 4813 | class StringHelper : public AllStatic { | 
|  | 4814 | public: | 
|  | 4815 | // Generate code for copying characters using a simple loop. This should only | 
|  | 4816 | // be used in places where the number of characters is small and the | 
|  | 4817 | // additional setup and checking in GenerateCopyCharactersLong adds too much | 
|  | 4818 | // overhead. Copying of overlapping regions is not supported. | 
|  | 4819 | // Dest register ends at the position after the last character written. | 
|  | 4820 | static void GenerateCopyCharacters(MacroAssembler* masm, | 
|  | 4821 | Register dest, | 
|  | 4822 | Register src, | 
|  | 4823 | Register count, | 
|  | 4824 | Register scratch, | 
|  | 4825 | bool ascii); | 
|  | 4826 |  | 
|  | 4827 | // Generate code for copying a large number of characters. This function | 
|  | 4828 | // is allowed to spend extra time setting up conditions to make copying | 
|  | 4829 | // faster. Copying of overlapping regions is not supported. | 
|  | 4830 | // Dest register ends at the position after the last character written. | 
|  | 4831 | static void GenerateCopyCharactersLong(MacroAssembler* masm, | 
|  | 4832 | Register dest, | 
|  | 4833 | Register src, | 
|  | 4834 | Register count, | 
|  | 4835 | Register scratch1, | 
|  | 4836 | Register scratch2, | 
|  | 4837 | Register scratch3, | 
|  | 4838 | Register scratch4, | 
|  | 4839 | Register scratch5, | 
|  | 4840 | int flags); | 
|  | 4841 |  | 
|  | 4842 |  | 
|  | 4843 | // Probe the symbol table for a two character string. If the string is | 
|  | 4844 | // not found by probing, a jump to the label not_found is performed. This | 
|  | 4845 | // jump does not guarantee that the string is absent from the symbol table. | 
|  | 4846 | // If the string is found, the code falls through with the string in | 
|  | 4847 | // register r0. The contents of both the c1 and c2 registers are modified. | 
|  | 4848 | // At exit, c1 is guaranteed to contain a halfword with its low and high | 
|  | 4849 | // bytes equal to the initial contents of c1 and c2 respectively. | 
|  | 4850 | static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, | 
|  | 4851 | Register c1, | 
|  | 4852 | Register c2, | 
|  | 4853 | Register scratch1, | 
|  | 4854 | Register scratch2, | 
|  | 4855 | Register scratch3, | 
|  | 4856 | Register scratch4, | 
|  | 4857 | Register scratch5, | 
|  | 4858 | Label* not_found); | 
|  | 4859 |  | 
|  | 4860 | // Generate string hash. | 
|  | 4861 | static void GenerateHashInit(MacroAssembler* masm, | 
|  | 4862 | Register hash, | 
|  | 4863 | Register character); | 
|  | 4864 |  | 
|  | 4865 | static void GenerateHashAddCharacter(MacroAssembler* masm, | 
|  | 4866 | Register hash, | 
|  | 4867 | Register character); | 
|  | 4868 |  | 
|  | 4869 | static void GenerateHashGetHash(MacroAssembler* masm, | 
|  | 4870 | Register hash); | 
|  | 4871 |  | 
|  | 4872 | private: | 
|  | 4873 | DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); | 
|  | 4874 | }; | 
|  | 4875 |  | 
|  | 4876 |  | 
|  | 4877 | void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, | 
|  | 4878 | Register dest, | 
|  | 4879 | Register src, | 
|  | 4880 | Register count, | 
|  | 4881 | Register scratch, | 
|  | 4882 | bool ascii) { | 
|  | 4883 | Label loop; | 
|  | 4884 | Label done; | 
|  | 4885 | // This loop just copies one character at a time, as it is only used for very | 
|  | 4886 | // short strings. | 
|  | 4887 | if (!ascii) { | 
|  | 4888 | __ add(count, count, Operand(count), SetCC); | 
|  | 4889 | } else { | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 4890 | __ cmp(count, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4891 | } | 
|  | 4892 | __ b(eq, &done); | 
|  | 4893 |  | 
|  | 4894 | __ bind(&loop); | 
|  | 4895 | __ ldrb(scratch, MemOperand(src, 1, PostIndex)); | 
|  | 4896 | // Perform sub between load and dependent store to get the load time to | 
|  | 4897 | // complete. | 
|  | 4898 | __ sub(count, count, Operand(1), SetCC); | 
|  | 4899 | __ strb(scratch, MemOperand(dest, 1, PostIndex)); | 
|  | 4900 | // The branch below is not taken on the last iteration, when count reaches zero. | 
|  | 4901 | __ b(gt, &loop); | 
|  | 4902 |  | 
|  | 4903 | __ bind(&done); | 
|  | 4904 | } | 
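// Restated in C++: for two-byte strings the character count is doubled up
// front (the add of count to itself above), after which a single
// byte-at-a-time loop serves both encodings. A minimal sketch:

#include <cstddef>
#include <cstdint>

void CopyCharsSimple(uint8_t* dest, const uint8_t* src, size_t count,
                     bool ascii) {
  size_t bytes = ascii ? count : count * 2;  // two-byte chars: double it
  while (bytes-- > 0) *dest++ = *src++;      // ldrb/strb with post-increment
}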
|  | 4905 |  | 
|  | 4906 |  | 
|  | 4907 | enum CopyCharactersFlags { | 
|  | 4908 | COPY_ASCII = 1, | 
|  | 4909 | DEST_ALWAYS_ALIGNED = 2 | 
|  | 4910 | }; | 
|  | 4911 |  | 
|  | 4912 |  | 
|  | 4913 | void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, | 
|  | 4914 | Register dest, | 
|  | 4915 | Register src, | 
|  | 4916 | Register count, | 
|  | 4917 | Register scratch1, | 
|  | 4918 | Register scratch2, | 
|  | 4919 | Register scratch3, | 
|  | 4920 | Register scratch4, | 
|  | 4921 | Register scratch5, | 
|  | 4922 | int flags) { | 
|  | 4923 | bool ascii = (flags & COPY_ASCII) != 0; | 
|  | 4924 | bool dest_always_aligned = (flags & DEST_ALWAYS_ALIGNED) != 0; | 
|  | 4925 |  | 
|  | 4926 | if (dest_always_aligned && FLAG_debug_code) { | 
|  | 4927 | // Check that destination is actually word aligned if the flag says | 
|  | 4928 | // that it is. | 
|  | 4929 | __ tst(dest, Operand(kPointerAlignmentMask)); | 
|  | 4930 | __ Check(eq, "Destination of copy not aligned."); | 
|  | 4931 | } | 
|  | 4932 |  | 
|  | 4933 | const int kReadAlignment = 4; | 
|  | 4934 | const int kReadAlignmentMask = kReadAlignment - 1; | 
|  | 4935 | // Ensure that reading an entire aligned word containing the last character | 
|  | 4936 | // of a string will not read outside the allocated area (because we pad up | 
|  | 4937 | // to kObjectAlignment). | 
|  | 4938 | STATIC_ASSERT(kObjectAlignment >= kReadAlignment); | 
|  | 4939 | // Assumes word reads and writes are little endian. | 
|  | 4940 | // Nothing to do for zero characters. | 
|  | 4941 | Label done; | 
|  | 4942 | if (!ascii) { | 
|  | 4943 | __ add(count, count, Operand(count), SetCC); | 
|  | 4944 | } else { | 
| Iain Merrick | 9ac36c9 | 2010-09-13 15:29:50 +0100 | [diff] [blame] | 4945 | __ cmp(count, Operand(0, RelocInfo::NONE)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 4946 | } | 
|  | 4947 | __ b(eq, &done); | 
|  | 4948 |  | 
|  | 4949 | // Assume that you cannot read (or write) unaligned. | 
|  | 4950 | Label byte_loop; | 
|  | 4951 | // Must copy at least eight bytes, otherwise just do it one byte at a time. | 
|  | 4952 | __ cmp(count, Operand(8)); | 
|  | 4953 | __ add(count, dest, Operand(count)); | 
|  | 4954 | Register limit = count;  // Read until src equals this. | 
|  | 4955 | __ b(lt, &byte_loop); | 
|  | 4956 |  | 
|  | 4957 | if (!dest_always_aligned) { | 
|  | 4958 | // Align dest by byte copying. Copies between zero and three bytes. | 
|  | 4959 | __ and_(scratch4, dest, Operand(kReadAlignmentMask), SetCC); | 
|  | 4960 | Label dest_aligned; | 
|  | 4961 | __ b(eq, &dest_aligned); | 
|  | 4962 | __ cmp(scratch4, Operand(2)); | 
|  | 4963 | __ ldrb(scratch1, MemOperand(src, 1, PostIndex)); | 
|  | 4964 | __ ldrb(scratch2, MemOperand(src, 1, PostIndex), le); | 
|  | 4965 | __ ldrb(scratch3, MemOperand(src, 1, PostIndex), lt); | 
|  | 4966 | __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | 
|  | 4967 | __ strb(scratch2, MemOperand(dest, 1, PostIndex), le); | 
|  | 4968 | __ strb(scratch3, MemOperand(dest, 1, PostIndex), lt); | 
|  | 4969 | __ bind(&dest_aligned); | 
|  | 4970 | } | 
|  | 4971 |  | 
|  | 4972 | Label simple_loop; | 
|  | 4973 |  | 
|  | 4974 | __ sub(scratch4, dest, Operand(src)); | 
|  | 4975 | __ and_(scratch4, scratch4, Operand(0x03), SetCC); | 
|  | 4976 | __ b(eq, &simple_loop); | 
|  | 4977 | // Shift register is number of bits in a source word that | 
|  | 4978 | // must be combined with bits in the next source word in order | 
|  | 4979 | // to create a destination word. | 
|  | 4980 |  | 
|  | 4981 | // Complex loop for src/dst that are not aligned the same way. | 
|  | 4982 | { | 
|  | 4983 | Label loop; | 
|  | 4984 | __ mov(scratch4, Operand(scratch4, LSL, 3)); | 
|  | 4985 | Register left_shift = scratch4; | 
|  | 4986 | __ and_(src, src, Operand(~3));  // Round down to load previous word. | 
|  | 4987 | __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | 
|  | 4988 | // Store the "shift" most significant bits of scratch in the least | 
|  | 4989 | // significant bits (i.e., shift down by (32-shift)). | 
|  | 4990 | __ rsb(scratch2, left_shift, Operand(32)); | 
|  | 4991 | Register right_shift = scratch2; | 
|  | 4992 | __ mov(scratch1, Operand(scratch1, LSR, right_shift)); | 
|  | 4993 |  | 
|  | 4994 | __ bind(&loop); | 
|  | 4995 | __ ldr(scratch3, MemOperand(src, 4, PostIndex)); | 
|  | 4996 | __ sub(scratch5, limit, Operand(dest)); | 
|  | 4997 | __ orr(scratch1, scratch1, Operand(scratch3, LSL, left_shift)); | 
|  | 4998 | __ str(scratch1, MemOperand(dest, 4, PostIndex)); | 
|  | 4999 | __ mov(scratch1, Operand(scratch3, LSR, right_shift)); | 
|  | 5000 | // Loop if four or more bytes left to copy. | 
|  | 5001 | // Compare to eight, because we did the subtract before increasing dst. | 
|  | 5002 | __ sub(scratch5, scratch5, Operand(8), SetCC); | 
|  | 5003 | __ b(ge, &loop); | 
|  | 5004 | } | 
|  | 5005 | // There are now between zero and three bytes left to copy (the negative of | 
|  | 5006 | // that count is in scratch5), and between one and three bytes already read | 
|  | 5007 | // into scratch1 (eight times that count is in scratch4). We may have read | 
|  | 5008 | // past the end of the string, but because objects are aligned, we have not | 
|  | 5009 | // read past the end of the object. | 
|  | 5010 | // Find the minimum of the remaining characters to move and the preloaded | 
|  | 5011 | // characters, and write that many bytes. | 
|  | 5012 | __ add(scratch5, scratch5, Operand(4), SetCC); | 
|  | 5013 | __ b(eq, &done); | 
|  | 5014 | __ cmp(scratch4, Operand(scratch5, LSL, 3), ne); | 
|  | 5015 | // Move minimum of bytes read and bytes left to copy to scratch5. | 
|  | 5016 | __ mov(scratch5, Operand(scratch4, LSR, 3), LeaveCC, lt); | 
|  | 5017 | // Between one and three characters (count in scratch5) have already been | 
|  | 5018 | // read into scratch1 and are ready to write. | 
|  | 5019 | __ cmp(scratch5, Operand(2)); | 
|  | 5020 | __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | 
|  | 5021 | __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, ge); | 
|  | 5022 | __ strb(scratch1, MemOperand(dest, 1, PostIndex), ge); | 
|  | 5023 | __ mov(scratch1, Operand(scratch1, LSR, 8), LeaveCC, gt); | 
|  | 5024 | __ strb(scratch1, MemOperand(dest, 1, PostIndex), gt); | 
|  | 5025 | // Copy any remaining bytes. | 
|  | 5026 | __ b(&byte_loop); | 
|  | 5027 |  | 
|  | 5028 | // Simple loop. | 
|  | 5029 | // Copy words from src to dst, until less than four bytes left. | 
|  | 5030 | // Both src and dest are word aligned. | 
|  | 5031 | __ bind(&simple_loop); | 
|  | 5032 | { | 
|  | 5033 | Label loop; | 
|  | 5034 | __ bind(&loop); | 
|  | 5035 | __ ldr(scratch1, MemOperand(src, 4, PostIndex)); | 
|  | 5036 | __ sub(scratch3, limit, Operand(dest)); | 
|  | 5037 | __ str(scratch1, MemOperand(dest, 4, PostIndex)); | 
|  | 5038 | // Compare to 8, not 4, because we do the subtraction before increasing | 
|  | 5039 | // dest. | 
|  | 5040 | __ cmp(scratch3, Operand(8)); | 
|  | 5041 | __ b(ge, &loop); | 
|  | 5042 | } | 
|  | 5043 |  | 
|  | 5044 | // Copy bytes from src to dst until dst hits limit. | 
|  | 5045 | __ bind(&byte_loop); | 
|  | 5046 | __ cmp(dest, Operand(limit)); | 
|  | 5047 | __ ldrb(scratch1, MemOperand(src, 1, PostIndex), lt); | 
|  | 5048 | __ b(ge, &done); | 
|  | 5049 | __ strb(scratch1, MemOperand(dest, 1, PostIndex)); | 
|  | 5050 | __ b(&byte_loop); | 
|  | 5051 |  | 
|  | 5052 | __ bind(&done); | 
|  | 5053 | } | 
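// A little-endian C++ sketch of the "complex loop" case above: dest is
// word-aligned, src is not, so each output word is assembled from the top
// bytes of one source word and the bottom bytes of the next. Like the stub,
// the word loads may read up to three bytes past the end of src; that is
// only safe here because V8 pads heap objects, so a general-purpose routine
// would need a guarded tail.

#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyWordsUnalignedSource(uint8_t* dest, const uint8_t* src,
                              size_t count) {
  size_t misalign = reinterpret_cast<uintptr_t>(src) & 3;
  if (misalign == 0 || count < 8) {    // aligned or short: plain byte loop
    while (count-- > 0) *dest++ = *src++;
    return;
  }
  unsigned left_shift = 8 * (4 - misalign);   // bits taken from next word
  unsigned right_shift = 8 * misalign;        // bits taken from this word
  const uint8_t* word_ptr = src - misalign;   // round down to a word
  uint32_t word, carry;
  std::memcpy(&word, word_ptr, 4);            // aligned word load
  word_ptr += 4;
  carry = word >> right_shift;                // valid low bytes of word one
  while (count >= 4) {
    std::memcpy(&word, word_ptr, 4);
    word_ptr += 4;
    uint32_t out = carry | (word << left_shift);
    std::memcpy(dest, &out, 4);
    carry = word >> right_shift;
    dest += 4;
    count -= 4;
  }
  // 0..3 trailing bytes; the next unread source byte sits just behind the
  // last word load.
  const uint8_t* tail = word_ptr - (4 - misalign);
  while (count-- > 0) *dest++ = *tail++;
}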
|  | 5054 |  | 
|  | 5055 |  | 
|  | 5056 | void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, | 
|  | 5057 | Register c1, | 
|  | 5058 | Register c2, | 
|  | 5059 | Register scratch1, | 
|  | 5060 | Register scratch2, | 
|  | 5061 | Register scratch3, | 
|  | 5062 | Register scratch4, | 
|  | 5063 | Register scratch5, | 
|  | 5064 | Label* not_found) { | 
|  | 5065 | // Register scratch3 is the general scratch register in this function. | 
|  | 5066 | Register scratch = scratch3; | 
|  | 5067 |  | 
|  | 5068 | // Make sure that both characters are not digits, as such strings have a | 
|  | 5069 | // different hash algorithm. Don't try to look for these in the symbol table. | 
|  | 5070 | Label not_array_index; | 
|  | 5071 | __ sub(scratch, c1, Operand(static_cast<int>('0'))); | 
|  | 5072 | __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); | 
|  | 5073 | __ b(hi, ¬_array_index); | 
|  | 5074 | __ sub(scratch, c2, Operand(static_cast<int>('0'))); | 
|  | 5075 | __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); | 
|  | 5076 |  | 
|  | 5077 | // If the check failed, combine both characters into a single halfword. | 
|  | 5078 | // This is required by the contract of the method: code at the | 
|  | 5079 | // not_found branch expects this combination in the c1 register. | 
|  | 5080 | __ orr(c1, c1, Operand(c2, LSL, kBitsPerByte), LeaveCC, ls); | 
|  | 5081 | __ b(ls, not_found); | 
|  | 5082 |  | 
|  | 5083 | __ bind(¬_array_index); | 
|  | 5084 | // Calculate the two character string hash. | 
|  | 5085 | Register hash = scratch1; | 
|  | 5086 | StringHelper::GenerateHashInit(masm, hash, c1); | 
|  | 5087 | StringHelper::GenerateHashAddCharacter(masm, hash, c2); | 
|  | 5088 | StringHelper::GenerateHashGetHash(masm, hash); | 
|  | 5089 |  | 
|  | 5090 | // Collect the two characters in a register. | 
|  | 5091 | Register chars = c1; | 
|  | 5092 | __ orr(chars, chars, Operand(c2, LSL, kBitsPerByte)); | 
|  | 5093 |  | 
|  | 5094 | // chars: two character string, char 1 in byte 0 and char 2 in byte 1. | 
|  | 5095 | // hash:  hash of two character string. | 
|  | 5096 |  | 
|  | 5097 | // Load the symbol table. | 
|  | 5099 | Register symbol_table = c2; | 
|  | 5100 | __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); | 
|  | 5101 |  | 
|  | 5102 | // Load undefined value | 
|  | 5103 | Register undefined = scratch4; | 
|  | 5104 | __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 
|  | 5105 |  | 
|  | 5106 | // Calculate capacity mask from the symbol table capacity. | 
|  | 5107 | Register mask = scratch2; | 
|  | 5108 | __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); | 
|  | 5109 | __ mov(mask, Operand(mask, ASR, 1)); | 
|  | 5110 | __ sub(mask, mask, Operand(1)); | 
|  | 5111 |  | 
|  | 5112 | // Calculate untagged address of the first element of the symbol table. | 
|  | 5113 | Register first_symbol_table_element = symbol_table; | 
|  | 5114 | __ add(first_symbol_table_element, symbol_table, | 
|  | 5115 | Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); | 
|  | 5116 |  | 
|  | 5117 | // Registers | 
|  | 5118 | // chars: two character string, char 1 in byte 0 and char 2 in byte 1. | 
|  | 5119 | // hash:  hash of two character string | 
|  | 5120 | // mask:  capacity mask | 
|  | 5121 | // first_symbol_table_element: address of the first element of | 
|  | 5122 | //                             the symbol table | 
|  | 5123 | // scratch: - | 
|  | 5124 |  | 
|  | 5125 | // Perform a number of probes in the symbol table. | 
|  | 5126 | static const int kProbes = 4; | 
|  | 5127 | Label found_in_symbol_table; | 
|  | 5128 | Label next_probe[kProbes]; | 
|  | 5129 | for (int i = 0; i < kProbes; i++) { | 
|  | 5130 | Register candidate = scratch5;  // Scratch register contains candidate. | 
|  | 5131 |  | 
|  | 5132 | // Calculate entry in symbol table. | 
|  | 5133 | if (i > 0) { | 
|  | 5134 | __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); | 
|  | 5135 | } else { | 
|  | 5136 | __ mov(candidate, hash); | 
|  | 5137 | } | 
|  | 5138 |  | 
|  | 5139 | __ and_(candidate, candidate, Operand(mask)); | 
|  | 5140 |  | 
|  | 5141 | // Load the entry from the symbol table. | 
|  | 5142 | STATIC_ASSERT(SymbolTable::kEntrySize == 1); | 
|  | 5143 | __ ldr(candidate, | 
|  | 5144 | MemOperand(first_symbol_table_element, | 
|  | 5145 | candidate, | 
|  | 5146 | LSL, | 
|  | 5147 | kPointerSizeLog2)); | 
|  | 5148 |  | 
|  | 5149 | // If entry is undefined no string with this hash can be found. | 
|  | 5150 | __ cmp(candidate, undefined); | 
|  | 5151 | __ b(eq, not_found); | 
|  | 5152 |  | 
|  | 5153 | // If length is not 2 the string is not a candidate. | 
|  | 5154 | __ ldr(scratch, FieldMemOperand(candidate, String::kLengthOffset)); | 
|  | 5155 | __ cmp(scratch, Operand(Smi::FromInt(2))); | 
|  | 5156 | __ b(ne, &next_probe[i]); | 
|  | 5157 |  | 
|  | 5158 | // Check that the candidate is a non-external ascii string. | 
|  | 5159 | __ ldr(scratch, FieldMemOperand(candidate, HeapObject::kMapOffset)); | 
|  | 5160 | __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 
|  | 5161 | __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, | 
|  | 5162 | &next_probe[i]); | 
|  | 5163 |  | 
|  | 5164 | // Check if the two characters match. | 
|  | 5165 | // Assumes that word load is little endian. | 
|  | 5166 | __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); | 
|  | 5167 | __ cmp(chars, scratch); | 
|  | 5168 | __ b(eq, &found_in_symbol_table); | 
|  | 5169 | __ bind(&next_probe[i]); | 
|  | 5170 | } | 
|  | 5171 |  | 
|  | 5172 | // No matching 2 character string found by probing. | 
|  | 5173 | __ jmp(not_found); | 
|  | 5174 |  | 
|  | 5175 | // Scratch register contains result when we fall through to here. | 
|  | 5176 | Register result = scratch; | 
|  | 5177 | __ bind(&found_in_symbol_table); | 
|  | 5178 | __ Move(r0, result); | 
|  | 5179 | } | 
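// C++ sketch of the probing scheme above. A miss after kProbes attempts only
// means "not found by probing": exactly like the stub's not_found label, the
// caller must treat it as a maybe and fall back to a slower path. The probe
// offsets here are assumed quadratic; the real ones come from
// SymbolTable::GetProbeOffset. Empty entries stand in for undefined.

#include <cstdint>
#include <optional>
#include <string>
#include <vector>

std::optional<std::string> ProbeForTwoCharString(
    const std::vector<std::string>& table,   // capacity: a power of two
    uint32_t hash, const std::string& key) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
  static const int kProbes = 4;              // matches the stub above
  for (int i = 0; i < kProbes; i++) {
    uint32_t entry = (hash + i * (i + 1) / 2) & mask;  // assumed offsets
    const std::string& candidate = table[entry];
    if (candidate.empty()) return std::nullopt;  // undefined: definitely absent
    if (candidate.size() == 2 && candidate == key) return candidate;
  }
  return std::nullopt;  // not found by probing; may still be in the table
}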
|  | 5180 |  | 
|  | 5181 |  | 
|  | 5182 | void StringHelper::GenerateHashInit(MacroAssembler* masm, | 
|  | 5183 | Register hash, | 
|  | 5184 | Register character) { | 
|  | 5185 | // hash = character + (character << 10); | 
|  | 5186 | __ add(hash, character, Operand(character, LSL, 10)); | 
|  | 5187 | // hash ^= hash >> 6; | 
|  | 5188 | __ eor(hash, hash, Operand(hash, ASR, 6)); | 
|  | 5189 | } | 
|  | 5190 |  | 
|  | 5191 |  | 
|  | 5192 | void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | 
|  | 5193 | Register hash, | 
|  | 5194 | Register character) { | 
|  | 5195 | // hash += character; | 
|  | 5196 | __ add(hash, hash, Operand(character)); | 
|  | 5197 | // hash += hash << 10; | 
|  | 5198 | __ add(hash, hash, Operand(hash, LSL, 10)); | 
|  | 5199 | // hash ^= hash >> 6; | 
|  | 5200 | __ eor(hash, hash, Operand(hash, ASR, 6)); | 
|  | 5201 | } | 
|  | 5202 |  | 
|  | 5203 |  | 
|  | 5204 | void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 
|  | 5205 | Register hash) { | 
|  | 5206 | // hash += hash << 3; | 
|  | 5207 | __ add(hash, hash, Operand(hash, LSL, 3)); | 
|  | 5208 | // hash ^= hash >> 11; | 
|  | 5209 | __ eor(hash, hash, Operand(hash, ASR, 11)); | 
|  | 5210 | // hash += hash << 15; | 
|  | 5211 | __ add(hash, hash, Operand(hash, LSL, 15), SetCC); | 
|  | 5212 |  | 
|  | 5213 | // if (hash == 0) hash = 27; | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 5214 | __ mov(hash, Operand(27), LeaveCC, eq); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5215 | } | 
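// The three helpers above compute a Jenkins-style one-at-a-time hash.
// Written out in plain C++ (with unsigned shifts) for the two-character
// case the symbol table probe uses:

#include <cstdint>

uint32_t TwoCharHash(uint8_t c1, uint8_t c2) {
  uint32_t hash = c1 + (static_cast<uint32_t>(c1) << 10);  // GenerateHashInit
  hash ^= hash >> 6;
  hash += c2;                              // GenerateHashAddCharacter
  hash += hash << 10;
  hash ^= hash >> 6;
  hash += hash << 3;                       // GenerateHashGetHash
  hash ^= hash >> 11;
  hash += hash << 15;
  if (hash == 0) hash = 27;                // the stub maps a zero hash to 27
  return hash;
}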
|  | 5216 |  | 
|  | 5217 |  | 
|  | 5218 | void SubStringStub::Generate(MacroAssembler* masm) { | 
|  | 5219 | Label runtime; | 
|  | 5220 |  | 
|  | 5221 | // Stack frame on entry. | 
|  | 5222 | //  lr: return address | 
|  | 5223 | //  sp[0]: to | 
|  | 5224 | //  sp[4]: from | 
|  | 5225 | //  sp[8]: string | 
|  | 5226 |  | 
|  | 5227 | // This stub is called from the native-call %_SubString(...), so | 
|  | 5228 | // nothing can be assumed about the arguments. It is tested that: | 
|  | 5229 | //  "string" is a sequential string, | 
|  | 5230 | //  both "from" and "to" are smis, and | 
|  | 5231 | //  0 <= from <= to <= string.length. | 
|  | 5232 | // If any of these assumptions fail, we call the runtime system. | 
|  | 5233 |  | 
|  | 5234 | static const int kToOffset = 0 * kPointerSize; | 
|  | 5235 | static const int kFromOffset = 1 * kPointerSize; | 
|  | 5236 | static const int kStringOffset = 2 * kPointerSize; | 
|  | 5237 |  | 
|  | 5238 |  | 
|  | 5239 | // Check bounds and smi-ness. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5240 | Register to = r6; | 
|  | 5241 | Register from = r7; | 
|  | 5242 | __ Ldrd(to, from, MemOperand(sp, kToOffset)); | 
|  | 5243 | STATIC_ASSERT(kFromOffset == kToOffset + 4); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5244 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 5245 | STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 
|  | 5246 | // I.e., arithmetic shift right by one un-smi-tags. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5247 | __ mov(r2, Operand(to, ASR, 1), SetCC); | 
|  | 5248 | __ mov(r3, Operand(from, ASR, 1), SetCC, cc); | 
|  | 5249 | // If either to or from had the smi tag bit set, then carry is set now. | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5250 | __ b(cs, &runtime);  // Either "from" or "to" is not a smi. | 
|  | 5251 | __ b(mi, &runtime);  // From is negative. | 
|  | 5252 |  | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5253 | // Both to and from are smis. | 
|  | 5254 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5255 | __ sub(r2, r2, Operand(r3), SetCC); | 
|  | 5256 | __ b(mi, &runtime);  // Fail if from > to. | 
|  | 5257 | // Special handling of sub-strings of length 1 and 2. One character strings | 
|  | 5258 | // are handled in the runtime system (looked up in the single character | 
|  | 5259 | // cache). Two character strings are looked for in the symbol cache. | 
|  | 5260 | __ cmp(r2, Operand(2)); | 
|  | 5261 | __ b(lt, &runtime); | 
|  | 5262 |  | 
|  | 5263 | // r2: length | 
|  | 5264 | // r3: from index (untagged smi) | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5265 | // r6 (a.k.a. to): to (smi) | 
|  | 5266 | // r7 (a.k.a. from): from offset (smi) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5267 |  | 
|  | 5268 | // Make sure first argument is a sequential (or flat) string. | 
|  | 5269 | __ ldr(r5, MemOperand(sp, kStringOffset)); | 
|  | 5270 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 5271 | __ tst(r5, Operand(kSmiTagMask)); | 
|  | 5272 | __ b(eq, &runtime); | 
|  | 5273 | Condition is_string = masm->IsObjectStringType(r5, r1); | 
|  | 5274 | __ b(NegateCondition(is_string), &runtime); | 
|  | 5275 |  | 
|  | 5276 | // r1: instance type | 
|  | 5277 | // r2: length | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5278 | // r3: from index (untagged smi) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5279 | // r5: string | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5280 | // r6 (a.k.a. to): to (smi) | 
|  | 5281 | // r7 (a.k.a. from): from offset (smi) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5282 | Label seq_string; | 
|  | 5283 | __ and_(r4, r1, Operand(kStringRepresentationMask)); | 
|  | 5284 | STATIC_ASSERT(kSeqStringTag < kConsStringTag); | 
|  | 5285 | STATIC_ASSERT(kConsStringTag < kExternalStringTag); | 
|  | 5286 | __ cmp(r4, Operand(kConsStringTag)); | 
|  | 5287 | __ b(gt, &runtime);  // External strings go to runtime. | 
|  | 5288 | __ b(lt, &seq_string);  // Sequential strings are handled directly. | 
|  | 5289 |  | 
|  | 5290 | // Cons string. Try to recurse (once) on the first substring. | 
|  | 5291 | // (This adds a little more generality than necessary to handle flattened | 
|  | 5292 | // cons strings, but not much). | 
|  | 5293 | __ ldr(r5, FieldMemOperand(r5, ConsString::kFirstOffset)); | 
|  | 5294 | __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); | 
|  | 5295 | __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 
|  | 5296 | __ tst(r1, Operand(kStringRepresentationMask)); | 
|  | 5297 | STATIC_ASSERT(kSeqStringTag == 0); | 
|  | 5298 | __ b(ne, &runtime);  // Cons and External strings go to runtime. | 
|  | 5299 |  | 
|  | 5300 | // Definitely a sequential string. | 
|  | 5301 | __ bind(&seq_string); | 
|  | 5302 |  | 
|  | 5303 | // r1: instance type. | 
|  | 5304 | // r2: length | 
|  | 5305 | // r3: from index (untagged smi) | 
|  | 5306 | // r5: string | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5307 | // r6 (a.k.a. to): to (smi) | 
|  | 5308 | // r7 (a.k.a. from): from offset (smi) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5309 | __ ldr(r4, FieldMemOperand(r5, String::kLengthOffset)); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5310 | __ cmp(r4, Operand(to)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5311 | __ b(lt, &runtime);  // Fail if to > length. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5312 | to = no_reg; | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5313 |  | 
|  | 5314 | // r1: instance type. | 
|  | 5315 | // r2: result string length. | 
|  | 5316 | // r3: from index (untagged smi) | 
|  | 5317 | // r5: string. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5318 | // r7 (a.k.a. from): from offset (smi) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5319 | // Check for flat ascii string. | 
|  | 5320 | Label non_ascii_flat; | 
|  | 5321 | __ tst(r1, Operand(kStringEncodingMask)); | 
|  | 5322 | STATIC_ASSERT(kTwoByteStringTag == 0); | 
|  | 5323 | __ b(eq, &non_ascii_flat); | 
|  | 5324 |  | 
|  | 5325 | Label result_longer_than_two; | 
|  | 5326 | __ cmp(r2, Operand(2)); | 
|  | 5327 | __ b(gt, &result_longer_than_two); | 
|  | 5328 |  | 
|  | 5329 | // Sub string of length 2 requested. | 
|  | 5330 | // Get the two characters forming the sub string. | 
|  | 5331 | __ add(r5, r5, Operand(r3)); | 
|  | 5332 | __ ldrb(r3, FieldMemOperand(r5, SeqAsciiString::kHeaderSize)); | 
|  | 5333 | __ ldrb(r4, FieldMemOperand(r5, SeqAsciiString::kHeaderSize + 1)); | 
|  | 5334 |  | 
|  | 5335 | // Try to lookup two character string in symbol table. | 
|  | 5336 | Label make_two_character_string; | 
|  | 5337 | StringHelper::GenerateTwoCharacterSymbolTableProbe( | 
|  | 5338 | masm, r3, r4, r1, r5, r6, r7, r9, &make_two_character_string); | 
|  | 5339 | __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 
|  | 5340 | __ add(sp, sp, Operand(3 * kPointerSize)); | 
|  | 5341 | __ Ret(); | 
|  | 5342 |  | 
|  | 5343 | // r2: result string length. | 
|  | 5344 | // r3: two characters combined into a halfword in little-endian byte order. | 
|  | 5345 | __ bind(&make_two_character_string); | 
|  | 5346 | __ AllocateAsciiString(r0, r2, r4, r5, r9, &runtime); | 
|  | 5347 | __ strh(r3, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | 
|  | 5348 | __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 
|  | 5349 | __ add(sp, sp, Operand(3 * kPointerSize)); | 
|  | 5350 | __ Ret(); | 
|  | 5351 |  | 
|  | 5352 | __ bind(&result_longer_than_two); | 
|  | 5353 |  | 
|  | 5354 | // Allocate the result. | 
|  | 5355 | __ AllocateAsciiString(r0, r2, r3, r4, r1, &runtime); | 
|  | 5356 |  | 
|  | 5357 | // r0: result string. | 
|  | 5358 | // r2: result string length. | 
|  | 5359 | // r5: string. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5360 | // r7 (a.k.a. from): from offset (smi) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5361 | // Locate first character of result. | 
|  | 5362 | __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
|  | 5363 | // Locate 'from' character of string. | 
|  | 5364 | __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5365 | __ add(r5, r5, Operand(from, ASR, 1)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5366 |  | 
|  | 5367 | // r0: result string. | 
|  | 5368 | // r1: first character of result string. | 
|  | 5369 | // r2: result string length. | 
|  | 5370 | // r5: first character of sub string to copy. | 
|  | 5371 | STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); | 
|  | 5372 | StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, | 
|  | 5373 | COPY_ASCII | DEST_ALWAYS_ALIGNED); | 
|  | 5374 | __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 
|  | 5375 | __ add(sp, sp, Operand(3 * kPointerSize)); | 
|  | 5376 | __ Ret(); | 
|  | 5377 |  | 
|  | 5378 | __ bind(&non_ascii_flat); | 
|  | 5379 | // r2: result string length. | 
|  | 5380 | // r5: string. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5381 | // r7 (a.k.a. from): from offset (smi) | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5382 | // Check for flat two byte string. | 
|  | 5383 |  | 
|  | 5384 | // Allocate the result. | 
|  | 5385 | __ AllocateTwoByteString(r0, r2, r1, r3, r4, &runtime); | 
|  | 5386 |  | 
|  | 5387 | // r0: result string. | 
|  | 5388 | // r2: result string length. | 
|  | 5389 | // r5: string. | 
|  | 5390 | // Locate first character of result. | 
|  | 5391 | __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
|  | 5392 | // Locate 'from' character of string. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5393 | __ add(r5, r5, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5394 | // As "from" is a smi, it is twice the untagged value, which matches the size | 
|  | 5395 | // of a two-byte character. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5396 | __ add(r5, r5, Operand(from)); | 
|  | 5397 | from = no_reg; | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5398 |  | 
|  | 5399 | // r0: result string. | 
|  | 5400 | // r1: first character of result. | 
|  | 5401 | // r2: result length. | 
|  | 5402 | // r5: first character of string to copy. | 
|  | 5403 | STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5404 | StringHelper::GenerateCopyCharactersLong( | 
|  | 5405 | masm, r1, r5, r2, r3, r4, r6, r7, r9, DEST_ALWAYS_ALIGNED); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5406 | __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4); | 
|  | 5407 | __ add(sp, sp, Operand(3 * kPointerSize)); | 
|  | 5408 | __ Ret(); | 
|  | 5409 |  | 
|  | 5410 | // Just jump to runtime to create the sub string. | 
|  | 5411 | __ bind(&runtime); | 
|  | 5412 | __ TailCallRuntime(Runtime::kSubString, 3, 1); | 
|  | 5413 | } | 
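// In summary, the fast path above requires: both "from" and "to" smis with
// 0 <= from <= to, a result length of at least two characters (length 2 is
// served from the symbol table), and a sequential string (possibly reached
// through the first component of a cons string) whose length covers "to".
// Anything else tail-calls Runtime::kSubString.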
|  | 5414 |  | 
|  | 5415 |  | 
|  | 5416 | void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 
|  | 5417 | Register left, | 
|  | 5418 | Register right, | 
|  | 5419 | Register scratch1, | 
|  | 5420 | Register scratch2, | 
|  | 5421 | Register scratch3, | 
|  | 5422 | Register scratch4) { | 
|  | 5423 | Label compare_lengths; | 
|  | 5424 | // Find minimum length and length difference. | 
|  | 5425 | __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 
|  | 5426 | __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 
|  | 5427 | __ sub(scratch3, scratch1, Operand(scratch2), SetCC); | 
|  | 5428 | Register length_delta = scratch3; | 
|  | 5429 | __ mov(scratch1, scratch2, LeaveCC, gt); | 
|  | 5430 | Register min_length = scratch1; | 
|  | 5431 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 5432 | __ tst(min_length, Operand(min_length)); | 
|  | 5433 | __ b(eq, &compare_lengths); | 
|  | 5434 |  | 
|  | 5435 | // Untag smi. | 
|  | 5436 | __ mov(min_length, Operand(min_length, ASR, kSmiTagSize)); | 
|  | 5437 |  | 
|  | 5438 | // Set up registers so that we only need to increment one register | 
|  | 5439 | // in the loop. | 
|  | 5440 | __ add(scratch2, min_length, | 
|  | 5441 | Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
|  | 5442 | __ add(left, left, Operand(scratch2)); | 
|  | 5443 | __ add(right, right, Operand(scratch2)); | 
|  | 5444 | // Registers left and right now point just past the first min_length characters. | 
|  | 5445 | __ rsb(min_length, min_length, Operand(-1)); | 
|  | 5446 | Register index = min_length; | 
|  | 5447 | // Index starts at -min_length. | 
|  | 5448 |  | 
|  | 5449 | { | 
|  | 5450 | // Compare loop. | 
|  | 5451 | Label loop; | 
|  | 5452 | __ bind(&loop); | 
|  | 5453 | // Compare characters. | 
|  | 5454 | __ add(index, index, Operand(1), SetCC); | 
|  | 5455 | __ ldrb(scratch2, MemOperand(left, index), ne); | 
|  | 5456 | __ ldrb(scratch4, MemOperand(right, index), ne); | 
|  | 5457 | // Skip to compare lengths with eq condition true. | 
|  | 5458 | __ b(eq, &compare_lengths); | 
|  | 5459 | __ cmp(scratch2, scratch4); | 
|  | 5460 | __ b(eq, &loop); | 
|  | 5461 | // Fallthrough with eq condition false. | 
|  | 5462 | } | 
|  | 5463 | // Compare lengths - strings up to min_length are equal. | 
|  | 5464 | __ bind(&compare_lengths); | 
|  | 5465 | ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 
|  | 5466 | // If the characters compared equal, use length_delta as result (zero = EQUAL). | 
|  | 5467 | __ mov(r0, Operand(length_delta), SetCC, eq); | 
|  | 5468 | // Fall through to here if characters compare not-equal. | 
|  | 5469 | __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); | 
|  | 5470 | __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); | 
|  | 5471 | __ Ret(); | 
|  | 5472 | } | 
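// The same comparison in plain C++, including the negative-index trick: both
// pointers are advanced past the first min_length characters and the index
// runs from -min_length up to zero, so the loop body needs only a single
// increment. Return values mirror LESS/EQUAL/GREATER.

#include <cstddef>

int CompareFlatAsciiStrings(const char* left, size_t left_len,
                            const char* right, size_t right_len) {
  size_t min_length = left_len < right_len ? left_len : right_len;
  const char* left_end = left + min_length;
  const char* right_end = right + min_length;
  for (ptrdiff_t i = -static_cast<ptrdiff_t>(min_length); i != 0; i++) {
    char lc = left_end[i];
    char rc = right_end[i];
    if (lc != rc) return lc < rc ? -1 : 1;   // LESS / GREATER
  }
  // All characters up to min_length are equal; the length difference decides.
  if (left_len == right_len) return 0;       // EQUAL
  return left_len < right_len ? -1 : 1;
}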
|  | 5473 |  | 
|  | 5474 |  | 
|  | 5475 | void StringCompareStub::Generate(MacroAssembler* masm) { | 
|  | 5476 | Label runtime; | 
|  | 5477 |  | 
|  | 5478 | // Stack frame on entry. | 
|  | 5479 | //  sp[0]: right string | 
|  | 5480 | //  sp[4]: left string | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5481 | __ Ldrd(r0, r1, MemOperand(sp));  // Load right in r0, left in r1. | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5482 |  | 
|  | 5483 | Label not_same; | 
|  | 5484 | __ cmp(r0, r1); | 
|  | 5485 | __ b(ne, ¬_same); | 
|  | 5486 | STATIC_ASSERT(EQUAL == 0); | 
|  | 5487 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 5488 | __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 
|  | 5489 | __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2); | 
|  | 5490 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
|  | 5491 | __ Ret(); | 
|  | 5492 |  | 
|  | 5493 | __ bind(¬_same); | 
|  | 5494 |  | 
|  | 5495 | // Check that both objects are sequential ascii strings. | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5496 | __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5497 |  | 
|  | 5498 | // Compare flat ascii strings natively. Remove arguments from stack first. | 
|  | 5499 | __ IncrementCounter(&Counters::string_compare_native, 1, r2, r3); | 
|  | 5500 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
| Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 5501 | GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5); | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 5502 |  | 
|  | 5503 | // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 
|  | 5504 | // tagged as a small integer. | 
|  | 5505 | __ bind(&runtime); | 
|  | 5506 | __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 
|  | 5507 | } | 
|  | 5508 |  | 
|  | 5509 |  | 
|  | 5510 | void StringAddStub::Generate(MacroAssembler* masm) { | 
|  | 5511 | Label string_add_runtime; | 
|  | 5512 | // Stack on entry: | 
|  | 5513 | // sp[0]: second argument. | 
|  | 5514 | // sp[4]: first argument. | 
|  | 5515 |  | 
|  | 5516 | // Load the two arguments. | 
|  | 5517 | __ ldr(r0, MemOperand(sp, 1 * kPointerSize));  // First argument. | 
|  | 5518 | __ ldr(r1, MemOperand(sp, 0 * kPointerSize));  // Second argument. | 
|  | 5519 |  | 
|  | 5520 | // Make sure that both arguments are strings if not known in advance. | 
|  | 5521 | if (string_check_) { | 
|  | 5522 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 5523 | __ JumpIfEitherSmi(r0, r1, &string_add_runtime); | 
|  | 5524 | // Load instance types. | 
|  | 5525 | __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 5526 | __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 
|  | 5527 | __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 
|  | 5528 | __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 
|  | 5529 | STATIC_ASSERT(kStringTag == 0); | 
|  | 5530 | // If either is not a string, go to runtime. | 
|  | 5531 | __ tst(r4, Operand(kIsNotStringMask)); | 
|  | 5532 | __ tst(r5, Operand(kIsNotStringMask), eq); | 
|  | 5533 | __ b(ne, &string_add_runtime); | 
|  | 5534 | } | 
|  | 5535 |  | 
|  | 5536 | // Both arguments are strings. | 
|  | 5537 | // r0: first string | 
|  | 5538 | // r1: second string | 
|  | 5539 | // r4: first string instance type (if string_check_) | 
|  | 5540 | // r5: second string instance type (if string_check_) | 
|  | 5541 | { | 
|  | 5542 | Label strings_not_empty; | 
|  | 5543 | // Check if either of the strings is empty. In that case return the other. | 
|  | 5544 | __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset)); | 
|  | 5545 | __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset)); | 
|  | 5546 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 5547 | __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty. | 
|  | 5548 | __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second. | 
|  | 5549 | STATIC_ASSERT(kSmiTag == 0); | 
|  | 5550 | // Else test if second string is empty. | 
|  | 5551 | __ cmp(r3, Operand(Smi::FromInt(0)), ne); | 
|  | 5552 | __ b(ne, &strings_not_empty);  // If either string was empty, return r0. | 
|  | 5553 |  | 
|  | 5554 | __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 
|  | 5555 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
|  | 5556 | __ Ret(); | 
|  | 5557 |  | 
|  | 5558 | __ bind(&strings_not_empty); | 
|  | 5559 | } | 
|  | 5560 |  | 
|  | 5561 | __ mov(r2, Operand(r2, ASR, kSmiTagSize)); | 
|  | 5562 | __ mov(r3, Operand(r3, ASR, kSmiTagSize)); | 
|  | 5563 | // Both strings are non-empty. | 
|  | 5564 | // r0: first string | 
|  | 5565 | // r1: second string | 
|  | 5566 | // r2: length of first string | 
|  | 5567 | // r3: length of second string | 
|  | 5568 | // r4: first string instance type (if string_check_) | 
|  | 5569 | // r5: second string instance type (if string_check_) | 
|  | 5570 | // Look at the length of the result of adding the two strings. | 
|  | 5571 | Label string_add_flat_result, longer_than_two; | 
|  | 5572 | // Adding two lengths can't overflow. | 
|  | 5573 | STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); | 
|  | 5574 | __ add(r6, r2, Operand(r3)); | 
|  | 5575 | // Handle adding two one-character strings specially: look the two-character | 
|  | 5576 | // result up in the symbol table instead of always allocating a new string. | 
|  | 5577 | __ cmp(r6, Operand(2)); | 
|  | 5578 | __ b(ne, &longer_than_two); | 
|  | 5579 |  | 
|  | 5580 | // Check that both strings are non-external ascii strings. | 
|  | 5581 | if (!string_check_) { | 
|  | 5582 | __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 5583 | __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 
|  | 5584 | __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 
|  | 5585 | __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 
|  | 5586 | } | 
|  | 5587 | __ JumpIfBothInstanceTypesAreNotSequentialAscii(r4, r5, r6, r7, | 
|  | 5588 | &string_add_runtime); | 
|  | 5589 |  | 
|  | 5590 | // Get the two characters forming the new string. | 
|  | 5591 | __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | 
|  | 5592 | __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); | 
|  | 5593 |  | 
|  | 5594 | // Try to look up the two-character string in the symbol table. If it is | 
|  | 5595 | // not found, just allocate a new one. | 
|  | 5596 | Label make_two_character_string; | 
|  | 5597 | StringHelper::GenerateTwoCharacterSymbolTableProbe( | 
|  | 5598 | masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); | 
|  | 5599 | __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 
|  | 5600 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
|  | 5601 | __ Ret(); | 
|  | 5602 |  | 
|  | 5603 | __ bind(&make_two_character_string); | 
|  | 5604 | // The resulting string has length 2, and the first characters of the two | 
|  | 5605 | // strings have been combined into a single halfword in register r2. We | 
|  | 5606 | // can therefore fill the resulting string without two loops, using a | 
|  | 5607 | // single halfword store instruction (which assumes that the processor is | 
|  | 5608 | // in little-endian mode). | 
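|  |  | // E.g. adding "a" (0x61) and "b" (0x62) leaves r2 holding 0x6261, and | 
|  |  | // the strh below writes both characters of "ab" at once. | 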
|  | 5609 | __ mov(r6, Operand(2)); | 
|  | 5610 | __ AllocateAsciiString(r0, r6, r4, r5, r9, &string_add_runtime); | 
|  | 5611 | __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); | 
|  | 5612 | __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 
|  | 5613 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
|  | 5614 | __ Ret(); | 
|  | 5615 |  | 
|  | 5616 | __ bind(&longer_than_two); | 
|  | 5617 | // Check if resulting string will be flat. | 
|  | 5618 | __ cmp(r6, Operand(String::kMinNonFlatLength)); | 
|  | 5619 | __ b(lt, &string_add_flat_result); | 
|  | 5620 | // Handle exceptionally long strings in the runtime system. | 
|  | 5621 | STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0); | 
|  | 5622 | ASSERT(IsPowerOf2(String::kMaxLength + 1)); | 
|  | 5623 | // kMaxLength + 1 is representable as shifted literal, kMaxLength is not. | 
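|  |  | // (ARM data-processing immediates are an 8-bit value rotated right by an | 
|  |  | // even amount; a power of two such as kMaxLength + 1 can be encoded that | 
|  |  | // way, while the all-ones value kMaxLength cannot.) | 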
|  | 5624 | __ cmp(r6, Operand(String::kMaxLength + 1)); | 
|  | 5625 | __ b(hs, &string_add_runtime); | 
|  | 5626 |  | 
|  | 5627 | // If the result is not supposed to be flat, allocate a cons string object. | 
|  | 5628 | // If both strings are ASCII, the result is an ASCII cons string. | 
|  | 5629 | if (!string_check_) { | 
|  | 5630 | __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 5631 | __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 
|  | 5632 | __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 
|  | 5633 | __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 
|  | 5634 | } | 
|  | 5635 | Label non_ascii, allocated, ascii_data; | 
|  | 5636 | STATIC_ASSERT(kTwoByteStringTag == 0); | 
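|  |  | // kTwoByteStringTag is 0, so a set encoding bit means ASCII. The second | 
|  |  | // tst is predicated on ne and only executes if the first string is | 
|  |  | // ASCII; the eq branch is therefore taken if either string is two-byte. | 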
|  | 5637 | __ tst(r4, Operand(kStringEncodingMask)); | 
|  | 5638 | __ tst(r5, Operand(kStringEncodingMask), ne); | 
|  | 5639 | __ b(eq, &non_ascii); | 
|  | 5640 |  | 
|  | 5641 | // Allocate an ASCII cons string. | 
|  | 5642 | __ bind(&ascii_data); | 
|  | 5643 | __ AllocateAsciiConsString(r7, r6, r4, r5, &string_add_runtime); | 
|  | 5644 | __ bind(&allocated); | 
|  | 5645 | // Fill the fields of the cons string. | 
|  | 5646 | __ str(r0, FieldMemOperand(r7, ConsString::kFirstOffset)); | 
|  | 5647 | __ str(r1, FieldMemOperand(r7, ConsString::kSecondOffset)); | 
|  | 5648 | __ mov(r0, Operand(r7)); | 
|  | 5649 | __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 
|  | 5650 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
|  | 5651 | __ Ret(); | 
|  | 5652 |  | 
|  | 5653 | __ bind(&non_ascii); | 
|  | 5654 | // At least one of the strings is two-byte. Check whether its data happens | 
|  | 5655 | // to contain only ASCII characters anyway. | 
|  | 5656 | // r4: first instance type. | 
|  | 5657 | // r5: second instance type. | 
|  | 5658 | __ tst(r4, Operand(kAsciiDataHintMask)); | 
|  | 5659 | __ tst(r5, Operand(kAsciiDataHintMask), ne); | 
|  | 5660 | __ b(ne, &ascii_data); | 
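|  |  | // The branch above handled the case where both strings carry the hint. | 
|  |  | // The eor/and/cmp sequence below accepts one further case: the ASCII | 
|  |  | // flag and the hint flag each differ between the two strings, i.e. one | 
|  |  | // string is ASCII and the other, two-byte one carries the ASCII data | 
|  |  | // hint. | 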
|  | 5661 | __ eor(r4, r4, Operand(r5)); | 
|  | 5662 | STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); | 
|  | 5663 | __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); | 
|  | 5664 | __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); | 
|  | 5665 | __ b(eq, &ascii_data); | 
|  | 5666 |  | 
|  | 5667 | // Allocate a two-byte cons string. | 
|  | 5668 | __ AllocateTwoByteConsString(r7, r6, r4, r5, &string_add_runtime); | 
|  | 5669 | __ jmp(&allocated); | 
|  | 5670 |  | 
|  | 5671 | // Handle creating a flat result. First check that both strings are | 
|  | 5672 | // sequential and that they have the same encoding. | 
|  | 5673 | // r0: first string | 
|  | 5674 | // r1: second string | 
|  | 5675 | // r2: length of first string | 
|  | 5676 | // r3: length of second string | 
|  | 5677 | // r4: first string instance type (if string_check_) | 
|  | 5678 | // r5: second string instance type (if string_check_) | 
|  | 5679 | // r6: sum of lengths. | 
|  | 5680 | __ bind(&string_add_flat_result); | 
|  | 5681 | if (!string_check_) { | 
|  | 5682 | __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
|  | 5683 | __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset)); | 
|  | 5684 | __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 
|  | 5685 | __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset)); | 
|  | 5686 | } | 
|  | 5687 | // Check that both strings are sequential. | 
|  | 5688 | STATIC_ASSERT(kSeqStringTag == 0); | 
|  | 5689 | __ tst(r4, Operand(kStringRepresentationMask)); | 
|  | 5690 | __ tst(r5, Operand(kStringRepresentationMask), eq); | 
|  | 5691 | __ b(ne, &string_add_runtime); | 
|  | 5692 | // Now check if both strings have the same encoding (ASCII/Two-byte). | 
|  | 5693 | // r0: first string. | 
|  | 5694 | // r1: second string. | 
|  | 5695 | // r2: length of first string. | 
|  | 5696 | // r3: length of second string. | 
|  | 5697 | // r6: sum of lengths. | 
|  | 5698 | Label non_ascii_string_add_flat_result; | 
|  | 5699 | ASSERT(IsPowerOf2(kStringEncodingMask));  // Just one bit to test. | 
|  | 5700 | __ eor(r7, r4, Operand(r5)); | 
|  | 5701 | __ tst(r7, Operand(kStringEncodingMask)); | 
|  | 5702 | __ b(ne, &string_add_runtime); | 
|  | 5703 | // And see if it's ASCII or two-byte. | 
|  | 5704 | __ tst(r4, Operand(kStringEncodingMask)); | 
|  | 5705 | __ b(eq, &non_ascii_string_add_flat_result); | 
|  | 5706 |  | 
|  | 5707 | // Both strings are sequential ASCII strings. We also know that they are | 
|  | 5708 | // short (since the sum of the lengths is less than kMinNonFlatLength). | 
|  | 5709 | // r6: length of resulting flat string | 
|  | 5710 | __ AllocateAsciiString(r7, r6, r4, r5, r9, &string_add_runtime); | 
|  | 5711 | // Locate first character of result. | 
|  | 5712 | __ add(r6, r7, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
|  | 5713 | // Locate first character of first argument. | 
|  | 5714 | __ add(r0, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
|  | 5715 | // r0: first character of first string. | 
|  | 5716 | // r1: second string. | 
|  | 5717 | // r2: length of first string. | 
|  | 5718 | // r3: length of second string. | 
|  | 5719 | // r6: first character of result. | 
|  | 5720 | // r7: result string. | 
|  | 5721 | StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, true); | 
|  | 5722 |  | 
|  | 5723 | // Load second argument and locate first character. | 
|  | 5724 | __ add(r1, r1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 
|  | 5725 | // r1: first character of second string. | 
|  | 5726 | // r3: length of second string. | 
|  | 5727 | // r6: next character of result. | 
|  | 5728 | // r7: result string. | 
|  | 5729 | StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, true); | 
|  | 5730 | __ mov(r0, Operand(r7)); | 
|  | 5731 | __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 
|  | 5732 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
|  | 5733 | __ Ret(); | 
|  | 5734 |  | 
|  | 5735 | __ bind(&non_ascii_string_add_flat_result); | 
|  | 5736 | // Both strings are sequential two-byte strings. | 
|  | 5737 | // r0: first string. | 
|  | 5738 | // r1: second string. | 
|  | 5739 | // r2: length of first string. | 
|  | 5740 | // r3: length of second string. | 
|  | 5741 | // r6: sum of lengths. | 
|  | 5742 | __ AllocateTwoByteString(r7, r6, r4, r5, r9, &string_add_runtime); | 
|  | 5743 | // r0: first string. | 
|  | 5744 | // r1: second string. | 
|  | 5745 | // r2: length of first string. | 
|  | 5746 | // r3: length of second string. | 
|  | 5747 | // r7: result string. | 
|  | 5748 |  | 
|  | 5749 | // Locate first character of result. | 
|  | 5750 | __ add(r6, r7, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
|  | 5751 | // Locate first character of first argument. | 
|  | 5752 | __ add(r0, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
|  | 5753 |  | 
|  | 5754 | // r0: first character of first string. | 
|  | 5755 | // r1: second string. | 
|  | 5756 | // r2: length of first string. | 
|  | 5757 | // r3: length of second string. | 
|  | 5758 | // r6: first character of result. | 
|  | 5759 | // r7: result string. | 
|  | 5760 | StringHelper::GenerateCopyCharacters(masm, r6, r0, r2, r4, false); | 
|  | 5761 |  | 
|  | 5762 | // Locate first character of second argument. | 
|  | 5763 | __ add(r1, r1, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
|  | 5764 |  | 
|  | 5765 | // r1: first character of second string. | 
|  | 5766 | // r3: length of second string. | 
|  | 5767 | // r6: next character of result (after copy of first string). | 
|  | 5768 | // r7: result string. | 
|  | 5769 | StringHelper::GenerateCopyCharacters(masm, r6, r1, r3, r4, false); | 
|  | 5770 |  | 
|  | 5771 | __ mov(r0, Operand(r7)); | 
|  | 5772 | __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 
|  | 5773 | __ add(sp, sp, Operand(2 * kPointerSize)); | 
|  | 5774 | __ Ret(); | 
|  | 5775 |  | 
|  | 5776 | // Just jump to runtime to add the two strings. | 
|  | 5777 | __ bind(&string_add_runtime); | 
|  | 5778 | __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 
|  | 5779 | } | 
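|  |  |  | 
|  |  | // A rough map from JavaScript-level concatenations to the paths above | 
|  |  | // (illustrative, not exhaustive): | 
|  |  | //   "" + s or s + ""     -> early return of the non-empty operand | 
|  |  | //   "a" + "b"            -> two-character symbol table probe | 
|  |  | //   short flat results   -> inline character copy into a new string | 
|  |  | //   long results         -> ASCII or two-byte cons string | 
|  |  | //   anything else        -> Runtime::kStringAdd | 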
|  | 5780 |  | 
|  | 5781 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 5782 | void StringCharAtStub::Generate(MacroAssembler* masm) { | 
|  | 5783 | // Expects two arguments (object, index) on the stack: | 
|  | 5784 | //  lr: return address | 
|  | 5785 | //  sp[0]: index | 
|  | 5786 | //  sp[4]: object | 
|  | 5787 | Register object = r1; | 
|  | 5788 | Register index = r0; | 
|  | 5789 | Register scratch1 = r2; | 
|  | 5790 | Register scratch2 = r3; | 
|  | 5791 | Register result = r0; | 
|  | 5792 |  | 
|  | 5793 | // Get object and index from the stack. | 
|  | 5794 | __ pop(index); | 
|  | 5795 | __ pop(object); | 
|  | 5796 |  | 
|  | 5797 | Label need_conversion; | 
|  | 5798 | Label index_out_of_range; | 
|  | 5799 | Label done; | 
|  | 5800 | StringCharAtGenerator generator(object, | 
|  | 5801 | index, | 
|  | 5802 | scratch1, | 
|  | 5803 | scratch2, | 
|  | 5804 | result, | 
|  | 5805 | &need_conversion, | 
|  | 5806 | &need_conversion, | 
|  | 5807 | &index_out_of_range, | 
|  | 5808 | STRING_INDEX_IS_NUMBER); | 
|  | 5809 | generator.GenerateFast(masm); | 
|  | 5810 | __ b(&done); | 
|  | 5811 |  | 
|  | 5812 | __ bind(&index_out_of_range); | 
|  | 5813 | // When the index is out of range, the spec requires us to return | 
|  | 5814 | // the empty string. | 
|  | 5815 | __ LoadRoot(result, Heap::kEmptyStringRootIndex); | 
|  | 5816 | __ jmp(&done); | 
|  | 5817 |  | 
|  | 5818 | __ bind(&need_conversion); | 
|  | 5819 | // Move smi zero into the result register, which will trigger | 
|  | 5820 | // conversion. | 
|  | 5821 | __ mov(result, Operand(Smi::FromInt(0))); | 
|  | 5822 | __ b(&done); | 
|  | 5823 |  | 
|  | 5824 | StubRuntimeCallHelper call_helper; | 
|  | 5825 | generator.GenerateSlow(masm, call_helper); | 
|  | 5826 |  | 
|  | 5827 | __ bind(&done); | 
|  | 5828 | __ Ret(); | 
|  | 5829 | } | 
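|  |  |  | 
|  |  | // Illustrative JS-level cases (assuming this stub backs String charAt): | 
|  |  | //   "abc".charAt(1) -> "b"   (fast path) | 
|  |  | //   "abc".charAt(9) -> ""    (index_out_of_range) | 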
|  | 5830 |  | 
|  | 5831 |  | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 5832 | void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 
|  | 5833 | ASSERT(state_ == CompareIC::SMIS); | 
|  | 5834 | Label miss; | 
|  | 5835 | __ orr(r2, r1, r0); | 
|  | 5836 | __ tst(r2, Operand(kSmiTagMask)); | 
|  | 5837 | __ b(ne, &miss); | 
|  | 5838 |  | 
|  | 5839 | if (GetCondition() == eq) { | 
|  | 5840 | // For equality we do not care about the sign of the result. | 
|  | 5841 | __ sub(r0, r0, r1, SetCC); | 
|  | 5842 | } else { | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 5843 | // Untag before subtracting to avoid handling overflow. | 
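|  |  | // E.g. for lhs 5 (tagged 0xA) in r1 and rhs 3 (tagged 0x6) in r0 this | 
|  |  | // computes r0 = 5 - 3 = 2; a positive result means lhs > rhs. Untagged | 
|  |  | // smis fit in 31 bits, so the 32-bit subtraction cannot overflow. | 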
|  | 5844 | __ SmiUntag(r1); | 
|  | 5845 | __ sub(r0, r1, SmiUntagOperand(r0)); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 5846 | } | 
|  | 5847 | __ Ret(); | 
|  | 5848 |  | 
|  | 5849 | __ bind(&miss); | 
|  | 5850 | GenerateMiss(masm); | 
|  | 5851 | } | 
|  | 5852 |  | 
|  | 5853 |  | 
|  | 5854 | void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { | 
|  | 5855 | ASSERT(state_ == CompareIC::HEAP_NUMBERS); | 
|  | 5856 |  | 
|  | 5857 | Label generic_stub; | 
|  | 5858 | Label unordered; | 
|  | 5859 | Label miss; | 
|  | 5860 | __ and_(r2, r1, Operand(r0)); | 
|  | 5861 | __ tst(r2, Operand(kSmiTagMask)); | 
|  | 5862 | __ b(eq, &generic_stub); | 
|  | 5863 |  | 
|  | 5864 | __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); | 
|  | 5865 | __ b(ne, &miss); | 
|  | 5866 | __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); | 
|  | 5867 | __ b(ne, &miss); | 
|  | 5868 |  | 
|  | 5869 | // Inline the double comparison, falling back to the general compare stub | 
|  | 5870 | // if a NaN is involved or VFP3 is unsupported. | 
|  | 5871 | if (CpuFeatures::IsSupported(VFP3)) { | 
|  | 5872 | CpuFeatures::Scope scope(VFP3); | 
|  | 5873 |  | 
|  | 5874 | // Load the left and right operands. | 
|  | 5875 | __ sub(r2, r1, Operand(kHeapObjectTag)); | 
|  | 5876 | __ vldr(d0, r2, HeapNumber::kValueOffset); | 
|  | 5877 | __ sub(r2, r0, Operand(kHeapObjectTag)); | 
|  | 5878 | __ vldr(d1, r2, HeapNumber::kValueOffset); | 
|  | 5879 |  | 
|  | 5880 | // Compare the operands. | 
| Ben Murdoch | b8e0da2 | 2011-05-16 14:20:40 +0100 | [diff] [blame] | 5881 | __ VFPCompareAndSetFlags(d0, d1); | 
| Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 5882 |  | 
|  | 5883 | // Don't base result on status bits when a NaN is involved. | 
|  | 5884 | __ b(vs, &unordered); | 
|  | 5885 |  | 
|  | 5886 | // Return a result of -1, 0, or 1, based on status bits. | 
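|  |  | // For ordered operands exactly one of eq, lt, and gt holds, so exactly | 
|  |  | // one of the three conditional movs below takes effect. | 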
|  | 5887 | __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 
|  | 5888 | __ mov(r0, Operand(LESS), LeaveCC, lt); | 
|  | 5889 | __ mov(r0, Operand(GREATER), LeaveCC, gt); | 
|  | 5890 | __ Ret(); | 
|  | 5891 |  | 
|  | 5892 | __ bind(&unordered); | 
|  | 5893 | } | 
|  | 5894 |  | 
|  | 5895 | CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); | 
|  | 5896 | __ bind(&generic_stub); | 
|  | 5897 | __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 
|  | 5898 |  | 
|  | 5899 | __ bind(&miss); | 
|  | 5900 | GenerateMiss(masm); | 
|  | 5901 | } | 
|  | 5902 |  | 
|  | 5903 |  | 
|  | 5904 | void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 
|  | 5905 | ASSERT(state_ == CompareIC::OBJECTS); | 
|  | 5906 | Label miss; | 
|  | 5907 | __ and_(r2, r1, Operand(r0)); | 
|  | 5908 | __ tst(r2, Operand(kSmiTagMask)); | 
|  | 5909 | __ b(eq, &miss); | 
|  | 5910 |  | 
|  | 5911 | __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); | 
|  | 5912 | __ b(ne, &miss); | 
|  | 5913 | __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); | 
|  | 5914 | __ b(ne, &miss); | 
|  | 5915 |  | 
|  | 5916 | ASSERT(GetCondition() == eq); | 
|  | 5917 | __ sub(r0, r0, Operand(r1)); | 
|  | 5918 | __ Ret(); | 
|  | 5919 |  | 
|  | 5920 | __ bind(&miss); | 
|  | 5921 | GenerateMiss(masm); | 
|  | 5922 | } | 
|  | 5923 |  | 
|  | 5924 |  | 
|  | 5925 | void ICCompareStub::GenerateMiss(MacroAssembler* masm) { | 
|  | 5926 | __ Push(r1, r0); | 
|  | 5927 | __ push(lr); | 
|  | 5928 |  | 
|  | 5929 | // Call the runtime system in a fresh internal frame. | 
|  | 5930 | ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss)); | 
|  | 5931 | __ EnterInternalFrame(); | 
|  | 5932 | __ Push(r1, r0); | 
|  | 5933 | __ mov(ip, Operand(Smi::FromInt(op_))); | 
|  | 5934 | __ push(ip); | 
|  | 5935 | __ CallExternalReference(miss, 3); | 
|  | 5936 | __ LeaveInternalFrame(); | 
|  | 5937 | // Compute the entry point of the rewritten stub. | 
|  | 5938 | __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
|  | 5939 | // Restore registers. | 
|  | 5940 | __ pop(lr); | 
|  | 5941 | __ pop(r0); | 
|  | 5942 | __ pop(r1); | 
|  | 5943 | __ Jump(r2); | 
|  | 5944 | } | 
|  | 5945 |  | 
|  | 5946 |  | 
| Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame^] | 5947 | void DirectCEntryStub::Generate(MacroAssembler* masm) { | 
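|  |  | // The whole stub is the single instruction below: reload the return | 
|  |  | // address that GenerateCall stored at sp and jump to it, resuming the | 
|  |  | // code that made the direct C call. | 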
|  | 5948 | __ ldr(pc, MemOperand(sp, 0)); | 
|  | 5949 | } | 
|  | 5950 |  | 
|  | 5951 |  | 
|  | 5952 | void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | 
|  | 5953 | ApiFunction *function) { | 
|  | 5954 | __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), | 
|  | 5955 | RelocInfo::CODE_TARGET)); | 
|  | 5956 | // Store the return address on the stack (accessible to the GC through the exit frame pc). | 
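|  |  | // (Reading pc on ARM yields the address of the current instruction plus | 
|  |  | // 8, so the value stored below is the address of the instruction after | 
|  |  | // the Jump -- the point where execution resumes on return.) | 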
|  | 5957 | __ mov(r2, | 
|  | 5958 | Operand(ExternalReference(function, ExternalReference::DIRECT_CALL))); | 
|  | 5959 | __ str(pc, MemOperand(sp, 0)); | 
|  | 5960 | __ Jump(r2);  // Call the api function. | 
|  | 5961 | } | 
|  | 5962 |  | 
|  | 5963 |  | 
|  | 5964 | void GenerateFastPixelArrayLoad(MacroAssembler* masm, | 
|  | 5965 | Register receiver, | 
|  | 5966 | Register key, | 
|  | 5967 | Register elements_map, | 
|  | 5968 | Register elements, | 
|  | 5969 | Register scratch1, | 
|  | 5970 | Register scratch2, | 
|  | 5971 | Register result, | 
|  | 5972 | Label* not_pixel_array, | 
|  | 5973 | Label* key_not_smi, | 
|  | 5974 | Label* out_of_range) { | 
|  | 5975 | // Register use: | 
|  | 5976 | // | 
|  | 5977 | // receiver - holds the receiver on entry. | 
|  | 5978 | //            Unchanged unless 'result' is the same register. | 
|  | 5979 | // | 
|  | 5980 | // key      - holds the smi key on entry. | 
|  | 5981 | //            Unchanged unless 'result' is the same register. | 
|  | 5982 | // | 
|  | 5983 | // elements - set to be the receiver's elements on exit. | 
|  | 5984 | // | 
|  | 5985 | // elements_map - set to be the map of the receiver's elements | 
|  | 5986 | //            on exit. | 
|  | 5987 | // | 
|  | 5988 | // result   - holds the result of the pixel array load on exit, | 
|  | 5989 | //            tagged as a smi if successful. | 
|  | 5990 | // | 
|  | 5991 | // Scratch registers: | 
|  | 5992 | // | 
|  | 5993 | // scratch1 - used as a scratch register in the map check; if the map | 
|  | 5994 | //            check succeeds, it holds, in turn, the length of the | 
|  | 5995 | //            pixel array, the pointer to the external elements, and | 
|  | 5996 | //            the untagged result. | 
|  | 5997 | // | 
|  | 5998 | // scratch2 - holds the untagged key. | 
|  | 5999 |  | 
|  | 6000 | // Some callers have already verified that the key is a smi. key_not_smi | 
|  | 6001 | // is set to NULL as a sentinel for that case. Otherwise, an explicit | 
|  | 6002 | // check that the key is a smi is emitted here. | 
|  | 6003 | if (key_not_smi != NULL) { | 
|  | 6004 | __ JumpIfNotSmi(key, key_not_smi); | 
|  | 6005 | } else { | 
|  | 6006 | if (FLAG_debug_code) { | 
|  | 6007 | __ AbortIfNotSmi(key); | 
|  | 6008 | } | 
|  | 6009 | } | 
|  | 6010 | __ SmiUntag(scratch2, key); | 
|  | 6011 |  | 
|  | 6012 | // Verify that the receiver has pixel array elements. | 
|  | 6013 | __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 
|  | 6014 | __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex, | 
|  | 6015 | not_pixel_array, true); | 
|  | 6016 |  | 
|  | 6017 | // Key must be in range of the pixel array. | 
|  | 6018 | __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset)); | 
|  | 6019 | __ cmp(scratch2, scratch1); | 
|  | 6020 | __ b(hs, out_of_range);  // The unsigned check also rejects negative keys. | 
|  | 6021 |  | 
|  | 6022 | // Perform the indexed load and tag the result as a smi. | 
|  | 6023 | __ ldr(scratch1, | 
|  | 6024 | FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); | 
|  | 6025 | __ ldrb(scratch1, MemOperand(scratch1, scratch2)); | 
|  | 6026 | __ SmiTag(result, scratch1);  // Tag into the documented result register. | 
|  | 6027 | __ Ret(); | 
|  | 6028 | } | 
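|  |  |  | 
|  |  | // Illustrative call (hypothetical register choices and labels; actual | 
|  |  | // callers in the keyed-load code pick their own): | 
|  |  | //   GenerateFastPixelArrayLoad(masm, r1, r0, r3, r4, r5, r6, r0, | 
|  |  | //                              &slow, &check_number, &slow); | 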
|  | 6029 |  | 
|  | 6030 |  | 
| Kristian Monsen | 80d68ea | 2010-09-08 11:05:35 +0100 | [diff] [blame] | 6031 | #undef __ | 
|  | 6032 |  | 
|  | 6033 | } }  // namespace v8::internal | 
|  | 6034 |  | 
|  | 6035 | #endif  // V8_TARGET_ARCH_ARM |