// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, global_object);
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
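  // A dictionary entry occupies three pointers: key, value and details, in
  // that order; FieldMemOperand(scratch2, kElementsStartOffset) is the key
  // slot of the entry found by the probe. The details word is a smi, and a
  // NORMAL data property has type 0, so any bit set under the shifted type
  // mask below means the property cannot be handled here.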
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
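  // A single tst covers both requirements: the type field must be zero (a
  // plain NORMAL data property) and the READ_ONLY attribute bit must be clear.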
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ b(ne, slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
  __ b(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch1, ip);
    __ b(ne, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // Check that the key (index) is within bounds.
  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch1));
  __ b(hs, out_of_range);
  // Fast case: Do the load.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, ip);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ b(eq, out_of_range);
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ b(hi, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ b(eq, &unique);

  // Is the string an array index, with cached numeric value?
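  // All bits covered by kContainsCachedArrayIndexMask are clear when the hash
  // field caches a numeric array index, so a zero tst result lets us reuse it.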
  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ b(eq, index_string);

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ tst(hash, Operand(kIsNotInternalizedMask));
  __ b(ne, not_unique);

  __ bind(&unique);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r0, r3, r4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r3; }


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(
    MacroAssembler* masm, Register object, Register key, Register scratch1,
    Register scratch2, Register scratch3, Label* unmapped_case,
    Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, slow_case);

  // Check that the key is a positive smi.
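  // Testing against 0x80000001 checks the sign bit and the smi tag bit at
  // once: the key must be a smi (bit 0 clear) and non-negative (bit 31 clear).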
  __ tst(key, Operand(0x80000001));
  __ b(ne, slow_case);

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
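  // The first two slots of the parameter map hold the context and the
  // arguments backing store, so only length - 2 entries are mapped parameters.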
  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ cmp(key, Operand(scratch2));
  __ b(cs, unmapped_case);

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, key, scratch3);
  __ add(scratch3, scratch3, Operand(kOffset));

  __ ldr(scratch2, MemOperand(scratch1, scratch3));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, scratch3);
  __ b(eq, unmapped_case);

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, scratch2, scratch3);
  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  return MemOperand(scratch1, scratch3);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch));
  __ b(cs, slow_case);
  __ mov(scratch, Operand(kPointerSize >> 1));
  __ mul(scratch, key, scratch);
  __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  return MemOperand(backing_store, scratch);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));

  Label slow, notin;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, r3, r4, r5, &notin, &slow);
  __ str(value, mapped_location);
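  // mapped_location is MemOperand(r3, r5); rebuild that address in r6 so the
  // write barrier below is given the exact slot that was written.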
  __ add(r6, r3, r5);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in r3.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
  __ str(value, unmapped_location);
  __ add(r6, r3, r4);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(r2));
  DCHECK(receiver.is(r1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r0, r3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // r3: elements map
  // r4: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ SmiUntag(r0, key);
  __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
  __ Ret();

  // Slow case, key and receiver still in r2 and r1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
                      r3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &probe_dictionary);

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
  __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(r3, r3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ mov(r4, Operand(cache_keys));
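  // Each cache entry is a (map, name) pair, i.e. two pointers, hence the extra
  // shift by one when scaling the bucket index into the keys array.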
  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and move r4 to next entry.
    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
    __ cmp(r0, r5);
    __ b(ne, &try_next_entry);
    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load name
    __ cmp(key, r5);
    __ b(eq, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  // Last entry: Load map and move r4 to name.
  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
  __ cmp(r0, r5);
  __ b(ne, &slow);
  __ ldr(r5, MemOperand(r4));
  __ cmp(key, r5);
  __ b(ne, &slow);

  // Get field offset.
  // r0 : receiver's map
  // r3 : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ mov(r4, Operand(cache_field_offsets));
    if (i != 0) {
      __ add(r3, r3, Operand(i));
    }
    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
    __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
    __ sub(r5, r5, r6, SetCC);
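    // r5 is now (field index - number of in-object properties): negative for
    // in-object fields, and otherwise the index into the property array.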
    __ b(ge, &property_array_property);
    if (i != 0) {
      __ jmp(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
  __ add(r6, r6, r5);  // Index from start of object.
  __ sub(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      r4, r3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      r4, r3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r3: properties dictionary
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
  // Load the property to r0.
  GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
                      r3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r3, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = LoadDescriptor::ReceiverRegister();
  Register index = LoadDescriptor::NameRegister();
  Register scratch = r3;
  Register result = r0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = r4;
  Register address = r5;
  if (check_map == kCheckMap) {
    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ b(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element
  Label holecheck_passed1;
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ str(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ b(ne, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
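  // Only the upper 32 bits of a boxed double need to be checked to recognize
  // the hole NaN, so the base address is biased past the lower word and a
  // single word is compared per element.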
  __ add(address, elements,
         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
                 kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
  __ cmp(scratch_value, Operand(kHoleNanUpper32));
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JSObject.
  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: key.
  // r2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength, value,
                                  key, receiver, receiver_map, elements_map,
                                  elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(StoreDescriptor::ValueRegister().is(r0));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));

  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
                                               name, r3, r4, r5, r6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r3;
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(value.is(r0));

  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // The delta to the start of the map check instruction and the condition
  // code to use at the patched jump.
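  // The delta is encoded partly in the 12-bit immediate and partly in the
  // register field of the dummy cmp, since the immediate alone cannot hold
  // large deltas.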
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
           cmp_instruction_address, delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp rx, rx
  //   b eq/ne, <target>
  // to
  //   tst rx, #kSmiTagMask
  //   b ne/eq, <target>
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Assembler::GetRn(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
              Assembler::GetRm(instr_at_patch).code());
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsTstImmediate(instr_at_patch));
    patcher.masm()->cmp(reg, reg);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM