// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


// "type" holds an instance type on entry and is not clobbered.
// The generated code branches to "global_object" if "type" is any kind of
// global JS object.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
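  // Each Ccmp below only performs its comparison when the previous result was
  // "ne"; once any comparison sets "eq", the ZFlag immediate keeps that result
  // through the rest of the chain, so the final branch is taken when "type"
  // matches any of the three global object types.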
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
  DCHECK(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

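  // GeneratePositiveLookup leaves scratch2 holding the dictionary address plus
  // the scaled offset of the matching entry. Each NameDictionary entry spans
  // three pointer-sized slots (key, value, details), so the value and details
  // slots sit at fixed offsets from scratch2.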
  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read-only.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
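  // Normal properties encode a zero type field, so combining the type mask
  // with the READ_ONLY attribute bit lets a single Tst reject both non-normal
  // and read-only properties.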
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  DCHECK(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
//
// receiver     - holds the receiver on entry.
//                Unchanged unless 'result' is the same register.
//
// key          - holds the smi key on entry.
//                Unchanged unless 'result' is the same register.
//
// elements     - holds the elements of the receiver on exit.
//
// elements_map - holds the elements map on exit if the not_fast_array branch
//                is taken. Otherwise, this is used as a scratch register.
//
// result       - holds the result on exit if the load succeeded.
//                Allowed to be the same as 'receiver' or 'key'.
//                Unchanged on bailout so 'receiver' and 'key' can be safely
//                used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register elements_map, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* slow) {
  DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // The elements_map register is only used for the not_fast_array path, which
  // was handled above. From this point onward it is a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
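  // Both the key and the length are smis with a zero tag, so their tagged
  // values can be compared directly; the unsigned check also rejects negative
  // keys.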
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);

  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole, we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done, the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map_scratch, Register hash_scratch,
                                 Label* index_string, Label* not_unique) {
  DCHECK(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ B(eq, &unique);

  // Is the string an array index with a cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test is
  // enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}


// Neither 'object' nor 'key' is modified by this function.
//
// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object, Register key,
                                                Register map, Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  DCHECK(!AreAliased(object, key, map, scratch1, scratch2));

  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
                      lt);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);
  __ Tbnz(key, kXSignBit, slow_case);

  // Load the elements object and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

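  // The elements array of a sloppy arguments object holds the context, the
  // unmapped backing store, and then one slot per mapped parameter (either
  // the hole or a smi index into the context). Hence the key is checked
  // against length - 2, and the mapped entries start two pointers past the
  // FixedArray header.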
  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup.
  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ Sub(scratch1, scratch1, Smi::FromInt(2));
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load value from context and return it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
  // The base of the result (scratch2) is passed to RecordWrite in
  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
  return MemOperand(scratch2, scratch1);
}


// The 'parameter_map' register must be loaded with the parameter map of the
// arguments object and is overwritten.
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  DCHECK(!AreAliased(key, parameter_map, scratch));

  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store, backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = x0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), x0, x3, x4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ Bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  DCHECK(!FLAG_vector_ics ||
         !AreAliased(x4, x5, VectorLoadICDescriptor::SlotRegister(),
                     VectorLoadICDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);

  // Perform tail call to the entry.
  if (FLAG_vector_ics) {
    __ Push(VectorLoadICDescriptor::ReceiverRegister(),
            VectorLoadICDescriptor::NameRegister(),
            VectorLoadICDescriptor::SlotRegister(),
            VectorLoadICDescriptor::VectorRegister());
  } else {
    __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
  }
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  int arg_count = FLAG_vector_ics ? 4 : 2;
  __ TailCallExternalReference(ref, arg_count, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
  Label slow, notin;
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped = GenerateMappedArgumentsLookup(
      masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used by GenerateUnmappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register unmapped1 = map;  // This is assumed to alias 'map'.
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret();
  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!FLAG_vector_ics ||
         !AreAliased(x10, x11, VectorLoadICDescriptor::SlotRegister(),
                     VectorLoadICDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  if (FLAG_vector_ics) {
    __ Push(VectorLoadICDescriptor::ReceiverRegister(),
            VectorLoadICDescriptor::NameRegister(),
            VectorLoadICDescriptor::SlotRegister(),
            VectorLoadICDescriptor::VectorRegister());
  } else {
    __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
  }

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
  int arg_count = FLAG_vector_ics ? 4 : 2;
  __ TailCallExternalReference(ref, arg_count, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
                                        Register receiver, Register scratch1,
                                        Register scratch2, Register scratch3,
                                        Register scratch4, Register scratch5,
                                        Label* slow) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
                        result, NULL, slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
                      scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
                              scratch4, scratch5);
  __ Ret();
}

static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
                                         Register receiver, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, Register scratch5,
                                         Label* slow) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);

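  // scratch2 now holds the bucket index into the keyed lookup cache. The cache
  // keys table stores (map, name) pairs, kEntriesPerBucket of them per bucket,
  // with a parallel table of field offsets.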
  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ Mov(scratch3, cache_keys);
  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load the map and advance scratch3 to the next entry.
    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load the name.
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Ldr(scratch4, MemOperand(scratch3));
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get field offset.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }

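  // At this point scratch4 holds the cached field index minus the number of
  // in-object properties: negative for in-object fields, otherwise an index
  // into the properties backing store.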
  // Load in-object property.
  __ Bind(&load_in_object_property);
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Load property array property.
  __ Bind(&property_array_property);
  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
                      scratch1, scratch2);
  __ Ret();
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(x2));
  DCHECK(receiver.is(x1));

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  // Slow case.
  __ Bind(&slow);
  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
                      x4, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
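  // The key is a string whose hash field caches an array index; decode that
  // index from the hash in x3 into 'key' as a smi before retrying the smi
  // path.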
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");

  // Push receiver, key and value for runtime call.
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update the write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for the fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so, go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
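  // In a FixedDoubleArray the hole is encoded as a dedicated NaN bit pattern
  // (kHoleNanInt64), so a single 64-bit integer compare detects it.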
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);

  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;
  Label maybe_name_key;
  Label miss;

  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &maybe_name_key);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JSObject.
  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
  __ B(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: receiver
  //  x2: key
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
  __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(x10, &slow);
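  // The key is a unique name, so this behaves like a named store: probe the
  // stub cache for a STORE_IC handler keyed on the receiver's map and name.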
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::STORE_IC, flags, false, receiver, key, x3, x4, x5, x6);
  // Cache miss.
  __ B(&miss);

  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x3, x4,
                     x5, x6));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::STORE_IC, flags, false, receiver, name, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  // Tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register value = StoreDescriptor::ValueRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register dictionary = x3;
  DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));

  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address = Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address = Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address,
           info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64