// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
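  // Each NameDictionary entry is a (key, value, details) triple, which is why
  // the details word sits two pointers past the entry start and the value one
  // pointer past it.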
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ ld(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from GenerateStringDictionaryProbes() in scratch2
// is used.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY));
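  // The masked details must be zero: a non-zero type field or a set READ_ONLY
  // attribute bit sends the store to the miss label.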
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sd(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode (not dictionary).
    __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  // Check that the key (index) is within bounds.
  __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(out_of_range, hs, key, Operand(scratch1));

  // Fast case: Do the load.
  __ Daddu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
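  // SmiScale folds untagging and scaling into a single arithmetic shift, so
  // the tagged key is turned directly into a byte offset into the elements.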
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(at, at, scratch1);
  __ ld(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, a4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return a3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  if (FLAG_vector_ics) {
    Register slot = VectorLoadICDescriptor::SlotRegister();
    Register vector = VectorLoadICDescriptor::VectorRegister();

    __ Push(receiver, name, slot, vector);
  } else {
    __ Push(receiver, name);
  }
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is on the stack.
  Isolate* isolate = masm->isolate();

  DCHECK(!FLAG_vector_ics ||
         !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
                     VectorLoadICDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  int arg_count = FLAG_vector_ics ? 4 : 2;
  __ TailCallExternalReference(ref, arg_count, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(
    MacroAssembler* masm, Register object, Register key, Register scratch1,
    Register scratch2, Register scratch3, Label* unmapped_case,
    Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ GetObjectType(object, scratch1, scratch2);
  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check that the key is a positive smi.
  __ NonNegativeSmiTst(key, scratch1);
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
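  // The first two elements of the parameter map are the context and the
  // arguments backing store, so only length - 2 entries can be mapped.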
  __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ SmiUntag(scratch3, key);
  __ dsll(scratch3, scratch3, kPointerSizeLog2);
  __ Daddu(scratch3, scratch3, Operand(kOffset));

  __ Daddu(scratch2, scratch1, scratch3);
  __ ld(scratch2, MemOperand(scratch2));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ SmiUntag(scratch3, scratch2);
  __ dsll(scratch3, scratch3, kPointerSizeLog2);
  __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  __ Daddu(scratch2, scratch1, scratch3);
  return MemOperand(scratch2);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
              DONT_DO_SMI_CHECK);
  __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
  __ SmiUntag(scratch, key);
  __ dsll(scratch, scratch, kPointerSizeLog2);
  __ Daddu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(scratch, backing_store, scratch);
  return MemOperand(scratch);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register key = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  DCHECK(value.is(a0));

  Label slow, notin;
  // Store address is returned in register (of MemOperand) mapped_location.
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, a3, a4, a5, &notin, &slow);
  __ sd(value, mapped_location);
  __ mov(t1, value);
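  // RecordWrite needs the exact slot address in a register, which is why the
  // lookup above must return a MemOperand with a zero offset.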
  DCHECK_EQ(mapped_location.offset(), 0);
  __ RecordWrite(a3, mapped_location.rm(), t1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, value);  // (In delay slot) return the value stored in v0.
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  // Store address is returned in register (of MemOperand) unmapped_location.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, a3, a4, &slow);
  __ sd(value, unmapped_location);
  __ mov(t1, value);
  DCHECK_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!FLAG_vector_ics ||
         !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
                     VectorLoadICDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  int arg_count = FLAG_vector_ics ? 4 : 2;
  __ TailCallExternalReference(ref, arg_count, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));

  // Check whether the elements is a number dictionary.
  // a3: elements map
  // a4: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
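  // Untag the smi key; on MIPS64 the 32-bit payload lives in the upper word,
  // so a single arithmetic shift by 32 recovers the integer index.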
  __ dsra32(a0, key, 0);
  __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
  __ Ret();

  // Slow case, key and receiver still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasNamedInterceptor, &slow);


  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, a4, Operand(at));

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ dsll32(a3, a0, 0);
  __ dsrl32(a3, a3, 0);
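  // The shift pair above zero-extends the low 32 bits of the map pointer,
  // which is all the lookup cache hash uses.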
  __ dsra(a3, a3, KeyedLookupCache::kMapHashShift);
  __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset));
  __ dsra(at, a4, Name::kHashShift);
  __ xor_(a3, a3, at);
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(a3, a3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(a4, Operand(cache_keys));
  __ dsll(at, a3, kPointerSizeLog2 + 1);
  __ daddu(a4, a4, at);

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ ld(a5, MemOperand(a4, kPointerSize * i * 2));
    __ Branch(&try_next_entry, ne, a0, Operand(a5));
    __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1)));
    __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5));
    __ bind(&try_next_entry);
  }

  __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a0, Operand(a5));
  __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, key, Operand(a5));

  // Get field offset.
  // a0 : receiver's map
  // a3 : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(a4, Operand(cache_field_offsets));

    // TODO(yy) This data structure does NOT follow natural pointer size.
    __ dsll(at, a3, kPointerSizeLog2 - 1);
    __ daddu(at, a4, at);
    __ lwu(a5, MemOperand(at, kPointerSize / 2 * i));

    __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
    __ Dsubu(a5, a5, a6);
    __ Branch(&property_array_property, ge, a5, Operand(zero_reg));
    if (i != 0) {
      __ Branch(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset));
  // Index from start of object.
  __ daddu(a6, a6, a5);
  // Remove the heap tag.
  __ Dsubu(receiver, receiver, Operand(kHeapObjectTag));
  __ dsll(at, a6, kPointerSizeLog2);
  __ daddu(at, receiver, at);
  __ ld(v0, MemOperand(at));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      a4, a3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
  __ dsll(v0, a5, kPointerSizeLog2);
  __ Daddu(v0, v0, a1);
  __ ld(v0, MemOperand(v0));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      a4, a3);
  __ Ret();


  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, a4,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = a4;
  Register address = a5;
  if (check_map == kCheckMap) {
    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ ld(scratch_value, MemOperand(address));

  __ Branch(&holecheck_passed1, ne, scratch_value,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch_value, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch_value);
  __ sd(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch_value, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch_value);
  __ sd(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
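  // Only a 32-bit word of each stored double is inspected here: the address
  // skips sizeof(kHoleNanLower32) bytes and the loaded word is compared
  // against kHoleNanUpper32 to detect the hole NaN.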
  __ Daddu(address, elements,
           Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
                   kHeapObjectTag));
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch_value,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key,
                                 elements,  // Overwritten.
                                 a3,        // Scratch regs...
                                 a4, a5, &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, a4, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, a4, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, a4, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = a6;
  Register elements = a7;  // Elements array of the receiver.
  // a4 and a5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(a4, a4,
         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ Branch(&slow, ne, a4, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(a4));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(a4, &slow);
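  // Non-smi keys that are unique names are dispatched through the StoreIC
  // megamorphic stub cache rather than the element store paths below.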
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::STORE_IC, flags, false, receiver, key, a3, a4, a5, a6);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(a4));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(a4));
  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(a4));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(StoreDescriptor::ValueRegister().is(a0));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::STORE_IC, flags, false, receiver, name, a3, a4, a5, a6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister());
  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = a3;
  DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));

  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, a3, name, value, a4, a5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code uses at the patched jump.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, andi=%p, delta=%d\n", address,
           andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::IsBeq(branch_instr)) {
    patcher.ChangeBranchCondition(ne);
  } else {
    DCHECK(Assembler::IsBne(branch_instr));
    patcher.ChangeBranchCondition(eq);
  }
}
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64