// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/mips/codegen-mips.h"

#if V8_TARGET_ARCH_MIPS

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/mips/simulator-mips.h"

namespace v8 {
namespace internal {


#define __ masm.

#if defined(V8_HOST_ARCH_MIPS)
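// Builds a specialized byte-copy stub. Calling convention (standard memcpy):
// a0 = dst, a1 = src, a2 = size in bytes; the dst pointer is returned in v0.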
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
    defined(_MIPS_ARCH_MIPS32RX)
  return stub;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  // This code assumes that cache lines are 32 bytes; if the cache line is
  // larger, it will not work correctly.
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  {
    Label lastb, unaligned, aligned, chkw,
          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

    // The size of each prefetch.
    uint32_t pref_chunk = 32;
    // The maximum size of a prefetch; it must not be less than pref_chunk.
    // If the real size of a prefetch is greater than max_pref_size and
    // the kPrefHintPrepareForStore hint is used, the code will not work
    // correctly.
    uint32_t max_pref_size = 128;
    DCHECK(pref_chunk < max_pref_size);

    // pref_limit is set based on the fact that we never use an offset
    // greater than 5 on a store pref and that a single pref can
    // never be larger than max_pref_size.
    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
    int32_t pref_hint_load = kPrefHintLoadStreamed;
    int32_t pref_hint_store = kPrefHintPrepareForStore;
    uint32_t loadstore_chunk = 4;

    // The initial prefetches may fetch bytes that are before the buffer being
    // copied. Start copies with an offset of 4 to avoid this situation when
    // using kPrefHintPrepareForStore.
    DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
           pref_chunk * 4 >= max_pref_size);

    // If the size is less than 8, go to lastb. Regardless of size,
    // copy dst pointer to v0 for the return value.
    __ slti(t2, a2, 2 * loadstore_chunk);
    __ bne(t2, zero_reg, &lastb);
    __ mov(v0, a0);  // In delay slot.

    // If src and dst have different alignments, go to unaligned; if they
    // have the same alignment (but are not actually aligned) do a partial
    // load/store to make them aligned. If they are both already aligned
    // we can start copying at aligned.
    __ xor_(t8, a1, a0);
    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
    __ bne(t8, zero_reg, &unaligned);
    __ subu(a3, zero_reg, a0);  // In delay slot.

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &aligned);  // Already aligned.
    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.

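    // The lwr/lwl (and swr/swl) pairs below are the classic MIPS idiom for
    // unaligned word accesses: each instruction of a pair transfers only the
    // bytes of the word on its side of the alignment boundary. Which
    // instruction handles which half depends on endianness, hence the two
    // branches.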
    if (kArchEndian == kLittle) {
      __ lwr(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swr(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swl(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    }
    // Now dst/src are both word-aligned. Set a2 to count how many
    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
    // the dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&aligned);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &chkw);  // Less than 64?
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.

    // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
    // in this case a0+x must not be past the "t0-32" address. This means:
    // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
    // x=64 the last "safe" a0 address is "t0-96". In the current version we
    // will use "pref hint, 128(a0)", so "t0-160" is the limit.
    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);  // t0 is the "past the end" address.
      __ Subu(t9, t0, pref_limit);  // t9 is the "last safe pref" address.
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }
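    // Main loop: each iteration copies 16 words (64 bytes), issuing all eight
    // loads of a half-chunk before the corresponding stores so that loads and
    // stores can overlap in the pipeline.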
    __ bind(&loop16w);
    __ lw(t0, MemOperand(a1));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&skip_pref);
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));

    __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have src and dest word-aligned but less than 64 bytes to go.
    // Check for a 32 byte chunk and copy it if there is one. Otherwise jump
    // down to chk1w to handle the tail end of the copy.
    __ bind(&chkw);
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &chk1w);  // Less than 32?
    __ nop();  // In delay slot.
    __ lw(t0, MemOperand(a1));
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Here we have less than 32 bytes to copy. Set up for a loop to copy
    // one word at a time. Set a2 to count how many bytes we have to copy
    // after all the word chunks are copied and a3 to the dst pointer after
    // all the word chunks have been copied. We will loop, incrementing a0
    // and a1 until a0 equals a3.
    __ bind(&chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &lastb);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&wordCopy_loop);
    __ lw(t3, MemOperand(a1));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &wordCopy_loop);
    __ sw(t3, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    __ bind(&lastb);
    __ Branch(&leave, le, a2, Operand(zero_reg));
    __ addu(a3, a0, a2);

    __ bind(&lastbloop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &lastbloop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ bind(&leave);
    __ jr(ra);
    __ nop();

    // Unaligned case. Only the dst gets aligned so we need to do partial
    // loads of the source followed by normal stores to the dst (once we
    // have aligned the destination).
    __ bind(&unaligned);
    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &ua_chk16w);
    __ subu(a2, a2, a3);  // In delay slot.

    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swr(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swl(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    }

    // Now the dst (but not the source) is aligned. Set a2 to count how many
    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
    // the dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&ua_chk16w);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &ua_chkw);
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);
      __ Subu(t9, t0, pref_limit);
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }

    __ bind(&ua_loop16w);
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
    if (kArchEndian == kLittle) {
      __ lwr(t0, MemOperand(a1));
      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(t0, MemOperand(a1));
      __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    if (kArchEndian == kLittle) {
      __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
      __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
      __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
      __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
      __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
      __ lwl(t0,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t4,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t5,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t6,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t7,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(t0, MemOperand(a1, 8, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 9, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 10, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 11, loadstore_chunk));
      __ lwl(t4, MemOperand(a1, 12, loadstore_chunk));
      __ lwl(t5, MemOperand(a1, 13, loadstore_chunk));
      __ lwl(t6, MemOperand(a1, 14, loadstore_chunk));
      __ lwl(t7, MemOperand(a1, 15, loadstore_chunk));
      __ lwr(t0,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t4,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t5,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t6,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t7,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &ua_loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have less than 64 bytes to go. Check for
    // a 32 byte chunk and copy it if there is one. Otherwise jump down to
    // ua_chk1w to handle the tail end of the copy.
    __ bind(&ua_chkw);
    __ Pref(pref_hint_load, MemOperand(a1));
    __ andi(t8, a2, 0x1f);

    __ beq(a2, t8, &ua_chk1w);
    __ nop();  // In delay slot.
    if (kArchEndian == kLittle) {
      __ lwr(t0, MemOperand(a1));
      __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
      __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(t0, MemOperand(a1));
      __ lwl(t1, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 2, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 3, loadstore_chunk));
      __ lwl(t4, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t5, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t6, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t7, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(t0,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t4,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t5,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t6,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t7,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Less than 32 bytes to copy. Set up for a loop to
    // copy one word at a time.
    __ bind(&ua_chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&ua_wordCopy_loop);
    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    // Copy the remaining bytes (fewer than one word) one byte at a time.
    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);
    __ addu(a3, a0, a2);  // In delay slot.

    __ bind(&ua_smallCopy_loop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &ua_smallCopy_loop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ jr(ra);
    __ nop();
  }
  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif

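// Generates a minimal stub for the square root of a double: the argument
// arrives in f12 via MovFromFloatParameter and the result is returned in f0
// via MovToFloatResult.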
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);
  __ MovToFloatResult(f0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

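// Transitions an elements kind that only requires a map change (no change to
// the backing store): store the new map into the receiver and emit the write
// barrier for it.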
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = t0;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }

  // Set transitioned map.
  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


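// Transitions a FixedArray of smis into a FixedDoubleArray: allocates the new
// backing store, rewires the receiver's map and elements pointer, then
// converts each element in place (smis via cvt_d_w, holes to the hole NaN).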
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register ra contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = t0;
  Register length = t1;
  Register array = t2;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = t5;
  Register scratch3 = t3;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  Register scratch = t6;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ push(ra);
  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ sll(scratch, length, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
  // array: destination FixedDoubleArray, tagged as heap object

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ sw(scratch2, FieldMemOperand(array, HeapObject::kMapOffset));

| 690 | __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 691 | __ RecordWriteField(receiver, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 692 | HeapObject::kMapOffset, |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 693 | target_map, |
| 694 | scratch2, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 695 | kRAHasBeenSaved, |
| 696 | kDontSaveFPRegs, |
| 697 | OMIT_REMEMBERED_SET, |
| 698 | OMIT_SMI_CHECK); |
| 699 | // Replace receiver's backing store with newly created FixedDoubleArray. |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame] | 700 | __ Addu(scratch1, array, Operand(kHeapObjectTag - kHeapObjectTag)); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 701 | __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 702 | __ RecordWriteField(receiver, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 703 | JSObject::kElementsOffset, |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 704 | scratch1, |
| 705 | scratch2, |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 706 | kRAHasBeenSaved, |
| 707 | kDontSaveFPRegs, |
| 708 | EMIT_REMEMBERED_SET, |
| 709 | OMIT_SMI_CHECK); |
| 710 | |
| 711 | |
| 712 | // Prepare for conversion loop. |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 713 | __ Addu(scratch1, elements, |
| 714 | Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
Ben Murdoch | c561043 | 2016-08-08 18:44:38 +0100 | [diff] [blame] | 715 | __ Addu(scratch3, array, |
| 716 | Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); |
Ben Murdoch | 097c5b2 | 2016-05-18 11:27:45 +0100 | [diff] [blame] | 717 | __ Lsa(array_end, scratch3, length, 2); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 718 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 719 | // Repurpose registers no longer in use. |
| 720 | Register hole_lower = elements; |
| 721 | Register hole_upper = length; |
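  // In a FixedDoubleArray the hole is a NaN with a fixed bit pattern
  // (kHoleNanUpper32:kHoleNanLower32), so holes can be written without being
  // confused with any real number produced by the smi conversion below.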
  __ li(hole_lower, Operand(kHoleNanLower32));
  __ li(hole_upper, Operand(kHoleNanUpper32));

  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch3: begin of FixedDoubleArray element fields, not tagged

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ lw(ra, MemOperand(sp, 0));
  __ Branch(USE_DELAY_SLOT, fail);
  __ addiu(sp, sp, kPointerSize);  // In delay slot.

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(scratch2, MemOperand(scratch1));
  __ Addu(scratch1, scratch1, kIntSize);
  // scratch2: current element
  __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);

  // Normal smi, convert to double and store.
  __ mtc1(scratch2, f0);
  __ cvt_d_w(f0, f0);
  __ sdc1(f0, MemOperand(scratch3));
  __ Branch(USE_DELAY_SLOT, &entry);
  __ addiu(scratch3, scratch3, kDoubleSize);  // In delay slot.

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(scratch2);
    __ Or(scratch2, scratch2, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
  }
  // mantissa
  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
  // exponent
  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
  __ addiu(scratch3, scratch3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, scratch3, Operand(array_end));

  __ bind(&done);
  __ pop(ra);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register ra contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = t0;
  Register array = t2;
  Register length = t1;
  Register scratch = t5;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

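  // Unlike the smi->double transition above, each non-hole double below must
  // be boxed in a freshly allocated HeapNumber, so allocation (and hence GC)
  // can happen inside the conversion loop; the inputs are saved on the stack
  // up front so the whole operation can be restarted via the gc_required path.
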
  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(elements));

  __ MultiPush(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ sll(array_size, length, 1);
  __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ sw(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
  __ sw(scratch, FieldMemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ Addu(src_elements, src_elements, Operand(
      FixedDoubleArray::kHeaderSize - kHeapObjectTag
      + Register::kExponentOffset));
  __ Addu(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(dst_end, dst_elements, dst_end, 1);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
  __ Branch(&initialization_loop_entry);
  __ bind(&initialization_loop);
  __ sw(scratch, MemOperand(dst_elements));
  __ Addu(dst_elements, dst_elements, Operand(kPointerSize));
  __ bind(&initialization_loop_entry);
  __ Branch(&initialization_loop, lt, dst_elements, Operand(dst_end));

  __ Addu(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
  //               points to the exponent
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(
      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ lw(upper_bits, MemOperand(src_elements));
  __ Addu(src_elements, src_elements, kDoubleSize);
  // upper_bits: current element's upper 32 bits
  // src_elements: address of next element's upper 32 bits
  __ Branch(&convert_hole, eq, upper_bits, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  Register scratch3 = t6;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  // Load mantissa of current element; src_elements
  // points to exponent of next element.
  __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
      - Register::kExponentOffset - kDoubleSize)));
  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
  __ mov(scratch2, dst_elements);
  __ sw(heap_number, MemOperand(dst_elements));
  __ Addu(dst_elements, dst_elements, kIntSize);
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ sw(scratch2, MemOperand(dst_elements));
  __ Addu(dst_elements, dst_elements, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, dst_elements, Operand(dst_end));

  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


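// Loads the character at |index| of |string| into |result|, dispatching on
// the string representation (sliced, cons, sequential, external) and bailing
// out to |call_runtime| for the cases the fast path cannot handle.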
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound,
        at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&one_byte, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ Lsa(at, string, index, 1);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}

#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the MIPS simulator ICache is set up.
  base::SmartPointer<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(a1);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Assembler::target_address_at(
        sequence + Assembler::kInstrSize);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


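// "Aging" a code object rewrites its prologue to call an age-specific stub;
// the li/jalr sequence emitted below is what GetCodeAgeAndParity() above
// decodes to recover the stub address, and from it the age.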
| 1092 | void Code::PatchPlatformCodeAge(Isolate* isolate, |
| 1093 | byte* sequence, |
| 1094 | Code::Age age, |
| 1095 | MarkingParity parity) { |
| 1096 | uint32_t young_length = isolate->code_aging_helper()->young_sequence_length(); |
| 1097 | if (age == kNoAgeCodeAge) { |
| 1098 | isolate->code_aging_helper()->CopyYoungSequenceTo(sequence); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1099 | Assembler::FlushICache(isolate, sequence, young_length); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1100 | } else { |
| 1101 | Code* stub = GetCodeAgeStub(isolate, age, parity); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1102 | CodePatcher patcher(isolate, sequence, |
| 1103 | young_length / Assembler::kInstrSize); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1104 | // Mark this code sequence for FindPlatformCodeAgeSequence(). |
| 1105 | patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP); |
| 1106 | // Load the stub address to t9 and call it, |
| 1107 | // GetCodeAgeAndParity() extracts the stub address from this instruction. |
| 1108 | patcher.masm()->li( |
| 1109 | t9, |
| 1110 | Operand(reinterpret_cast<uint32_t>(stub->instruction_start())), |
| 1111 | CONSTANT_SIZE); |
| 1112 | patcher.masm()->nop(); // Prevent jalr to jal optimization. |
| 1113 | patcher.masm()->jalr(t9, a0); |
| 1114 | patcher.masm()->nop(); // Branch delay slot nop. |
| 1115 | patcher.masm()->nop(); // Pad the empty space. |
| 1116 | } |
| 1117 | } |
| 1118 | |
| 1119 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1120 | #undef __ |
Andrei Popescu | 3100271 | 2010-02-23 13:46:05 +0000 | [diff] [blame] | 1121 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame] | 1122 | } // namespace internal |
| 1123 | } // namespace v8 |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 1124 | |
| 1125 | #endif // V8_TARGET_ARCH_MIPS |