// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-mips.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_mips_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFP(
      fast_exp_mips_machine_code, x, 0);
}
#endif


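// Generates a native stub for std::exp when --fast-math is enabled. Falls
// back to the library std::exp if the flag is off or the code buffer cannot
// be allocated; under the simulator the stub is reached through
// fast_exp_simulator() above.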
UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DoubleRegister input = f12;
    DoubleRegister result = f0;
    DoubleRegister double_scratch1 = f4;
    DoubleRegister double_scratch2 = f6;
    Register temp1 = t0;
    Register temp2 = t1;
    Register temp3 = t2;

    if (!IsMipsSoftFloatABI) {
      // Input value is in f12 anyway, nothing to do.
    } else {
      __ Move(input, a0, a1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (!IsMipsSoftFloatABI) {
      // Result is already in f0, nothing to do.
    } else {
      __ Move(v0, v1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


#if defined(V8_HOST_ARCH_MIPS)
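// Generates an optimized byte-copy routine that moves data in 64-byte chunks
// with cache-line prefetching, with separate paths for aligned and unaligned
// src/dst. Falls back to the passed-in |stub| under the simulator, when the
// serializer is enabled, or if the code buffer cannot be allocated.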
OS::MemCopyUint8Function CreateMemCopyUint8Function(
    OS::MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (Serializer::enabled()) {
    return stub;
  }

  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  // This code assumes that cache lines are 32 bytes and if the cache line is
  // larger it will not work correctly.
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    Label lastb, unaligned, aligned, chkw,
          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

    // The size of each prefetch.
    uint32_t pref_chunk = 32;
    // The maximum size of a prefetch, it must not be less than pref_chunk.
    // If the real size of a prefetch is greater than max_pref_size and
    // the kPrefHintPrepareForStore hint is used, the code will not work
    // correctly.
    uint32_t max_pref_size = 128;
    ASSERT(pref_chunk < max_pref_size);

    // pref_limit is set based on the fact that we never use an offset
    // greater than 5 on a store pref and that a single pref can
    // never be larger than max_pref_size.
    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
    int32_t pref_hint_load = kPrefHintLoadStreamed;
    int32_t pref_hint_store = kPrefHintPrepareForStore;
    uint32_t loadstore_chunk = 4;

    // The initial prefetches may fetch bytes that are before the buffer being
    // copied. Start copies with an offset of 4 to avoid this situation when
    // using kPrefHintPrepareForStore.
    ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
           pref_chunk * 4 >= max_pref_size);

    // If the size is less than 8, go to lastb. Regardless of size,
    // copy dst pointer to v0 for the return value.
    __ slti(t2, a2, 2 * loadstore_chunk);
    __ bne(t2, zero_reg, &lastb);
    __ mov(v0, a0);  // In delay slot.

    // If src and dst have different alignments, go to unaligned, if they
    // have the same alignment (but are not actually aligned) do a partial
    // load/store to make them aligned. If they are both already aligned
    // we can start copying at aligned.
    __ xor_(t8, a1, a0);
    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
    __ bne(t8, zero_reg, &unaligned);
    __ subu(a3, zero_reg, a0);  // In delay slot.

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &aligned);  // Already aligned.
    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.

    __ lwr(t8, MemOperand(a1));
    __ addu(a1, a1, a3);
    __ swr(t8, MemOperand(a0));
    __ addu(a0, a0, a3);

    // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
    // count how many bytes we have to copy after all the 64 byte chunks are
    // copied and a3 to the dst pointer after all the 64 byte chunks have been
    // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&aligned);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &chkw);  // Less than 64?
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.

    // When in the loop we prefetch with kPrefHintPrepareForStore hint,
    // in this case the a0+x should be past the "t0-32" address. This means:
    // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
    // x=64 the last "safe" a0 address is "t0-96". In the current version we
    // will use "pref hint, 128(a0)", so "t0-160" is the limit.
    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);  // t0 is the "past the end" address.
      __ Subu(t9, t0, pref_limit);  // t9 is the "last safe pref" address.
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }
    __ bind(&loop16w);
    __ lw(t0, MemOperand(a1));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&skip_pref);
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));

    __ lw(t0, MemOperand(a1, 8, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 9, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 10, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 11, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 12, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 13, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 14, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 15, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have src and dest word-aligned but less than 64 bytes to go.
    // Check for a 32-byte chunk and copy if there is one. Otherwise jump
    // down to chk1w to handle the tail end of the copy.
    __ bind(&chkw);
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &chk1w);  // Less than 32?
    __ nop();  // In delay slot.
    __ lw(t0, MemOperand(a1));
    __ lw(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t7, MemOperand(a1, 7, loadstore_chunk));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Here we have less than 32 bytes to copy. Set up for a loop to copy
    // one word at a time. Set a2 to count how many bytes we have to copy
    // after all the word chunks are copied and a3 to the dst pointer after
    // all the word chunks have been copied. We will loop, incrementing a0
    // and a1 until a0 equals a3.
    __ bind(&chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &lastb);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&wordCopy_loop);
    __ lw(t3, MemOperand(a1));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &wordCopy_loop);
    __ sw(t3, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    __ bind(&lastb);
    __ Branch(&leave, le, a2, Operand(zero_reg));
    __ addu(a3, a0, a2);

    __ bind(&lastbloop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &lastbloop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ bind(&leave);
    __ jr(ra);
    __ nop();

    // Unaligned case. Only the dst gets aligned so we need to do partial
    // loads of the source followed by normal stores to the dst (once we
    // have aligned the destination).
    __ bind(&unaligned);
    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &ua_chk16w);
    __ subu(a2, a2, a3);  // In delay slot.

    __ lwr(v1, MemOperand(a1));
    __ lwl(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ addu(a1, a1, a3);
    __ swr(v1, MemOperand(a0));
    __ addu(a0, a0, a3);

    // Now the dst (but not the source) is aligned. Set a2 to count how many
    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
    // the dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&ua_chk16w);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &ua_chkw);
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(t0, a0, a2);
      __ Subu(t9, t0, pref_limit);
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }

    __ bind(&ua_loop16w);
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
    __ lwr(t0, MemOperand(a1));
    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);
      __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&ua_skip_pref);
    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
    __ lwl(t0,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t4,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t5,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t6,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t7,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ lwr(t0, MemOperand(a1, 8, loadstore_chunk));
    __ lwr(t1, MemOperand(a1, 9, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 10, loadstore_chunk));
    __ lwr(t3, MemOperand(a1, 11, loadstore_chunk));
    __ lwr(t4, MemOperand(a1, 12, loadstore_chunk));
    __ lwr(t5, MemOperand(a1, 13, loadstore_chunk));
    __ lwr(t6, MemOperand(a1, 14, loadstore_chunk));
    __ lwr(t7, MemOperand(a1, 15, loadstore_chunk));
    __ lwl(t0,
           MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t4,
           MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t5,
           MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t6,
           MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t7,
           MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ sw(t0, MemOperand(a0, 8, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 9, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 10, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &ua_loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have less than 64 bytes to go. Check for
    // a 32-byte chunk and copy if there is one. Otherwise jump down to
    // ua_chk1w to handle the tail end of the copy.
    __ bind(&ua_chkw);
    __ Pref(pref_hint_load, MemOperand(a1));
    __ andi(t8, a2, 0x1f);

    __ beq(a2, t8, &ua_chk1w);
    __ nop();  // In delay slot.
    __ lwr(t0, MemOperand(a1));
    __ lwr(t1, MemOperand(a1, 1, loadstore_chunk));
    __ lwr(t2, MemOperand(a1, 2, loadstore_chunk));
    __ lwr(t3, MemOperand(a1, 3, loadstore_chunk));
    __ lwr(t4, MemOperand(a1, 4, loadstore_chunk));
    __ lwr(t5, MemOperand(a1, 5, loadstore_chunk));
    __ lwr(t6, MemOperand(a1, 6, loadstore_chunk));
    __ lwr(t7, MemOperand(a1, 7, loadstore_chunk));
    __ lwl(t0,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t1,
           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t2,
           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t3,
           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t4,
           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t5,
           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t6,
           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
    __ lwl(t7,
           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(t0, MemOperand(a0));
    __ sw(t1, MemOperand(a0, 1, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 2, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t4, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t5, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t6, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t7, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Less than 32 bytes to copy. Set up for a loop to
    // copy one word at a time.
    __ bind(&ua_chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&ua_wordCopy_loop);
    __ lwr(v1, MemOperand(a1));
    __ lwl(v1,
           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    // Copy the last 8 bytes.
    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);
    __ addu(a3, a0, a2);  // In delay slot.

    __ bind(&ua_smallCopy_loop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &ua_smallCopy_loop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ jr(ra);
    __ nop();
  }
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<OS::MemCopyUint8Function>(buffer);
#endif
}
#endif

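// Generates a native stub that computes sqrt with a single sqrt_d
// instruction; falls back to std::sqrt under the simulator or if the code
// buffer cannot be allocated.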
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);
  __ MovToFloatResult(f0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
  }

  // Set transitioned map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


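// Transitions the receiver's elements from a FixedArray of smis to a freshly
// allocated FixedDoubleArray, converting each smi to a double and writing the
// hole NaN for holes; empty arrays only get their map updated.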
void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;

  Register scratch = t6;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ push(ra);
  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ sll(scratch, t1, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
  // t2: destination FixedDoubleArray, not tagged as heap object

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
  // Update receiver's map.

  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Addu(a3, t2, Operand(kHeapObjectTag));
  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);


  // Prepare for conversion loop.
  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
  __ sll(t2, t1, 2);
  __ Addu(t2, t2, t3);
  __ li(t0, Operand(kHoleNanLower32));
  __ li(t1, Operand(kHoleNanUpper32));
  // t0: kHoleNanLower32
  // t1: kHoleNanUpper32
  // t2: end of destination FixedDoubleArray, not tagged
  // t3: begin of FixedDoubleArray element fields, not tagged

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ra);
  __ Branch(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(t5, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  // t5: current element
  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

  // Normal smi, convert to double and store.
  __ mtc1(t5, f0);
  __ cvt_d_w(f0, f0);
  __ sdc1(f0, MemOperand(t3));
  __ Addu(t3, t3, kDoubleSize);

  __ Branch(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(t5);
    __ Or(t5, t5, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
  }
  __ sw(t0, MemOperand(t3));  // mantissa
  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
  __ Addu(t3, t3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, t3, Operand(t2));

  __ pop(ra);
  __ bind(&done);
}


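// Transitions the receiver's elements from a FixedDoubleArray to a FixedArray
// of heap numbers, boxing each double and replacing hole NaNs with the hole
// value; empty arrays only get their map updated.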
void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedArray.
  __ sll(a0, t1, 1);
  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
  __ Addu(t2, t2, Operand(kHeapObjectTag));
  __ sll(t1, t1, 1);
  __ Addu(t1, a3, t1);
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses.
  // a3: begin of destination FixedArray element fields, not tagged
  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
  // t1: end of destination FixedArray, not tagged
  // t2: destination FixedArray
  // t3: the-hole pointer
  // t5: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  __ lw(a1, MemOperand(t0));
  __ Addu(t0, t0, kDoubleSize);
  // a1: current element's upper 32 bit
  // t0: address of next element's upper 32 bit
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
  // a2: new heap number
  __ lw(a0, MemOperand(t0, -12));
  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
  __ mov(a0, a3);
  __ sw(a2, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  __ RecordWrite(t2,
                 a0,
                 a2,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ sw(t3, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, a3, Operand(t1));

  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      t2,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


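// Loads the character at |index| of |string| into |result|, unwrapping sliced
// and cons strings and handling both sequential and external representations
// in either encoding; bails out to |call_runtime| for non-flat cons strings
// and short external strings.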
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound,
              at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&ascii, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


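// Emits a table-driven approximation of exp(input). Roughly: out-of-range
// inputs branch to the zero/infinity cases; otherwise the scaled argument's
// low 11 bits select an entry of math_exp_log_table, the remainder is refined
// with a short polynomial, and the biased exponent (0x3ff + temp2 >> 11) is
// packed into the high word of the table value before the final multiply.
// (High-level sketch inferred from the code below; the exact constants live
// in ExternalReference::math_exp_constants().)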
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(!temp1.is(temp2));
  ASSERT(!temp1.is(temp3));
  ASSERT(!temp2.is(temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label zero, infinity, done;

  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ ldc1(double_scratch1, ExpConstant(0, temp3));
  __ BranchF(&zero, NULL, ge, double_scratch1, input);

  __ ldc1(double_scratch2, ExpConstant(1, temp3));
  __ BranchF(&infinity, NULL, ge, input, double_scratch2);

  __ ldc1(double_scratch1, ExpConstant(3, temp3));
  __ ldc1(result, ExpConstant(4, temp3));
  __ mul_d(double_scratch1, double_scratch1, input);
  __ add_d(double_scratch1, double_scratch1, result);
  __ FmoveLow(temp2, double_scratch1);
  __ sub_d(double_scratch1, double_scratch1, result);
  __ ldc1(result, ExpConstant(6, temp3));
  __ ldc1(double_scratch2, ExpConstant(5, temp3));
  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
  __ sub_d(double_scratch1, double_scratch1, input);
  __ sub_d(result, result, double_scratch1);
  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
  __ mul_d(result, result, double_scratch2);
  __ ldc1(double_scratch2, ExpConstant(7, temp3));
  __ mul_d(result, result, double_scratch2);
  __ sub_d(result, result, double_scratch1);
  // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
  ASSERT(*reinterpret_cast<double*>
         (ExternalReference::math_exp_constants(8).address()) == 1);
  __ Move(double_scratch2, 1);
  __ add_d(result, result, double_scratch2);
  __ srl(temp1, temp2, 11);
  __ Ext(temp2, temp2, 0, 11);
  __ Addu(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ sll(at, temp2, 3);
  __ Addu(temp3, temp3, Operand(at));
  __ lw(temp2, MemOperand(temp3, 0));
  __ lw(temp3, MemOperand(temp3, kPointerSize));
  // The first word is loaded into the lower-numbered register.
  if (temp2.code() < temp3.code()) {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp3, at);
    __ Move(double_scratch1, temp2, temp1);
  } else {
    __ sll(at, temp1, 20);
    __ Or(temp1, temp2, at);
    __ Move(double_scratch1, temp3, temp1);
  }
  __ mul_d(result, result, double_scratch1);
  __ BranchShort(&done);

  __ bind(&zero);
  __ Move(result, kDoubleRegZero);
  __ BranchShort(&done);

  __ bind(&infinity);
  __ ldc1(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif

static byte* GetNoCodeAgeSequence(uint32_t* length) {
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found in FUNCTIONS
  static bool initialized = false;
  static uint32_t sequence[kNoCodeAgeSequenceLength];
  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
  if (!initialized) {
    // Since patcher is a large object, allocate it dynamically when needed,
    // to avoid overloading the stack in stress conditions.
    SmartPointer<CodePatcher>
        patcher(new CodePatcher(byte_sequence, kNoCodeAgeSequenceLength));
    PredictableCodeSizeScope scope(patcher->masm(), *length);
    patcher->masm()->Push(ra, fp, cp, a1);
    patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
    patcher->masm()->Addu(
        fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    initialized = true;
  }
  return byte_sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = !memcmp(sequence, young_sequence, young_length);
  ASSERT(result ||
         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Assembler::target_address_at(
        sequence + Assembler::kInstrSize);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


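// Rejuvenates or ages the prologue at |sequence|: for kNoAgeCodeAge the
// original young prologue is copied back, otherwise the prologue is replaced
// by a marker nop plus a call to the matching code-age stub.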
void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    // Mark this code sequence for FindPlatformCodeAgeSequence().
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    // Load the stub address to t9 and call it,
    // GetCodeAgeAndParity() extracts the stub address from this instruction.
    patcher.masm()->li(
        t9,
        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
        CONSTANT_SIZE);
    patcher.masm()->nop();  // Prevent jalr to jal optimization.
    patcher.masm()->jalr(t9, a0);
    patcher.masm()->nop();  // Branch delay slot nop.
    patcher.masm()->nop();  // Pad the empty space.
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS