// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->

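// Scratch register used by the code in this file (assumed to be outside the
// allocatable general-purpose register set).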
#define kScratchReg r9


// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter FINAL : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

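  // On ARM, float32 values live in the low half of a float64 (VFP double)
  // register, so the single-precision accessors below return the low
  // S register of the corresponding double register.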
  SwVfpRegister OutputFloat32Register(int index = 0) {
    return ToFloat32Register(instr_->OutputAt(index));
  }

  SwVfpRegister InputFloat32Register(int index) {
    return ToFloat32Register(instr_->InputAt(index));
  }

  SwVfpRegister ToFloat32Register(InstructionOperand* op) {
    return ToFloat64Register(op).low();
  }

  LowDwVfpRegister OutputFloat64Register(int index = 0) {
    return ToFloat64Register(instr_->OutputAt(index));
  }

  LowDwVfpRegister InputFloat64Register(int index) {
    return ToFloat64Register(instr_->InputAt(index));
  }

  LowDwVfpRegister ToFloat64Register(InstructionOperand* op) {
    return LowDwVfpRegister::from_code(ToDoubleRegister(op).code());
  }

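  // Derives the S bit for the current instruction from its flags mode:
  // instructions that feed a branch or materialize a boolean must set the
  // condition flags; all others leave them unchanged.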
  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

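  // Decodes the addressing-mode bits of the opcode into an ARM Operand2:
  // an immediate, a plain register, or a register shifted by an immediate
  // or by another register (ASR/LSL/LSR/ROR).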
  Operand InputOperand2(int first_index) {
    const int index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_R:
        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }

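  // Decodes the addressing-mode bits into a MemOperand for a load or store,
  // consuming a register+immediate or register+register input pair and
  // advancing *first_index past the inputs used.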
  MemOperand InputOffset(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_ROR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  MemOperand InputOffset(int first_index = 0) {
    return InputOffset(&first_index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK(op != NULL);
    DCHECK(!op->IsRegister());
    DCHECK(!op->IsDoubleRegister());
    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};


namespace {

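// Out-of-line code for checked loads whose index is out of bounds: each
// class materializes the default out-of-bounds value (quiet NaN for floats,
// zero for integers) into the result register.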
class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  SwVfpRegister const result_;
};


class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL {
    __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
  }

 private:
  DwVfpRegister const result_;
};


class OutOfLineLoadInteger FINAL : public OutOfLineCode {
 public:
  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() FINAL { __ mov(result_, Operand::Zero()); }

 private:
  Register const result_;
};

}  // namespace

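// Checked-load pattern: the unsigned comparison of the index against the
// length (input 1) also maps negative indices above the length, so a single
// "hs" branch to the out-of-line code covers all out-of-bounds cases before
// the load is performed.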
#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                           \
  do {                                                               \
    auto result = i.OutputFloat##width##Register();                  \
    auto offset = i.InputRegister(0);                                \
    if (instr->InputAt(1)->IsRegister()) {                           \
      __ cmp(offset, i.InputRegister(1));                            \
    } else {                                                         \
      __ cmp(offset, i.InputImmediate(1));                           \
    }                                                                \
    auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
    __ b(hs, ool->entry());                                          \
    __ vldr(result, i.InputOffset(2));                               \
    __ bind(ool->exit());                                            \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                              \
  } while (0)


#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
  do {                                                          \
    auto result = i.OutputRegister();                           \
    auto offset = i.InputRegister(0);                           \
    if (instr->InputAt(1)->IsRegister()) {                      \
      __ cmp(offset, i.InputRegister(1));                       \
    } else {                                                    \
      __ cmp(offset, i.InputImmediate(1));                      \
    }                                                           \
    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
    __ b(hs, ool->entry());                                     \
    __ asm_instr(result, i.InputOffset(2));                     \
    __ bind(ool->exit());                                       \
    DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
  } while (0)

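// Checked-store pattern: no out-of-line code is needed because ARM stores
// can be predicated; the store executes only under the "lo" condition,
// i.e. when the index is strictly below the length.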
#define ASSEMBLE_CHECKED_STORE_FLOAT(width)        \
  do {                                             \
    auto offset = i.InputRegister(0);              \
    if (instr->InputAt(1)->IsRegister()) {         \
      __ cmp(offset, i.InputRegister(1));          \
    } else {                                       \
      __ cmp(offset, i.InputImmediate(1));         \
    }                                              \
    auto value = i.InputFloat##width##Register(2); \
    __ vstr(value, i.InputOffset(3), lo);          \
    DCHECK_EQ(LeaveCC, i.OutputSBit());            \
  } while (0)


#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
  do {                                            \
    auto offset = i.InputRegister(0);             \
    if (instr->InputAt(1)->IsRegister()) {        \
      __ cmp(offset, i.InputRegister(1));         \
    } else {                                      \
      __ cmp(offset, i.InputImmediate(1));        \
    }                                             \
    auto value = i.InputRegister(2);              \
    __ asm_instr(value, i.InputOffset(3), lo);    \
    DCHECK_EQ(LeaveCC, i.OutputSBit());           \
  } while (0)

// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        __ add(ip, i.InputRegister(0),
               Operand(Code::kHeaderSize - kHeapObjectTag));
        __ Call(ip);
      }
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, kScratchReg);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);
      AddSafepointAndDeopt(instr);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchNop:
      // Don't emit code for nops.
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchRet:
      AssembleReturn();
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), sp);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSmmul:
      __ smmul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSmmla:
      __ smmla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputRegister(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUmull:
      __ umull(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0),
               i.InputRegister(1), i.OutputSBit());
      break;
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmSxtb:
      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxth:
      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtab:
      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmSxtah:
      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtb:
      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxth:
      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtab:
      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmUxtah:
      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
               i.InputInt32(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVcmpF64:
      __ VFPCompareAndSetFlags(i.InputFloat64Register(0),
                               i.InputFloat64Register(1));
      DCHECK_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputFloat64Register(), i.InputFloat64Register(1),
              i.InputFloat64Register(2));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputFloat64Register(), i.InputFloat64Register(0),
              i.InputFloat64Register(1));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputFloat64Register(0),
                              i.InputFloat64Register(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputFloat64Register());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVsqrtF64:
      __ vsqrt(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVfloorF64:
      __ vrintm(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVceilF64:
      __ vrintp(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVroundTruncateF64:
      __ vrintz(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVroundTiesAwayF64:
      __ vrinta(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVnegF64:
      __ vneg(i.OutputFloat64Register(), i.InputFloat64Register(0));
      break;
    case kArmVcvtF32F64: {
      __ vcvt_f32_f64(i.OutputFloat32Register(), i.InputFloat64Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64F32: {
      __ vcvt_f64_f32(i.OutputFloat64Register(), i.InputFloat32Register(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputFloat64Register(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputFloat64Register(0));
      __ vmov(i.OutputRegister(), scratch);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrb:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmLdrsb:
      __ ldrsb(i.OutputRegister(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStrb: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdrh:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmLdrsh:
      __ ldrsh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStrh: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLdr:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStr: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF32: {
      __ vldr(i.OutputFloat32Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVstrF32: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat32Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVldrF64:
      __ vldr(i.OutputFloat64Register(), i.InputOffset());
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVstrF64: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputFloat64Register(index), operand);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmPush:
      __ Push(i.InputRegister(0));
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStoreWriteBarrier: {
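      // Compute the effective address, perform the store, then record the
      // write so the GC is notified of the updated field.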
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      __ RecordWrite(object, index, value, lr_status, mode);
      DCHECK_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(32);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(64);
      break;
  }
}

// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  ArmOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  switch (branch->condition) {
    case kUnorderedEqual:
      // The "eq" condition will not catch the unordered case.
      // The jump/fall through to the false label will be used if the
      // comparison was unordered.
    case kEqual:
      __ b(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      // Unordered or not equal can be tested with the "ne" condition.
      // See ARMv7 manual A8.3 - Conditional execution.
    case kNotEqual:
      __ b(ne, tlabel);
      break;
    case kSignedLessThan:
      __ b(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ b(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ b(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ b(gt, tlabel);
      break;
    case kUnorderedLessThan:
      // The "lo" condition will not catch the unordered case.
      // The jump/fall through to the false label will be used if the
      // comparison was unordered.
    case kUnsignedLessThan:
      __ b(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      // Unordered or greater than or equal can be tested with the "hs"
      // condition. See ARMv7 manual A8.3 - Conditional execution.
    case kUnsignedGreaterThanOrEqual:
      __ b(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      // The "ls" condition will not catch the unordered case.
      // The jump/fall through to the false label will be used if the
      // comparison was unordered.
    case kUnsignedLessThanOrEqual:
      __ b(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      // Unordered or greater than can be tested with the "hi" condition.
      // See ARMv7 manual A8.3 - Conditional execution.
    case kUnsignedGreaterThan:
      __ b(hi, tlabel);
      break;
    case kOverflow:
      __ b(vs, tlabel);
      break;
    case kNotOverflow:
      __ b(vc, tlabel);
      break;
  }
  if (!branch->fallthru) __ b(flabel);  // No fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
}


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  Label check;
  DCHECK_NE(0, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = kNoCondition;
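  // An unordered VFP comparison sets the V flag, so for the unordered
  // conditions a "vc" branch skips to the ordered check below while the
  // fall-through materializes the fixed unordered-case result.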
  switch (condition) {
    case kUnorderedEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
    case kOverflow:
      cc = vs;
      break;
    case kNotOverflow:
      cc = vc;
      break;
  }
  __ bind(&check);
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
  __ bind(&done);
}

void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, Deoptimizer::LAZY);
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
}

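// Three frame shapes are assembled here: a MANUAL frame for C entry code
// (saving lr/fp, optionally pp, and any callee-saved registers), a standard
// JS frame for JS function calls, and a stub frame otherwise; spill slots
// are reserved below the frame in all cases.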
void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    bool saved_pp;
    if (FLAG_enable_ool_constant_pool) {
      __ Push(lr, fp, pp);
      // Adjust FP to point to saved FP.
      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      saved_pp = true;
    } else {
      __ Push(lr, fp);
      __ mov(fp, sp);
      saved_pp = false;
    }
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0 || saved_pp) {
      // Save callee-saved registers.
      int register_save_area_size = saved_pp ? kPointerSize : 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = this->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ LeaveFrame(StackFrame::MANUAL);
    __ Ret();
  } else {
    __ LeaveFrame(StackFrame::MANUAL);
    int pop_count = descriptor->IsJSFunctionCall()
                        ? static_cast<int>(descriptor->JSParameterCount())
                        : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat32:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsDoubleStackSlot()) {
        MemOperand dst = g.ToMemOperand(destination);
        __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
        __ str(ip, dst);
      } else {
        SwVfpRegister dst = g.ToFloat32Register(destination);
        __ vmov(dst, src.ToFloat32());
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      DwVfpRegister dst = destination->IsDoubleRegister()
                              ? g.ToFloat64Register(destination)
                              : kScratchDoubleReg;
      __ vmov(dst, src.ToFloat64(), kScratchReg);
      if (destination->IsDoubleStackSlot()) {
        __ vstr(dst, g.ToMemOperand(destination));
      }
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      DCHECK(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    DCHECK(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AddNopForSmiCodeInlining() {
  // On 32-bit ARM we do not insert nops for inlined Smi code.
}

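// Pads the instruction stream (with constant pool emission blocked) so the
// previously recorded lazy-deopt site can later be safely patched with a
// call.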
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  int space_needed = Deoptimizer::patch_size();
  if (!info()->IsStub()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      // Block literal pool emission for duration of padding.
      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= v8::internal::Assembler::kInstrSize;
      }
    }
  }
  MarkLazyDeoptSite();
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8