// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/full-codegen.h"
#include "src/safepoint-table.h"

namespace v8 {
namespace internal {

const int Deoptimizer::table_entry_size_ = 8;


int Deoptimizer::patch_size() {
  const int kCallInstructionSizeInWords = 3;
  return kCallInstructionSizeInWords * Assembler::kInstrSize;
}


void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  // Empty because there is no need for relocation information for the code
  // patching in Deoptimizer::PatchCodeForDeoptimization below.
}


void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();
  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(pointer, 1);
    patcher.masm()->bkpt(0);

    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
      osr_patcher.masm()->bkpt(0);
    }
  }

  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    // We need calls to have a predictable size in the unoptimized code, but
    // this is optimized code, so we don't have to have a predictable size.
    int call_size_in_bytes =
        MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
                                                       deopt_entry,
                                                       RelocInfo::NONE32);
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
    DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
    DCHECK(call_size_in_bytes <= patch_size());
    CodePatcher patcher(call_address, call_size_in_words);
    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
    DCHECK(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}


void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}


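// Set up the registers a deoptimized code stub frame starts out with: r0
// receives the handler parameter count and r1 the address of the C++
// deoptimization handler. These assignments are assumed to match what the
// stub failure trampoline expects when the output frame is materialized.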
void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler());
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(r0.code(), params);
  output_frame->SetRegister(r1.code(), handler);
}


void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
  for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
    double double_value = input_->GetDoubleRegister(i);
    output_frame->SetDoubleRegister(i, double_value);
  }
}


bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
  // There is no dynamic alignment padding on ARM in the input frame.
  return false;
}


#define __ masm()->

// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // Save all general purpose registers before messing with them.
  const int kNumberOfRegisters = Register::kNumRegisters;

  // Everything but pc, lr and ip which will be saved but not restored.
  RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit();

  const int kDoubleRegsSize =
      kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;

  // Save all allocatable VFP registers before messing with them.
  DCHECK(kDoubleRegZero.code() == 14);
  DCHECK(kScratchDoubleReg.code() == 15);

  // Check CPU flags for number of registers, setting the Z condition flag.
  __ CheckFor32DRegs(ip);

  // Push registers d0-d13, and possibly d16-d31, on the stack.
  // If d16-d31 are not pushed, decrease the stack pointer instead.
  __ vstm(db_w, sp, d16, d31, ne);
  __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
  __ vstm(db_w, sp, d0, d13);

  // Push all 16 registers (needed to populate FrameDescription::registers_).
  // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
  // handle this a bit differently.
  __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object (r3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register r4.
  __ mov(r3, lr);
  // Correct one word for bailout id.
  __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  __ sub(r4, fp, r4);

  // Allocate a new deoptimizer object.
  // Pass four arguments in r0 to r3 and fifth argument on stack.
  __ PrepareCallCFunction(6, r5);
  __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ mov(r1, Operand(type()));  // bailout type,
  // r2: bailout id already loaded.
  // r3: code address or 0 already loaded.
  __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
  __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register r0 and get the input
  // frame descriptor pointer to r1 (deoptimizer->input_);
  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r2, MemOperand(sp, i * kPointerSize));
    __ str(r2, MemOperand(r1, offset));
  }

  // Copy VFP registers to
  // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ vldr(d0, sp, src_offset);
    __ vstr(d0, r1, dst_offset);
  }

  // Remove the bailout id and the saved registers from the stack.
  __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

  // Compute a pointer to the unwinding limit in register r2; that is
  // the first stack slot not part of the input frame.
  __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset()));
  __ add(r2, r2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ add(r3, r1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ b(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(r4);
  __ str(r4, MemOperand(r3, 0));
  __ add(r3, r3, Operand(sizeof(uint32_t)));
  __ bind(&pop_loop_header);
  __ cmp(r2, sp);
  __ b(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ push(r0);  // Preserve deoptimizer object across call.
  // r0: deoptimizer object; r1: scratch.
  __ PrepareCallCFunction(1, r1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  // Outer loop state: r4 = current "FrameDescription** output_",
  // r1 = one past the last FrameDescription**.
  __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset()));
  __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset()));  // r4 is output_.
  __ add(r1, r4, Operand(r1, LSL, 2));
  __ jmp(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: r2 = current FrameDescription*, r3 = loop index.
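  // Each frame's contents are pushed from the highest offset down to offset
  // zero, so slot zero of the FrameDescription ends up at the lowest address
  // (nearest the final stack pointer) of the materialized frame.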
  __ ldr(r2, MemOperand(r4, 0));  // output_[ix]
  __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset()));
  __ jmp(&inner_loop_header);
  __ bind(&inner_push_loop);
  __ sub(r3, r3, Operand(sizeof(uint32_t)));
  __ add(r6, r2, Operand(r3));
  __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset()));
  __ push(r6);
  __ bind(&inner_loop_header);
  __ cmp(r3, Operand::Zero());
  __ b(ne, &inner_push_loop);  // test for gt?
  __ add(r4, r4, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ cmp(r4, r1);
  __ b(lt, &outer_push_loop);

  // Check CPU flags for number of registers, setting the Z condition flag.
  __ CheckFor32DRegs(ip);

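  // Restore the VFP registers from the FrameDescription pointed to by r1
  // (deoptimizer->input_). d14 and d15 are skipped because they are reserved
  // as kDoubleRegZero and kScratchDoubleReg (see the DCHECKs above); d16-d31
  // are only restored when the CPU actually has 32 D-registers (the ne
  // condition set up by CheckFor32DRegs).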
  __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
  int src_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
    if (i == kDoubleRegZero.code()) continue;
    if (i == kScratchDoubleReg.code()) continue;

    const DwVfpRegister reg = DwVfpRegister::from_code(i);
    __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
    src_offset += kDoubleSize;
  }

  // Push state, pc, and continuation from the last output frame.
  __ ldr(r6, MemOperand(r2, FrameDescription::state_offset()));
  __ push(r6);
  __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset()));
  __ push(r6);
  __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset()));
  __ push(r6);

  // Push the registers from the last output frame.
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    __ ldr(r6, MemOperand(r2, offset));
    __ push(r6);
  }

  // Restore the registers from the stack.
  __ ldm(ia_w, sp, restored_regs);  // all but pc registers.
  __ pop(ip);  // remove sp
  __ pop(ip);  // remove lr

  __ InitializeRootRegister();

  __ pop(ip);  // remove pc
  __ pop(ip);  // get continuation, leave pc on stack
  __ pop(lr);
  __ Jump(ip);
  __ stop("Unreachable.");
}


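// Each table entry is expected to consist of exactly two instructions: a mov
// of the entry index into ip followed by a branch to the common tail that
// pushes ip, giving table_entry_size_ == 8 bytes per entry, which the DCHECK
// in the loop below enforces.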
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label done;
  for (int i = 0; i < count(); i++) {
    int start = masm()->pc_offset();
    USE(start);
    __ mov(ip, Operand(i));
    __ b(&done);
    DCHECK(masm()->pc_offset() - start == table_entry_size_);
  }
  __ bind(&done);
  __ push(ip);
}


void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}


void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  DCHECK(FLAG_enable_ool_constant_pool);
  SetFrameSlot(offset, value);
}


#undef __

} }  // namespace v8::internal