blob: b83bbbe121e52d6d8cbcc56baab5a4eaa41dcb48 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/v8.h"
6
7#include "src/codegen.h"
8#include "src/deoptimizer.h"
9#include "src/full-codegen.h"
10#include "src/safepoint-table.h"
11
12
13namespace v8 {
14namespace internal {
15
16
17int Deoptimizer::patch_size() {
18 // Size of the code used to patch lazy bailout points.
19 // Patching is done by Deoptimizer::DeoptimizeFunction.
20 return 4 * kInstructionSize;
21}
22
23
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  // Intentionally empty: there is no need for relocation information for the
  // code patching in Deoptimizer::PatchCodeForDeoptimization below, which
  // invalidates the relocation info before patching.
}
28
Ben Murdochb8a8cc12014-11-26 15:28:44 +000029
// Overwrite every lazy bailout point in |code| with a call to the
// corresponding lazy deoptimization entry.
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
  // entry sequence unusable (see other architectures).

  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
  Address code_start_address = code->instruction_start();
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    // A pc value of -1 marks an entry with no lazy bailout point to patch.
    if (deopt_data->Pc(i)->value() == -1) continue;

    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);

    // Emit, at the bailout point:
    //   ldr ip0, <pc + 2 instructions>   ; load the literal below
    //   blr ip0                          ; call the deopt entry
    //   <64-bit literal: deopt_entry>    ; occupies the remaining two slots
    // The whole sequence fills exactly patch_size() bytes.
    PatchingAssembler patcher(call_address, patch_size() / kInstructionSize);
    patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
    patcher.blr(ip0);
    patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));

    // Patched regions must not overlap and must lie entirely inside the
    // code object's instruction area.
    DCHECK((prev_call_address == NULL) ||
           (call_address >= prev_call_address + patch_size()));
    DCHECK(call_address + patch_size() <= code->instruction_end());
#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}
65
66
67void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
68 // Set the register values. The values are not important as there are no
69 // callee saved registers in JavaScript frames, so all registers are
70 // spilled. Registers fp and sp are set to the correct values though.
71 for (int i = 0; i < Register::NumRegisters(); i++) {
72 input_->SetRegister(i, 0);
73 }
74
75 // TODO(all): Do we also need to set a value to csp?
76 input_->SetRegister(jssp.code(), reinterpret_cast<intptr_t>(frame->sp()));
77 input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
78
79 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
80 input_->SetDoubleRegister(i, 0.0);
81 }
82
83 // Fill the frame content from the actual data on the frame.
84 for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
85 input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
86 }
87}
88
89
90bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
91 // There is no dynamic alignment padding on ARM64 in the input frame.
92 return false;
93}
94
95
96void Deoptimizer::SetPlatformCompiledStubRegisters(
97 FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
98 ApiFunction function(descriptor->deoptimization_handler());
99 ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
100 intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
101 int params = descriptor->GetHandlerParameterCount();
102 output_frame->SetRegister(x0.code(), params);
103 output_frame->SetRegister(x1.code(), handler);
104}
105
106
107void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
108 for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
109 double double_value = input_->GetDoubleRegister(i);
110 output_frame->SetDoubleRegister(i, double_value);
111 }
112}
113
114
115
116#define __ masm()->
117
// Generate the common deoptimization entry code: save the register state,
// build a Deoptimizer and its input frame description in C++, replace the
// optimized frame with the computed output frames, then jump to the
// continuation of the last output frame.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  // TODO(all): This code needs to be revisited. We probably only need to save
  // caller-saved registers here. Callee-saved registers can be stored directly
  // in the input frame.

  // Save all allocatable floating point registers.
  CPURegList saved_fp_registers(CPURegister::kFPRegister, kDRegSizeInBits,
                                FPRegister::kAllocatableFPRegisters);
  __ PushCPURegList(saved_fp_registers);

  // We save all the registers except jssp, sp and lr.
  CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
  saved_registers.Combine(fp);
  __ PushCPURegList(saved_registers);

  const int kSavedRegistersAreaSize =
      (saved_registers.Count() * kXRegSize) +
      (saved_fp_registers.Count() * kDRegSize);

  // Floating point registers are saved on the stack above core registers.
  const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;

  // Get the bailout id from the stack (pushed by the table entry prologue,
  // now buried under the register save area).
  Register bailout_id = x2;
  __ Peek(bailout_id, kSavedRegistersAreaSize);

  Register code_object = x3;
  Register fp_to_sp = x4;
  // Get the address of the location in the code object. This is the return
  // address for lazy deoptimization.
  __ Mov(code_object, lr);
  // Compute the fp-to-sp delta, and correct one word for bailout id.
  __ Add(fp_to_sp, masm()->StackPointer(),
         kSavedRegistersAreaSize + (1 * kPointerSize));
  __ Sub(fp_to_sp, fp, fp_to_sp);

  // Allocate a new deoptimizer object. Arguments x0-x5 for Deoptimizer::New.
  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ Mov(x1, type());
  // Following arguments are already loaded:
  //  - x2: bailout id
  //  - x3: code object address
  //  - x4: fp-to-sp delta
  __ Mov(x5, ExternalReference::isolate_address(isolate()));

  {
    // Call Deoptimizer::New().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register x0 (the C call's return value).
  Register deoptimizer = x0;

  // Get the input frame descriptor pointer.
  __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));

  // Copy core registers into the input frame, from the values saved on the
  // stack above, indexed by each register's code.
  CPURegList copy_to_input = saved_registers;
  for (int i = 0; i < saved_registers.Count(); i++) {
    __ Peek(x2, i * kPointerSize);
    CPURegister current_reg = copy_to_input.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Str(x2, MemOperand(x1, offset));
  }

  // Copy FP registers to the input frame.
  for (int i = 0; i < saved_fp_registers.Count(); i++) {
    int dst_offset = FrameDescription::double_registers_offset() +
        (i * kDoubleSize);
    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
    __ Peek(x2, src_offset);
    __ Str(x2, MemOperand(x1, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));

  // Compute a pointer to the unwinding limit in register x2; that is
  // the first stack slot not part of the input frame.
  Register unwind_limit = x2;
  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
  __ Add(unwind_limit, unwind_limit, __ StackPointer());

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Add(x3, x1, FrameDescription::frame_content_offset());
  Label pop_loop;
  Label pop_loop_header;
  __ B(&pop_loop_header);
  __ Bind(&pop_loop);
  __ Pop(x4);
  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
  __ Bind(&pop_loop_header);
  __ Cmp(unwind_limit, __ StackPointer());
  __ B(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ Push(x0);  // Preserve deoptimizer object across call.

  {
    // Call Deoptimizer::ComputeOutputFrames().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ Pop(x4);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames: iterate the
  // output frame array (x0 = current element, x1 = end) and push each
  // frame's contents, innermost loop walking a frame from high to low.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
  __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
  __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
  __ B(&outer_loop_header);

  __ Bind(&outer_push_loop);
  Register current_frame = x2;
  __ Ldr(current_frame, MemOperand(x0, 0));
  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
  __ B(&inner_loop_header);

  __ Bind(&inner_push_loop);
  __ Sub(x3, x3, kPointerSize);
  __ Add(x6, current_frame, x3);
  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
  __ Push(x7);
  __ Bind(&inner_loop_header);
  __ Cbnz(x3, &inner_push_loop);

  __ Add(x0, x0, kPointerSize);
  __ Bind(&outer_loop_header);
  __ Cmp(x0, x1);
  __ B(lt, &outer_push_loop);

  // Restore FP register values from the input frame description.
  __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
  DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
         !saved_fp_registers.IncludesAliasOf(fp_zero) &&
         !saved_fp_registers.IncludesAliasOf(fp_scratch));
  int src_offset = FrameDescription::double_registers_offset();
  while (!saved_fp_registers.IsEmpty()) {
    const CPURegister reg = saved_fp_registers.PopLowestIndex();
    __ Ldr(reg, MemOperand(x1, src_offset));
    src_offset += kDoubleSize;
  }

  // Push state from the last output frame.
  __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
  __ Push(x6);

  // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
  // stack, then pops it all into registers. Here, we try to load it directly
  // into the relevant registers. Is this correct? If so, we should improve the
  // ARM code.

  // TODO(all): This code needs to be revisited, We probably don't need to
  // restore all the registers as fullcodegen does not keep live values in
  // registers (note that at least fp must be restored though).

  // Restore registers from the last output frame.
  // Note that lr is not in the list of saved_registers and will be restored
  // later. We can use it to hold the address of last output frame while
  // reloading the other registers.
  DCHECK(!saved_registers.IncludesAliasOf(lr));
  Register last_output_frame = lr;
  __ Mov(last_output_frame, current_frame);

  // We don't need to restore x7 as it will be clobbered later to hold the
  // continuation address.
  Register continuation = x7;
  saved_registers.Remove(continuation);

  while (!saved_registers.IsEmpty()) {
    // TODO(all): Look for opportunities to optimize this by using ldp.
    CPURegister current_reg = saved_registers.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
  }

  // Finally load the continuation target and the return pc, restore the
  // root register, and jump to the continuation.
  __ Ldr(continuation, MemOperand(last_output_frame,
                                  FrameDescription::continuation_offset()));
  __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
  __ InitializeRootRegister();
  __ Br(continuation);
}
308
309
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry:
// a movz (loading the entry id) plus a branch to the common tail.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
313
314
// Emit the second-level deopt table: count() entries of exactly
// table_entry_size_ bytes each, every entry loading its id and branching to
// a shared tail that pushes the id on the stack.
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  UseScratchRegisterScope temps(masm());
  Register entry_id = temps.AcquireX();

  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label done;
  {
    // Entries must have a fixed size, so suppress veneers/pools here.
    InstructionAccurateScope scope(masm());

    // The number of entries will never exceed kMaxNumberOfEntries.
    // As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
    // a movz instruction to load the entry id.
    DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));

    for (int i = 0; i < count(); i++) {
      int start = masm()->pc_offset();
      USE(start);
      __ movz(entry_id, i);
      __ b(&done);
      // Each entry must occupy exactly table_entry_size_ bytes so that the
      // entry address can be computed from the id (and vice versa).
      DCHECK(masm()->pc_offset() - start == table_entry_size_);
    }
  }
  __ Bind(&done);
  __ Push(entry_id);
}
341
342
// Store the caller's pc at the given offset in this frame's slots.
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
346
347
// Store the caller's fp at the given offset in this frame's slots.
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
351
352
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No out-of-line constant pool support on ARM64; callers must never
  // request a caller constant pool slot here.
  UNREACHABLE();
}
357
358
359#undef __
360
361} } // namespace v8::internal