blob: 3aa1e4dfa1f067955909d8983483a8427572e205 [file] [log] [blame]
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001// Copyright 2013 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00005#include "src/arm64/frames-arm64.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +00006#include "src/codegen.h"
7#include "src/deoptimizer.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00008#include "src/full-codegen/full-codegen.h"
9#include "src/register-configuration.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +000010#include "src/safepoint-table.h"
11
12
13namespace v8 {
14namespace internal {
15
16
int Deoptimizer::patch_size() {
  // Size of the code used to patch lazy bailout points.
  // Patching is done by Deoptimizer::DeoptimizeFunction.
  // Four instruction slots: ldr + blr plus a two-slot 64-bit literal holding
  // the deoptimization entry address (see PatchCodeForDeoptimization).
  return 4 * kInstructionSize;
}
22
23
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  // Intentionally empty on ARM64: there is no need for relocation information
  // for the code patching in Deoptimizer::PatchCodeForDeoptimization below,
  // which invalidates the relocation info instead of extending it.
}
28
Ben Murdochb8a8cc12014-11-26 15:28:44 +000029
30void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
31 // Invalidate the relocation information, as it will become invalid by the
32 // code patching below, and is not needed any more.
33 code->InvalidateRelocation();
34
35 // TODO(jkummerow): if (FLAG_zap_code_space), make the code object's
36 // entry sequence unusable (see other architectures).
37
38 DeoptimizationInputData* deopt_data =
39 DeoptimizationInputData::cast(code->deoptimization_data());
40 Address code_start_address = code->instruction_start();
41#ifdef DEBUG
42 Address prev_call_address = NULL;
43#endif
44 // For each LLazyBailout instruction insert a call to the corresponding
45 // deoptimization entry.
46 for (int i = 0; i < deopt_data->DeoptCount(); i++) {
47 if (deopt_data->Pc(i)->value() == -1) continue;
48
49 Address call_address = code_start_address + deopt_data->Pc(i)->value();
50 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
51
Ben Murdoch4a90d5f2016-03-22 12:00:34 +000052 PatchingAssembler patcher(isolate, call_address,
53 patch_size() / kInstructionSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +000054 patcher.ldr_pcrel(ip0, (2 * kInstructionSize) >> kLoadLiteralScaleLog2);
55 patcher.blr(ip0);
56 patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
57
58 DCHECK((prev_call_address == NULL) ||
59 (call_address >= prev_call_address + patch_size()));
60 DCHECK(call_address + patch_size() <= code->instruction_end());
61#ifdef DEBUG
62 prev_call_address = call_address;
63#endif
64 }
65}
66
67
bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
  // There is no dynamic alignment padding on ARM64 in the input frame,
  // so this is unconditionally false regardless of |shared|.
  return false;
}
72
73
void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
  // Record in the output frame description the register values used when
  // resuming in a compiled stub: x0 = handler parameter count,
  // x1 = address of the stub's deoptimization handler (as an external
  // reference of kind BUILTIN_CALL).
  ApiFunction function(descriptor->deoptimization_handler());
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(x0.code(), params);
  output_frame->SetRegister(x1.code(), handler);
}
83
84
85void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
86 for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
87 double double_value = input_->GetDoubleRegister(i);
88 output_frame->SetDoubleRegister(i, double_value);
89 }
90}
91
92
93
94#define __ masm()->
95
void Deoptimizer::TableEntryGenerator::Generate() {
  // Emit the common deoptimization handler that all deopt-table entries
  // branch to (the table itself is emitted by GeneratePrologue, which pushes
  // the entry id on the stack before arriving here with all registers live).
  GeneratePrologue();

  // TODO(all): This code needs to be revisited. We probably only need to save
  // caller-saved registers here. Callee-saved registers can be stored directly
  // in the input frame.

  // Save all allocatable floating point registers.
  CPURegList saved_fp_registers(
      CPURegister::kFPRegister, kDRegSizeInBits,
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
          ->allocatable_double_codes_mask());
  __ PushCPURegList(saved_fp_registers);

  // We save all the registers except jssp, sp and lr.
  CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
  saved_registers.Combine(fp);
  __ PushCPURegList(saved_registers);

  // Record fp in the isolate's C entry FP slot.
  __ Mov(x3, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ Str(fp, MemOperand(x3));

  const int kSavedRegistersAreaSize =
      (saved_registers.Count() * kXRegSize) +
      (saved_fp_registers.Count() * kDRegSize);

  // Floating point registers are saved on the stack above core registers.
  const int kFPRegistersOffset = saved_registers.Count() * kXRegSize;

  // Get the bailout id from the stack (pushed by GeneratePrologue, now
  // located just above the saved-registers area).
  Register bailout_id = x2;
  __ Peek(bailout_id, kSavedRegistersAreaSize);

  Register code_object = x3;
  Register fp_to_sp = x4;
  // Get the address of the location in the code object. This is the return
  // address for lazy deoptimization.
  __ Mov(code_object, lr);
  // Compute the fp-to-sp delta, and correct one word for bailout id.
  __ Add(fp_to_sp, masm()->StackPointer(),
         kSavedRegistersAreaSize + (1 * kPointerSize));
  __ Sub(fp_to_sp, fp, fp_to_sp);

  // Allocate a new deoptimizer object. Arguments x0..x5 for Deoptimizer::New:
  __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ Mov(x1, type());
  // Following arguments are already loaded:
  //  - x2: bailout id
  //  - x3: code object address
  //  - x4: fp-to-sp delta
  __ Mov(x5, ExternalReference::isolate_address(isolate()));

  {
    // Call Deoptimizer::New().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register x0.
  Register deoptimizer = x0;

  // Get the input frame descriptor pointer.
  __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));

  // Copy core registers into the input frame.
  CPURegList copy_to_input = saved_registers;
  for (int i = 0; i < saved_registers.Count(); i++) {
    __ Peek(x2, i * kPointerSize);
    CPURegister current_reg = copy_to_input.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Str(x2, MemOperand(x1, offset));
  }

  // Copy FP registers to the input frame.
  CPURegList copy_fp_to_input = saved_fp_registers;
  for (int i = 0; i < saved_fp_registers.Count(); i++) {
    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
    __ Peek(x2, src_offset);
    CPURegister reg = copy_fp_to_input.PopLowestIndex();
    int dst_offset = FrameDescription::double_registers_offset() +
        (reg.code() * kDoubleSize);
    __ Str(x2, MemOperand(x1, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));

  // Compute a pointer to the unwinding limit in register x2; that is
  // the first stack slot not part of the input frame.
  Register unwind_limit = x2;
  __ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
  __ Add(unwind_limit, unwind_limit, __ StackPointer());

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Add(x3, x1, FrameDescription::frame_content_offset());
  Label pop_loop;
  Label pop_loop_header;
  __ B(&pop_loop_header);
  __ Bind(&pop_loop);
  __ Pop(x4);
  __ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
  __ Bind(&pop_loop_header);
  __ Cmp(unwind_limit, __ StackPointer());
  __ B(ne, &pop_loop);

  // Compute the output frame in the deoptimizer.
  __ Push(x0);  // Preserve deoptimizer object across call.

  {
    // Call Deoptimizer::ComputeOutputFrames().
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ Pop(x4);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  // x0 walks the output frame array; x1 is its end; x3 counts down the bytes
  // remaining in the current output frame.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  __ Ldrsw(x1, MemOperand(x4, Deoptimizer::output_count_offset()));
  __ Ldr(x0, MemOperand(x4, Deoptimizer::output_offset()));
  __ Add(x1, x0, Operand(x1, LSL, kPointerSizeLog2));
  __ B(&outer_loop_header);

  __ Bind(&outer_push_loop);
  Register current_frame = x2;
  __ Ldr(current_frame, MemOperand(x0, 0));
  __ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
  __ B(&inner_loop_header);

  __ Bind(&inner_push_loop);
  __ Sub(x3, x3, kPointerSize);
  __ Add(x6, current_frame, x3);
  __ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
  __ Push(x7);
  __ Bind(&inner_loop_header);
  __ Cbnz(x3, &inner_push_loop);

  __ Add(x0, x0, kPointerSize);
  __ Bind(&outer_loop_header);
  __ Cmp(x0, x1);
  __ B(lt, &outer_push_loop);

  // Restore FP registers from the input frame description (x1).
  __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
  DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
         !saved_fp_registers.IncludesAliasOf(fp_zero) &&
         !saved_fp_registers.IncludesAliasOf(fp_scratch));
  while (!saved_fp_registers.IsEmpty()) {
    const CPURegister reg = saved_fp_registers.PopLowestIndex();
    int src_offset = FrameDescription::double_registers_offset() +
        (reg.code() * kDoubleSize);
    __ Ldr(reg, MemOperand(x1, src_offset));
  }

  // Push state from the last output frame.
  __ Ldr(x6, MemOperand(current_frame, FrameDescription::state_offset()));
  __ Push(x6);

  // TODO(all): ARM copies a lot (if not all) of the last output frame onto the
  // stack, then pops it all into registers. Here, we try to load it directly
  // into the relevant registers. Is this correct? If so, we should improve the
  // ARM code.

  // TODO(all): This code needs to be revisited. We probably don't need to
  // restore all the registers as fullcodegen does not keep live values in
  // registers (note that at least fp must be restored though).

  // Restore registers from the last output frame.
  // Note that lr is not in the list of saved_registers and will be restored
  // later. We can use it to hold the address of last output frame while
  // reloading the other registers.
  DCHECK(!saved_registers.IncludesAliasOf(lr));
  Register last_output_frame = lr;
  __ Mov(last_output_frame, current_frame);

  // We don't need to restore x7 as it will be clobbered later to hold the
  // continuation address.
  Register continuation = x7;
  saved_registers.Remove(continuation);

  while (!saved_registers.IsEmpty()) {
    // TODO(all): Look for opportunities to optimize this by using ldp.
    CPURegister current_reg = saved_registers.PopLowestIndex();
    int offset = (current_reg.code() * kPointerSize) +
        FrameDescription::registers_offset();
    __ Ldr(current_reg, MemOperand(last_output_frame, offset));
  }

  // Finally set lr to the recorded pc and branch to the continuation.
  __ Ldr(continuation, MemOperand(last_output_frame,
                                  FrameDescription::continuation_offset()));
  __ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
  __ InitializeRootRegister();
  __ Br(continuation);
}
293
294
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry:
// a movz (entry id) plus a branch to the shared handler.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
298
299
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Emit the deoptimization entry table: count() two-instruction stubs, each
  // loading its own entry id and branching to the common code after the
  // table, where the id is pushed on the stack.
  UseScratchRegisterScope temps(masm());
  Register entry_id = temps.AcquireX();

  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label done;
  {
    InstructionAccurateScope scope(masm());

    // The number of entries will never exceed kMaxNumberOfEntries.
    // As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
    // a movz instruction to load the entry id.
    DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));

    for (int i = 0; i < count(); i++) {
      int start = masm()->pc_offset();
      USE(start);
      __ movz(entry_id, i);
      __ b(&done);
      // Every entry must occupy exactly table_entry_size_ bytes so entries
      // can be addressed by index.
      DCHECK(masm()->pc_offset() - start == table_entry_size_);
    }
  }
  __ Bind(&done);
  __ Push(entry_id);
}
326
327
// Store the caller's return address at |offset| in this frame description.
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
331
332
// Store the caller's frame pointer at |offset| in this frame description.
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  SetFrameSlot(offset, value);
}
336
337
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support on ARM64, so this must never be called.
  UNREACHABLE();
}
342
343
344#undef __
345
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000346} // namespace internal
347} // namespace v8