blob: e9caaadadbcbd257fd0f1bf642a6b0cb875c5563 [file] [log] [blame]
Steve Block44f0eee2011-05-26 01:26:41 +01001// Copyright 2011 the V8 project authors. All rights reserved.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
Steve Block44f0eee2011-05-26 01:26:41 +01004
Ben Murdochb8a8cc12014-11-26 15:28:44 +00005#include "src/codegen.h"
6#include "src/deoptimizer.h"
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00007#include "src/full-codegen/full-codegen.h"
8#include "src/register-configuration.h"
Ben Murdochb8a8cc12014-11-26 15:28:44 +00009#include "src/safepoint-table.h"
Steve Block44f0eee2011-05-26 01:26:41 +010010
Steve Block44f0eee2011-05-26 01:26:41 +010011namespace v8 {
12namespace internal {
13
14
Steve Block44f0eee2011-05-26 01:26:41 +010015int Deoptimizer::patch_size() {
Ben Murdoch3ef787d2012-04-12 10:51:47 +010016 const int kCallInstructionSizeInWords = 4;
Steve Block44f0eee2011-05-26 01:26:41 +010017 return kCallInstructionSizeInWords * Assembler::kInstrSize;
18}
19
20
void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
  // Intentionally a no-op on MIPS: there is no need for relocation
  // information for the code patching done in
  // Deoptimizer::PatchCodeForDeoptimization below.
}
25
26
// Patches every recorded lazy-deopt site in |code| with a call to the
// corresponding lazy deoptimization entry. With --zap-code-space the
// normal entry (and the OSR entry, if any) of the code object are also
// overwritten with break instructions so re-entering it fails fast.
void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
  Address code_start_address = code->instruction_start();
  // Invalidate the relocation information, as it will become invalid by the
  // code patching below, and is not needed any more.
  code->InvalidateRelocation();

  if (FLAG_zap_code_space) {
    // Fail hard and early if we enter this code object again.
    // Skip past the code-age sequence, if present, so that prologue
    // patching machinery is not confused by the break.
    byte* pointer = code->FindCodeAgeSequence();
    if (pointer != NULL) {
      pointer += kNoCodeAgeSequenceLength;
    } else {
      pointer = code->instruction_start();
    }
    CodePatcher patcher(isolate, pointer, 1);
    patcher.masm()->break_(0xCC);

    // Also zap the OSR entry point, when the code has one.
    DeoptimizationInputData* data =
        DeoptimizationInputData::cast(code->deoptimization_data());
    int osr_offset = data->OsrPcOffset()->value();
    if (osr_offset > 0) {
      CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
                              1);
      osr_patcher.masm()->break_(0xCC);
    }
  }

  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    // Entries without a recorded pc (-1) have no patch site.
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
                                                      RelocInfo::NONE32);
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
    // The emitted call must be whole instructions and fit in the space
    // reserved by patch_size().
    DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
    DCHECK(call_size_in_bytes <= patch_size());
    CodePatcher patcher(isolate, call_address, call_size_in_words);
    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
    // Patch sites must be in ascending pc order, non-overlapping, and
    // inside the code object.
    DCHECK(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    DCHECK(call_address + patch_size() <= code->instruction_end());

#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }
}
81
82
// Seeds the stub-failure frame's registers: a0 receives the handler's
// parameter count and a1 the address of the stub's deoptimization
// handler (as a BUILTIN_CALL external reference).
void Deoptimizer::SetPlatformCompiledStubRegisters(
    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
  ApiFunction function(descriptor->deoptimization_handler());
  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
  int params = descriptor->GetHandlerParameterCount();
  output_frame->SetRegister(a0.code(), params);   // Argument count.
  output_frame->SetRegister(a1.code(), handler);  // Handler address.
}
92
93
94void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
95 for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
96 double double_value = input_->GetDoubleRegister(i);
97 output_frame->SetDoubleRegister(i, double_value);
98 }
99}
100
Ben Murdoch3ef787d2012-04-12 10:51:47 +0100101#define __ masm()->
102
103
// This code tries to be close to ia32 code so that any changes can be
// easily ported.
//
// Generates the common deoptimization entry code. After the prologue
// (which pushed the bailout id), this: saves FPU and core registers to
// the stack, calls Deoptimizer::New() via C to allocate the deoptimizer,
// copies the saved registers and the unwound activation frame into the
// input FrameDescription, calls Deoptimizer::ComputeOutputFrames(), then
// pushes the contents of every output frame onto the stack, restores
// registers from the last output frame, and jumps to its continuation.
void Deoptimizer::TableEntryGenerator::Generate() {
  GeneratePrologue();

  // Unlike on ARM we don't save all the registers, just the useful ones.
  // For the rest, there are gaps on the stack, so the offsets remain the same.
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp.bit() | ra.bit();

  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;

  // Save all FPU registers before messing with them.
  __ Subu(sp, sp, Operand(kDoubleRegsSize));
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int offset = code * kDoubleSize;
    __ sdc1(fpu_reg, MemOperand(sp, offset));
  }

  // Push saved_regs (needed to populate FrameDescription::registers_).
  // Leave gaps for other registers.
  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs & (1 << i)) != 0) {
      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }

  // Record fp in the isolate's c_entry_fp slot.
  __ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
  __ sw(fp, MemOperand(a2));

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object (a3) (return
  // address for lazy deoptimization) and compute the fp-to-sp delta in
  // register t0.
  __ mov(a3, ra);
  // Correct one word for bailout id.
  __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

  __ Subu(t0, fp, t0);

  // Allocate a new deoptimizer object.
  __ PrepareCallCFunction(6, t1);
  // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
  __ mov(a0, zero_reg);
  Label context_check;
  // If the frame slot holds a Smi frame-type marker instead of a
  // function, leave a0 as 0 (no JSFunction).
  __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
  __ JumpIfSmi(a1, &context_check);
  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ bind(&context_check);
  __ li(a1, Operand(type()));  // Bailout type.
  // a2: bailout id already loaded.
  // a3: code address or 0 already loaded.
  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
  __ li(t1, Operand(ExternalReference::isolate_address(isolate())));
  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
  }

  // Preserve "deoptimizer" object in register v0 and get the input
  // frame descriptor pointer to a1 (deoptimizer->input_);
  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
  __ mov(a0, v0);
  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((saved_regs & (1 << i)) != 0) {
      __ lw(a2, MemOperand(sp, i * kPointerSize));
      __ sw(a2, MemOperand(a1, offset));
    } else if (FLAG_debug_code) {
      // Fill unsaved slots with a recognizable zap value in debug mode.
      __ li(a2, kDebugZapValue);
      __ sw(a2, MemOperand(a1, offset));
    }
  }

  int double_regs_offset = FrameDescription::double_registers_offset();
  // Copy FPU registers to
  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    int dst_offset = code * kDoubleSize + double_regs_offset;
    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ ldc1(f0, MemOperand(sp, src_offset));
    __ sdc1(f0, MemOperand(a1, dst_offset));
  }

  // Remove the bailout id and the saved registers from the stack.
  __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));

  // Compute a pointer to the unwinding limit in register a2; that is
  // the first stack slot not part of the input frame.
  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
  __ Addu(a2, a2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  Label pop_loop_header;
  __ BranchShort(&pop_loop_header);
  __ bind(&pop_loop);
  __ pop(t0);
  __ sw(t0, MemOperand(a3, 0));
  __ addiu(a3, a3, sizeof(uint32_t));
  __ bind(&pop_loop_header);
  __ BranchShort(&pop_loop, ne, a2, Operand(sp));

  // Compute the output frame in the deoptimizer.
  __ push(a0);  // Preserve deoptimizer object across call.
  // a0: deoptimizer object; a1: scratch.
  __ PrepareCallCFunction(1, a1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate()), 1);
  }
  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).

  // Reset sp to the top of the caller's frame before rebuilding frames.
  __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop,
      outer_loop_header, inner_loop_header;
  // Outer loop state: t0 = current "FrameDescription** output_",
  // a1 = one past the last FrameDescription**.
  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
  __ lw(t0, MemOperand(a0, Deoptimizer::output_offset()));  // t0 is output_.
  __ Lsa(a1, t0, a1, kPointerSizeLog2);
  __ BranchShort(&outer_loop_header);
  __ bind(&outer_push_loop);
  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
  __ lw(a2, MemOperand(t0, 0));  // output_[ix]
  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
  __ BranchShort(&inner_loop_header);
  __ bind(&inner_push_loop);
  // Push the frame contents from high to low address.
  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
  __ Addu(t2, a2, Operand(a3));
  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
  __ push(t3);
  __ bind(&inner_loop_header);
  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));

  __ Addu(t0, t0, Operand(kPointerSize));
  __ bind(&outer_loop_header);
  __ BranchShort(&outer_push_loop, lt, t0, Operand(a1));

  // Restore FPU registers from the input frame description.
  __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
    int src_offset = code * kDoubleSize + double_regs_offset;
    __ ldc1(fpu_reg, MemOperand(a1, src_offset));
  }

  // Push state, pc, and continuation from the last output frame.
  __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
  __ push(t2);

  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
  __ push(t2);
  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
  __ push(t2);


  // Technically restoring 'at' should work unless zero_reg is also restored
  // but it's safer to check for this.
  DCHECK(!(at.bit() & restored_regs));
  // Restore the registers from the last output frame.
  __ mov(at, a2);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ lw(ToRegister(i), MemOperand(at, offset));
    }
  }

  __ InitializeRootRegister();

  __ pop(at);  // Get continuation, leave pc on stack.
  __ pop(ra);
  __ Jump(at);
  __ stop("Unreachable.");
}
306
307
// Maximum size of a table entry generated below: each entry is a short
// branch plus the instruction in its delay slot (see GeneratePrologue).
const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
Ben Murdoch3ef787d2012-04-12 10:51:47 +0100310
// Emits the table of deoptimization entry points. Each entry is exactly
// table_entry_size_: a short branch to a common tail with an 'li' in its
// delay slot that loads the entry's id into 'at'; the tail pushes 'at'.
// When the table is too long for a 16-bit short branch, a mini
// trampoline is emitted and negative ids are fixed up at 'done_special'.
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  // Trampoline pool insertion would break the fixed entry size.
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());

  // Create a sequence of deoptimization entries.
  // Note that registers are still live when jumping to an entry.
  Label table_start, done, done_special, trampoline_jump;
  __ bind(&table_start);
  // Max number of entries a BranchShort (16-bit offset) can span.
  int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2))/
      (table_entry_size_ / Assembler::kInstrSize);

  if (count() <= kMaxEntriesBranchReach) {
    // Common case.
    for (int i = 0; i < count(); i++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      __ BranchShort(USE_DELAY_SLOT, &done);  // Expose delay slot.
      __ li(at, i);  // In the delay slot.

      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }

    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&done);
    __ Push(at);
  } else {
    // Uncommon case, the branch cannot reach.
    // Create mini trampoline and adjust id constants to get proper value at
    // the end of table.
    for (int i = kMaxEntriesBranchReach; i > 1; i--) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      // Early entries carry negative ids; kMaxEntriesBranchReach is
      // added back at done_special below.
      __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);  // Expose delay slot.
      __ li(at, - i);  // In the delay slot.
      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
    }
    // Entry with id == kMaxEntriesBranchReach - 1.
    __ bind(&trampoline_jump);
    __ BranchShort(USE_DELAY_SLOT, &done_special);
    __ li(at, -1);

    for (int i = kMaxEntriesBranchReach ; i < count(); i++) {
      Label start;
      __ bind(&start);
      DCHECK(is_int16(i));
      __ BranchShort(USE_DELAY_SLOT, &done);  // Expose delay slot.
      __ li(at, i);  // In the delay slot.
    }

    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
              count() * table_entry_size_);
    __ bind(&done_special);
    // Convert the negative id loaded above into the real entry id.
    __ addiu(at, at, kMaxEntriesBranchReach);
    __ bind(&done);
    __ Push(at);
  }
}
370
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000371
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
  // On MIPS the caller's pc occupies an ordinary frame slot.
  SetFrameSlot(offset, value);
}
375
376
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
  // On MIPS the caller's fp occupies an ordinary frame slot.
  SetFrameSlot(offset, value);
}
380
381
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
  // No embedded constant pool support on MIPS; callers must never
  // attempt to store a caller constant pool pointer.
  UNREACHABLE();
}
386
387
Ben Murdoch3ef787d2012-04-12 10:51:47 +0100388#undef __
389
Steve Block44f0eee2011-05-26 01:26:41 +0100390
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000391} // namespace internal
392} // namespace v8