Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4

diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 770d425..ea7a732 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -26,15 +26,16 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/v8.h"
-
#if V8_TARGET_ARCH_ARM64
#define ARM64_DEFINE_REG_STATICS
+#include "src/arm64/assembler-arm64.h"
#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
+#include "src/register-configuration.h"
namespace v8 {
namespace internal {
@@ -53,7 +54,8 @@
// Probe for runtime features
base::CPU cpu;
if (cpu.implementer() == base::CPU::NVIDIA &&
- cpu.variant() == base::CPU::NVIDIA_DENVER) {
+ cpu.variant() == base::CPU::NVIDIA_DENVER &&
+ cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
supported_ |= 1u << COHERENT_CACHE;
}
}
@@ -108,17 +110,17 @@
}
-CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+CPURegList CPURegList::GetCalleeSaved(int size) {
return CPURegList(CPURegister::kRegister, size, 19, 29);
}
-CPURegList CPURegList::GetCalleeSavedFP(unsigned size) {
+CPURegList CPURegList::GetCalleeSavedFP(int size) {
return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}
-CPURegList CPURegList::GetCallerSaved(unsigned size) {
+CPURegList CPURegList::GetCallerSaved(int size) {
// Registers x0-x18 and lr (x30) are caller-saved.
CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
list.Combine(lr);
@@ -126,7 +128,7 @@
}
-CPURegList CPURegList::GetCallerSavedFP(unsigned size) {
+CPURegList CPURegList::GetCallerSavedFP(int size) {
// Registers d0-d7 and d16-d31 are caller-saved.
CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
@@ -171,7 +173,7 @@
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
-const int RelocInfo::kApplyMask = 0;
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
@@ -188,31 +190,14 @@
}
-void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
- // Patch the code at the current address with the supplied instructions.
- Instr* pc = reinterpret_cast<Instr*>(pc_);
- Instr* instr = reinterpret_cast<Instr*>(instructions);
- for (int i = 0; i < instruction_count; i++) {
- *(pc + i) = *(instr + i);
- }
-
- // Indicate that code has changed.
- CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
-}
-
-
-// Patch the code at the current PC with a call to the target address.
-// Additional guard instructions can be added if required.
-void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
- UNIMPLEMENTED();
-}
-
-
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
- for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
- Register candidate = Register::FromAllocationIndex(i);
+ const RegisterConfiguration* config =
+ RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ Register candidate = Register::from_code(code);
if (regs.IncludesAliasOf(candidate)) continue;
return candidate;
}
@@ -311,8 +296,8 @@
mode != RelocInfo::STATEMENT_POSITION &&
mode != RelocInfo::CONST_POOL &&
mode != RelocInfo::VENEER_POOL &&
- mode != RelocInfo::CODE_AGE_SEQUENCE);
-
+ mode != RelocInfo::CODE_AGE_SEQUENCE &&
+ mode != RelocInfo::DEOPT_REASON);
uint64_t raw_data = static_cast<uint64_t>(data);
int offset = assm_->pc_offset();
if (IsEmpty()) {
@@ -526,7 +511,7 @@
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
}
assm_->dc64(data);
}
@@ -542,7 +527,7 @@
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
- instr->SetImmPCOffsetTarget(assm_->pc());
+ instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
assm_->dc64(unique_it->first);
}
unique_entries_.clear();
@@ -590,6 +575,7 @@
void Assembler::GetCode(CodeDesc* desc) {
+ reloc_info_writer.Finish();
// Emit constant pool if necessary.
CheckConstPool(true, false);
DCHECK(constpool_.IsEmpty());
@@ -599,9 +585,11 @@
desc->buffer = reinterpret_cast<byte*>(buffer_);
desc->buffer_size = buffer_size_;
desc->instr_size = pc_offset();
- desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
- reloc_info_writer.pos();
+ desc->reloc_size =
+ static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
+ reloc_info_writer.pos());
desc->origin = this;
+ desc->constant_pool_size = 0;
}
}
@@ -619,13 +607,13 @@
if (label->is_linked()) {
static const int kMaxLinksToCheck = 64; // Avoid O(n2) behaviour.
int links_checked = 0;
- int linkoffset = label->pos();
+ int64_t linkoffset = label->pos();
bool end_of_chain = false;
while (!end_of_chain) {
if (++links_checked > kMaxLinksToCheck) break;
Instruction * link = InstructionAt(linkoffset);
- int linkpcoffset = link->ImmPCOffset();
- int prevlinkoffset = linkoffset + linkpcoffset;
+ int64_t linkpcoffset = link->ImmPCOffset();
+ int64_t prevlinkoffset = linkoffset + linkpcoffset;
end_of_chain = (linkoffset == prevlinkoffset);
linkoffset = linkoffset + linkpcoffset;
@@ -664,27 +652,28 @@
// currently referring to this label.
label->Unuse();
} else {
- label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
+ label->link_to(
+ static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
}
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
- prev_link->SetImmPCOffsetTarget(next_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), next_link);
} else if (label_veneer != NULL) {
// Use the veneer for all previous links in the chain.
- prev_link->SetImmPCOffsetTarget(prev_link);
+ prev_link->SetImmPCOffsetTarget(isolate(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
- link->SetImmPCOffsetTarget(label_veneer);
+ link->SetImmPCOffsetTarget(isolate(), label_veneer);
link = next_link;
}
} else {
@@ -740,7 +729,7 @@
while (label->is_linked()) {
int linkoffset = label->pos();
Instruction* link = InstructionAt(linkoffset);
- int prevlinkoffset = linkoffset + link->ImmPCOffset();
+ int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());
CheckLabelLinkChain(label);
@@ -751,7 +740,16 @@
DCHECK(prevlinkoffset >= 0);
// Update the link to point to the label.
- link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
+ if (link->IsUnresolvedInternalReference()) {
+ // Internal references do not get patched to an instruction but directly
+ // to an address.
+ internal_reference_positions_.push_back(linkoffset);
+ PatchingAssembler patcher(isolate(), link, 2);
+ patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
+ } else {
+ link->SetImmPCOffsetTarget(isolate(),
+ reinterpret_cast<Instruction*>(pc_));
+ }
// Link the label to the previous link in the chain.
if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
@@ -822,12 +820,13 @@
while (!end_of_chain) {
Instruction * link = InstructionAt(link_offset);
- link_pcoffset = link->ImmPCOffset();
+ link_pcoffset = static_cast<int>(link->ImmPCOffset());
// ADR instructions are not handled by veneers.
if (link->IsImmBranch()) {
- int max_reachable_pc = InstructionOffset(link) +
- Instruction::ImmBranchRange(link->BranchType());
+ int max_reachable_pc =
+ static_cast<int>(InstructionOffset(link) +
+ Instruction::ImmBranchRange(link->BranchType()));
typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
std::pair<unresolved_info_it, unresolved_info_it> range;
range = unresolved_branches_.equal_range(max_reachable_pc);
@@ -899,12 +898,12 @@
// The constant pool marker is made of two instructions. These instructions
// will never be emitted by the JIT, so checking for the first one is enough:
// 0: ldr xzr, #<size of pool>
- bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
+ bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);
// It is still worth asserting the marker is complete.
// 4: blr xzr
DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
- instr->following()->Rn() == xzr.code()));
+ instr->following()->Rn() == kZeroRegCode));
return result;
}
@@ -920,7 +919,7 @@
const char* message =
reinterpret_cast<const char*>(
instr->InstructionAtOffset(kDebugMessageOffset));
- int size = kDebugMessageOffset + strlen(message) + 1;
+ int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
return RoundUp(size, kInstructionSize) / kInstructionSize;
}
// Same for printf support, see MacroAssembler::CallPrintf().
@@ -1282,10 +1281,8 @@
// Bitfield operations.
-void Assembler::bfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::bfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | BFM | N |
@@ -1295,10 +1292,8 @@
}
-void Assembler::sbfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::sbfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.Is64Bits() || rn.Is32Bits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | SBFM | N |
@@ -1308,10 +1303,8 @@
}
-void Assembler::ubfm(const Register& rd,
- const Register& rn,
- unsigned immr,
- unsigned imms) {
+void Assembler::ubfm(const Register& rd, const Register& rn, int immr,
+ int imms) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
Emit(SF(rd) | UBFM | N |
@@ -1321,10 +1314,8 @@
}
-void Assembler::extr(const Register& rd,
- const Register& rn,
- const Register& rm,
- unsigned lsb) {
+void Assembler::extr(const Register& rd, const Register& rn, const Register& rm,
+ int lsb) {
DCHECK(rd.SizeInBits() == rn.SizeInBits());
DCHECK(rd.SizeInBits() == rm.SizeInBits());
Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
@@ -1610,9 +1601,11 @@
// 'rt' and 'rt2' can only be aliased for stores.
DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
DCHECK(AreSameSizeAndType(rt, rt2));
+ DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
+ int offset = static_cast<int>(addr.offset());
Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
- ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
+ ImmLSPair(offset, CalcLSPairDataSize(op));
Instr addrmodeop;
if (addr.IsImmediateOffset()) {
@@ -1633,37 +1626,6 @@
}
-void Assembler::ldnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& src) {
- LoadStorePairNonTemporal(rt, rt2, src,
- LoadPairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::stnp(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& dst) {
- LoadStorePairNonTemporal(rt, rt2, dst,
- StorePairNonTemporalOpFor(rt, rt2));
-}
-
-
-void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
- const CPURegister& rt2,
- const MemOperand& addr,
- LoadStorePairNonTemporalOp op) {
- DCHECK(!rt.Is(rt2));
- DCHECK(AreSameSizeAndType(rt, rt2));
- DCHECK(addr.IsImmediateOffset());
-
- LSDataSize size = CalcLSPairDataSize(
- static_cast<LoadStorePairOp>(op & LoadStorePairMask));
- Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
- ImmLSPair(addr.offset(), size));
-}
-
-
// Memory instructions.
void Assembler::ldrb(const Register& rt, const MemOperand& src) {
LoadStore(rt, src, LDRB_w);
@@ -2079,6 +2041,50 @@
}
+void Assembler::dcptr(Label* label) {
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ if (label->is_bound()) {
+ // The label is bound, so it does not need to be updated and the internal
+ // reference should be emitted.
+ //
+ // In this case, label->pos() returns the offset of the label from the
+ // start of the buffer.
+ internal_reference_positions_.push_back(pc_offset());
+ dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos()));
+ } else {
+ int32_t offset;
+ if (label->is_linked()) {
+ // The label is linked, so the internal reference should be added
+ // onto the end of the label's link chain.
+ //
+ // In this case, label->pos() returns the offset of the last linked
+ // instruction from the start of the buffer.
+ offset = label->pos() - pc_offset();
+ DCHECK(offset != kStartOfLabelLinkChain);
+ } else {
+ // The label is unused, so it now becomes linked and the internal
+ // reference is at the start of the new link chain.
+ offset = kStartOfLabelLinkChain;
+ }
+ // The instruction at pc is now the last link in the label's chain.
+ label->link_to(pc_offset());
+
+ // Traditionally the offset to the previous instruction in the chain is
+ // encoded in the instruction payload (e.g. branch range) but internal
+ // references are not instructions so while unbound they are encoded as
+ // two consecutive brk instructions. The two 16-bit immediates are used
+ // to encode the offset.
+ offset >>= kInstructionSizeLog2;
+ DCHECK(is_int32(offset));
+ uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
+ uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
+
+ brk(high16);
+ brk(low16);
+ }
+}
+
+
// Note:
// Below, a difference in case for the same letter indicates a
// negated bit.
@@ -2104,13 +2110,13 @@
// 0000.0000.0000.0000.0000.0000.0000.0000
uint64_t bits = double_to_rawbits(imm);
// bit7: a000.0000
- uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
+ uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
// bit6: 0b00.0000
- uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
+ uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
// bit5_to_0: 00cd.efgh
- uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
- return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
+ return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
}
@@ -2155,8 +2161,8 @@
DCHECK(is_uint16(imm));
- Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
- Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
+ ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
}
@@ -2172,7 +2178,7 @@
DCHECK(IsImmAddSub(immediate));
Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
- ImmAddSub(immediate) | dest_reg | RnSP(rn));
+ ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
} else if (operand.IsShiftedRegister()) {
DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
DCHECK(operand.shift() != ROR);
@@ -2226,7 +2232,7 @@
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
- EmitData(string, len);
+ EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize);
@@ -2329,7 +2335,8 @@
if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
DCHECK(IsImmConditionalCompare(immediate));
- ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
+ ccmpop = ConditionalCompareImmediateFixed | op |
+ ImmCondCmp(static_cast<unsigned>(immediate));
} else {
DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
@@ -2469,15 +2476,16 @@
const MemOperand& addr,
LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base());
- int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op);
- if (IsImmLSScaled(offset, size)) {
+ if (IsImmLSScaled(addr.offset(), size)) {
+ int offset = static_cast<int>(addr.offset());
// Use the scaled addressing mode.
Emit(LoadStoreUnsignedOffsetFixed | memop |
ImmLSUnsigned(offset >> size));
- } else if (IsImmLSUnscaled(offset)) {
+ } else if (IsImmLSUnscaled(addr.offset())) {
+ int offset = static_cast<int>(addr.offset());
// Use the unscaled addressing mode.
Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
} else {
@@ -2503,7 +2511,8 @@
} else {
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
- if (IsImmLSUnscaled(offset)) {
+ if (IsImmLSUnscaled(addr.offset())) {
+ int offset = static_cast<int>(addr.offset());
if (addr.IsPreIndex()) {
Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
} else {
@@ -2535,6 +2544,14 @@
}
+bool Assembler::IsImmLLiteral(int64_t offset) {
+ int inst_size = static_cast<int>(kInstructionSizeLog2);
+ bool offset_is_inst_multiple =
+ (((offset >> inst_size) << inst_size) == offset);
+ return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
+}
+
+
// Test if a given value can be encoded in the immediate field of a logical
// instruction.
// If it can be encoded, the function returns true, and values pointed to by n,
@@ -2814,9 +2831,11 @@
// Set up new buffer.
desc.buffer = NewArray<byte>(desc.buffer_size);
+ desc.origin = this;
desc.instr_size = pc_offset();
- desc.reloc_size = (buffer + buffer_size_) - reloc_info_writer.pos();
+ desc.reloc_size =
+ static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
// Copy the data.
intptr_t pc_delta = desc.buffer - buffer;
@@ -2838,24 +2857,31 @@
// buffer nor pc absolute pointing inside the code buffer, so there is no need
// to relocate any emitted relocation entries.
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
+ *p += pc_delta;
+ }
+
// Pending relocation entries are also relative, no need to relocate.
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
- RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- (rmode == RelocInfo::VENEER_POOL)) {
+ RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
+ if (((rmode >= RelocInfo::COMMENT) &&
+ (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
+ (rmode == RelocInfo::INTERNAL_REFERENCE) ||
+ (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
+ (rmode == RelocInfo::DEOPT_REASON) ||
+ (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
// Adjust code for new modes.
- DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || RelocInfo::IsVeneerPool(rmode));
+ DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
+ RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
+ RelocInfo::IsInternalReference(rmode) ||
+ RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
+ RelocInfo::IsGeneratorContinuation(rmode));
// These modes do not need an entry in the constant pool.
} else {
constpool_.RecordEntry(data, rmode);
@@ -2872,8 +2898,8 @@
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(
- reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
+ RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_),
+ rmode, RecordedAstId().ToInt(), NULL);
ClearRecordedAstId();
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
@@ -2962,9 +2988,8 @@
void Assembler::RecordVeneerPool(int location_offset, int size) {
- RelocInfo rinfo(buffer_ + location_offset,
- RelocInfo::VENEER_POOL, static_cast<intptr_t>(size),
- NULL);
+ RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL,
+ static_cast<intptr_t>(size), NULL);
reloc_info_writer.Write(&rinfo);
}
@@ -3006,7 +3031,7 @@
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(veneer);
+ branch->SetImmPCOffsetTarget(isolate(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
@@ -3022,7 +3047,7 @@
}
// Record the veneer pool size.
- int pool_size = SizeOfCodeGeneratedSince(&size_check);
+ int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);
if (unresolved_branches_.empty()) {
@@ -3069,30 +3094,9 @@
}
-void Assembler::RecordComment(const char* msg) {
- if (FLAG_code_comments) {
- CheckBuffer();
- RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
- }
-}
-
-
int Assembler::buffer_space() const {
- return reloc_info_writer.pos() - reinterpret_cast<byte*>(pc_);
-}
-
-
-void Assembler::RecordJSReturn() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::JS_RETURN);
-}
-
-
-void Assembler::RecordDebugBreakSlot() {
- positions_recorder()->WriteRecordedPositions();
- CheckBuffer();
- RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+ return static_cast<int>(reloc_info_writer.pos() -
+ reinterpret_cast<byte*>(pc_));
}
@@ -3103,20 +3107,6 @@
}
-Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return isolate->factory()->empty_constant_pool_array();
-}
-
-
-void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
- // No out-of-line constant pool support.
- DCHECK(!FLAG_enable_ool_constant_pool);
- return;
-}
-
-
void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be:
// adr rd, 0
@@ -3150,6 +3140,7 @@
}
-} } // namespace v8::internal
+} // namespace internal
+} // namespace v8
#endif // V8_TARGET_ARCH_ARM64