Merge "ART: A couple of checks were missed in class LockWord"
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index d55f310..0dcefea 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -157,7 +157,8 @@
-Wno-sign-promo \
-Wno-unused-parameter \
-Wstrict-aliasing \
- -fstrict-aliasing
+ -fstrict-aliasing \
+ -fvisibility=protected
ART_TARGET_CLANG_CFLAGS :=
ART_TARGET_CLANG_CFLAGS_arm :=
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index 52d1ee3..e2f3949 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -33,7 +33,13 @@
test-art-target-run-test-gcstress-optimizing-relocate-004-SignalTest32 \
test-art-target-run-test-gcstress-default-relocate-004-SignalTest32 \
test-art-target-run-test-gcstress-optimizing-no-prebuild-004-SignalTest32 \
- test-art-target-run-test-gcstress-default-no-prebuild-004-SignalTest32
+ test-art-target-run-test-gcstress-default-no-prebuild-004-SignalTest32 \
+ test-art-host-run-test-gcstress-default-prebuild-114-ParallelGC32 \
+ test-art-host-run-test-gcstress-interpreter-prebuild-114-ParallelGC32 \
+ test-art-host-run-test-gcstress-optimizing-prebuild-114-ParallelGC32 \
+ test-art-host-run-test-gcstress-default-prebuild-114-ParallelGC64 \
+ test-art-host-run-test-gcstress-interpreter-prebuild-114-ParallelGC64 \
+ test-art-host-run-test-gcstress-optimizing-prebuild-114-ParallelGC64
# List of known failing tests that do not prevent test execution from finishing when run.
# The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64.
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index dd9f414..0579f9b 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -61,8 +61,8 @@
# The elf writer test has dependencies on core.oat.
ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_OAT_OUT) $(2ND_TARGET_CORE_OAT_OUT)
-ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_JARS)
-ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_JARS)
+ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
+ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
# The path that all the source files are relative to (not necessarily the current directory).
@@ -117,7 +117,6 @@
runtime/monitor_pool_test.cc \
runtime/monitor_test.cc \
runtime/parsed_options_test.cc \
- runtime/proxy_test.cc \
runtime/reference_table_test.cc \
runtime/thread_pool_test.cc \
runtime/transaction_test.cc \
@@ -128,6 +127,7 @@
COMPILER_GTEST_COMMON_SRC_FILES := \
runtime/jni_internal_test.cc \
+ runtime/proxy_test.cc \
runtime/reflection_test.cc \
compiler/dex/global_value_numbering_test.cc \
compiler/dex/local_value_numbering_test.cc \
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index f2a8d84..f9a78be 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -148,16 +148,18 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
+ SrcMap* src_mapping_table,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
const std::vector<uint8_t>& native_gc_map,
const std::vector<uint8_t>* cfi_info)
: CompiledCode(driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
- mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
- vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
- gc_map_(driver->DeduplicateGCMap(native_gc_map)),
- cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(src_mapping_table->Arrange())),
+ mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
+ vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
+ gc_map_(driver->DeduplicateGCMap(native_gc_map)),
+ cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
}
CompiledMethod::CompiledMethod(CompilerDriver* driver,
@@ -170,6 +172,7 @@
: CompiledCode(driver, instruction_set, code),
frame_size_in_bytes_(frame_size_in_bytes),
core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())),
@@ -182,19 +185,22 @@
const std::string& symbol)
: CompiledCode(driver, instruction_set, code, symbol),
frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
- fp_spill_mask_(0), gc_map_(driver->DeduplicateGCMap(gc_map)) {
- mapping_table_ = driver->DeduplicateMappingTable(std::vector<uint8_t>());
- vmap_table_ = driver->DeduplicateVMapTable(std::vector<uint8_t>());
+ fp_spill_mask_(0),
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
+ mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
+ vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
+ gc_map_(driver->DeduplicateGCMap(gc_map)) {
}
CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set,
const std::string& code, const std::string& symbol)
: CompiledCode(driver, instruction_set, code, symbol),
frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
- fp_spill_mask_(0) {
- mapping_table_ = driver->DeduplicateMappingTable(std::vector<uint8_t>());
- vmap_table_ = driver->DeduplicateVMapTable(std::vector<uint8_t>());
- gc_map_ = driver->DeduplicateGCMap(std::vector<uint8_t>());
+ fp_spill_mask_(0),
+ src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
+ mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
+ vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
+ gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())) {
}
} // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index c98d06a..d02cbff 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -100,7 +100,97 @@
std::vector<uint32_t> oatdata_offsets_to_compiled_code_offset_;
};
-class CompiledMethod : public CompiledCode {
+class SrcMapElem {
+ public:
+ uint32_t from_;
+ int32_t to_;
+
+ bool operator<(const SrcMapElem& sme) const {
+ uint64_t lhs = (static_cast<uint64_t>(from_) << 32) + to_;
+ uint64_t rhs = (static_cast<uint64_t>(sme.from_) << 32) + sme.to_;
+ return lhs < rhs;
+ }
+
+ operator uint8_t() const {
+ return static_cast<uint8_t>(from_ + to_);
+ }
+};
+
+class SrcMap FINAL : public std::vector<SrcMapElem> {
+ public:
+ struct CompareByTo {
+ bool operator()(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+ return lhs.to_ < rhs.to_;
+ }
+ };
+
+ struct CompareByFrom {
+ bool operator()(const SrcMapElem& lhs, const SrcMapElem& rhs) {
+ return lhs.from_ < rhs.from_;
+ }
+ };
+
+ void SortByTo() {
+ std::sort(begin(), end(), CompareByTo());
+ }
+
+ void SortByFrom() {
+ std::sort(begin(), end(), CompareByFrom());
+ }
+
+ const_iterator FindByTo(int32_t to) const {
+ return std::lower_bound(begin(), end(), SrcMapElem({0, to}), CompareByTo());
+ }
+
+ SrcMap& Arrange() {
+ SortByTo();
+
+ // Remove duplicate pairs.
+ if (!empty()) {
+ SrcMap tmp;
+ tmp.swap(*this);
+ iterator it = tmp.begin();
+ iterator prev = it;
+ it++;
+ push_back(*prev);
+ for (; it != tmp.end(); it++) {
+ if (prev->from_ != it->from_ || prev->to_ != it->to_) {
+ push_back(*(prev = it));
+ }
+ }
+ }
+ return *this;
+ }
+
+ void DeltaFormat(const SrcMapElem& start, uint32_t highest_pc) {
+ // Convert from abs values to deltas.
+ if (!empty()) {
+ SortByFrom();
+
+ // TODO: one PC can be mapped to several Java src lines.
+ // Do we want such a one-to-many correspondence?
+
+ // Get rid of the entries at or past the highest PC.
+ size_t i = size() - 1;
+ for (; i > 0 ; i--) {
+ if ((*this)[i].from_ < highest_pc) {
+ break;
+ }
+ }
+ this->resize(i + 1);
+
+ for (size_t i = size(); --i >= 1; ) {
+ (*this)[i].from_ -= (*this)[i-1].from_;
+ (*this)[i].to_ -= (*this)[i-1].to_;
+ }
+ DCHECK((*this)[0].from_ >= start.from_);
+ (*this)[0].from_ -= start.from_;
+ (*this)[0].to_ -= start.to_;
+ }
+ }
+};
+
+class CompiledMethod FINAL : public CompiledCode {
public:
// Constructs a CompiledMethod for the non-LLVM compilers.
CompiledMethod(CompilerDriver* driver,
@@ -109,6 +199,7 @@
const size_t frame_size_in_bytes,
const uint32_t core_spill_mask,
const uint32_t fp_spill_mask,
+ SrcMap* src_mapping_table,
const std::vector<uint8_t>& mapping_table,
const std::vector<uint8_t>& vmap_table,
const std::vector<uint8_t>& native_gc_map,
@@ -145,6 +236,11 @@
return fp_spill_mask_;
}
+ const SrcMap& GetSrcMappingTable() const {
+ DCHECK(src_mapping_table_ != nullptr);
+ return *src_mapping_table_;
+ }
+
const std::vector<uint8_t>& GetMappingTable() const {
DCHECK(mapping_table_ != nullptr);
return *mapping_table_;
@@ -171,6 +267,8 @@
const uint32_t core_spill_mask_;
// For quick code, a bit mask describing spilled FPR callee-save registers.
const uint32_t fp_spill_mask_;
+ // For quick code, a set of pairs (PC, Line) mapping from native PC offset to Java line.
+ SrcMap* src_mapping_table_;
 // For quick code, a uleb128 encoded map from native PC offset to dex PC as well as dex PC to
// native PC offset. Size prefixed.
std::vector<uint8_t>* mapping_table_;
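
The new SrcMap holds (native PC, source line) pairs for the debug info. Arrange() sorts by (from, to) and drops exact duplicates before the table is deduplicated; DeltaFormat() re-sorts by PC, trims trailing entries at or past the method's highest PC, and rewrites the absolute pairs as deltas so they encode compactly. A minimal standalone sketch of those two steps over a plain std::vector, with illustrative names rather than the ART class:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Elem { uint32_t from; int32_t to; };  // (native pc offset, line)

    // Sort by (from, to) and drop exact duplicates -- what Arrange() does.
    void Arrange(std::vector<Elem>& m) {
      std::sort(m.begin(), m.end(), [](const Elem& a, const Elem& b) {
        return a.from != b.from ? a.from < b.from : a.to < b.to;
      });
      m.erase(std::unique(m.begin(), m.end(), [](const Elem& a, const Elem& b) {
        return a.from == b.from && a.to == b.to;
      }), m.end());
    }

    // Rewrite absolute pairs as deltas against the previous entry; the first
    // entry becomes relative to a caller-provided start -- the core of
    // DeltaFormat() once sorting and trimming are done.
    void DeltaFormat(std::vector<Elem>& m, Elem start) {
      for (size_t i = m.size(); i-- > 1; ) {
        m[i].from -= m[i - 1].from;
        m[i].to -= m[i - 1].to;
      }
      if (!m.empty()) {
        m[0].from -= start.from;
        m[0].to -= start.to;
      }
    }
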
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index dcc67c3..63f3e64 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -528,6 +528,7 @@
kFixupLoad, // Mostly for immediates.
kFixupVLoad, // FP load which *may* be pc-relative.
kFixupCBxZ, // Cbz, Cbnz.
+ kFixupTBxZ, // Tbz, Tbnz.
kFixupPushPop, // Not really pc relative, but changes size based on args.
kFixupCondBranch, // Conditional branch
kFixupT1Branch, // Thumb1 Unconditional branch
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index d7ef6f0..3575ade 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -56,8 +56,11 @@
return nullptr;
}
if (UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
- last_value_ = kNoValue; // Make bad.
- return nullptr;
+ // If we're still trying to converge, stop now. Otherwise, proceed to apply optimizations.
+ if (!modifications_allowed_) {
+ last_value_ = kNoValue; // Make bad.
+ return nullptr;
+ }
}
if (allocator == nullptr) {
allocator = allocator_;
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index c06ff6f..1a38692 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -214,7 +214,7 @@
static constexpr uint32_t kMaxBbsToProcessMultiplyFactor = 20u;
uint32_t bbs_processed_;
- uint32_t max_bbs_to_process_;
+ uint32_t max_bbs_to_process_; // Doesn't apply after the main GVN has converged.
// We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
// We usually don't check Good() until the end of LVN unless we're about to modify code.
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index 5997568..8b02269 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -656,13 +656,37 @@
}
}
-void LocalValueNumbering::MergeNullChecked(const ValueNameSet::value_type& entry,
- ValueNameSet::iterator hint) {
- // Merge null_checked_ for this ref.
- merge_names_.clear();
- merge_names_.resize(gvn_->merge_lvns_.size(), entry);
- if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
- null_checked_.insert(hint, entry);
+void LocalValueNumbering::MergeNullChecked() {
+ DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
+
+ // Find the LVN with the least entries in the set.
+ const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
+ for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+ if (lvn->null_checked_.size() < least_entries_lvn->null_checked_.size()) {
+ least_entries_lvn = lvn;
+ }
+ }
+
+ // For each null-checked value name check if it's null-checked in all the LVNs.
+ for (const auto& value_name : least_entries_lvn->null_checked_) {
+ // Merge null_checked_ for this ref.
+ merge_names_.clear();
+ merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
+ if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+ null_checked_.insert(null_checked_.end(), value_name);
+ }
+ }
+
+ // Now check if the least_entries_lvn has a null-check as the last insn.
+ const BasicBlock* least_entries_bb = gvn_->GetBasicBlock(least_entries_lvn->Id());
+ if (gvn_->HasNullCheckLastInsn(least_entries_bb, id_)) {
+ int s_reg = least_entries_bb->last_mir_insn->ssa_rep->uses[0];
+ uint32_t value_name = least_entries_lvn->GetSRegValueName(s_reg);
+ merge_names_.clear();
+ merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
+ if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+ null_checked_.insert(value_name);
+ }
}
}
@@ -896,8 +920,7 @@
IntersectSets<RangeCheckSet, &LocalValueNumbering::range_checked_>();
// Merge null_checked_. We may later insert more, such as merged object field values.
- MergeSets<ValueNameSet, &LocalValueNumbering::null_checked_,
- &LocalValueNumbering::MergeNullChecked>();
+ MergeNullChecked();
if (merge_type == kCatchMerge) {
// Memory is clobbered. New memory version already created, don't merge aliasing locations.
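
MergeNullChecked() now intersects the predecessors' null_checked_ sets by walking only the smallest set and probing the rest, rather than going through the generic MergeSets helper. The shape of that optimization, as a standalone sketch over std::set (illustrative types, not the LVN API; assumes at least one input set):

    #include <cstdint>
    #include <set>
    #include <vector>

    using ValueNameSet = std::set<uint32_t>;

    // Intersect several sets by iterating the smallest one and checking
    // membership in all the others.
    ValueNameSet Intersect(const std::vector<const ValueNameSet*>& sets) {
      const ValueNameSet* least = sets[0];
      for (const ValueNameSet* s : sets) {
        if (s->size() < least->size()) least = s;
      }
      ValueNameSet result;
      for (uint32_t name : *least) {
        bool in_all = true;
        for (const ValueNameSet* s : sets) {
          if (s->count(name) == 0) { in_all = false; break; }
        }
        // Hinted insert: names arrive in sorted order, so appending is cheap.
        if (in_all) result.insert(result.end(), name);
      }
      return result;
    }
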
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 855d66d..f6a454b 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -343,11 +343,11 @@
EscapedIFieldClobberSet::iterator hint);
void MergeEscapedArrayClobberSets(const EscapedArrayClobberSet::value_type& entry,
EscapedArrayClobberSet::iterator hint);
- void MergeNullChecked(const ValueNameSet::value_type& entry, ValueNameSet::iterator hint);
void MergeSFieldValues(const SFieldToValueMap::value_type& entry,
SFieldToValueMap::iterator hint);
void MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
IFieldLocToValueMap::iterator hint);
+ void MergeNullChecked();
template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
void MergeAliasingValues(const typename Map::value_type& entry, typename Map::iterator hint);
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index dee1361..16b529a 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -19,6 +19,7 @@
#include <inttypes.h>
#include <queue>
+#include "base/bit_vector-inl.h"
#include "base/stl_util.h"
#include "compiler_internals.h"
#include "dex_file-inl.h"
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index c7dd85c..6658848 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 2ad11da..3eb7c83 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -383,8 +383,17 @@
RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
if (reg_class == kFPReg) {
NewLIR2(kThumb2Vabsd, rl_result.reg.GetReg(), rl_src.reg.GetReg());
+ } else if (rl_result.reg.GetLow().GetReg() != rl_src.reg.GetHigh().GetReg()) {
+ // No inconvenient overlap.
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rl_src.reg.GetHigh(), 0x7fffffff);
} else {
- OpRegImm(kOpAnd, rl_result.reg.GetHigh(), 0x7fffffff);
+ // Inconvenient overlap, use a temp register to preserve the high word of the source.
+ RegStorage rs_tmp = AllocTemp();
+ OpRegCopy(rs_tmp, rl_src.reg.GetHigh());
+ OpRegCopy(rl_result.reg.GetLow(), rl_src.reg.GetLow());
+ OpRegRegImm(kOpAnd, rl_result.reg.GetHigh(), rs_tmp, 0x7fffffff);
+ FreeTemp(rs_tmp);
}
StoreValueWide(rl_dest, rl_result);
return true;
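
For a double held in a core register pair, abs() is just "clear bit 63", i.e. AND the high word with 0x7fffffff; the new branches also cover the case where the result's low register aliases the source's high register by parking the high word in a temp first. The bit trick itself, sketched in portable C++ (assuming IEEE-754 doubles):

    #include <cstdint>
    #include <cstring>

    // abs(double) as an integer operation: clear the sign bit (bit 63),
    // which is an AND of the high 32-bit word with 0x7fffffff.
    double AbsViaBits(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits &= ~(UINT64_C(1) << 63);
      std::memcpy(&x, &bits, sizeof(x));
      return x;
    }
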
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a449cbd..d001dd6 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -116,6 +116,7 @@
#define IS_SIGNED_IMM7(value) IS_SIGNED_IMM(7, value)
#define IS_SIGNED_IMM9(value) IS_SIGNED_IMM(9, value)
#define IS_SIGNED_IMM12(value) IS_SIGNED_IMM(12, value)
+#define IS_SIGNED_IMM14(value) IS_SIGNED_IMM(14, value)
#define IS_SIGNED_IMM19(value) IS_SIGNED_IMM(19, value)
#define IS_SIGNED_IMM21(value) IS_SIGNED_IMM(21, value)
@@ -355,7 +356,10 @@
kA64Sub4rrro, // sub [s1001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
kA64Sub4RRre, // sub [s1001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
kA64Subs3rRd, // subs[s111000100] imm_12[21-10] rn[9-5] rd[4-0].
+ kA64Tst2rl, // tst alias of "ands rzr, rn, #imm".
kA64Tst3rro, // tst alias of "ands rzr, arg1, arg2, arg3".
+ kA64Tbnz3rht, // tbnz imm_6_b5[31] [0110111] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
+ kA64Tbz3rht, // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
kA64Ubfm4rrdd, // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Last,
kA64NotWide = 0, // Flag used to select the first instruction variant.
@@ -400,23 +404,24 @@
enum ArmEncodingKind {
// All the formats below are encoded in the same way (as a kFmtBitBlt).
// These are grouped together, for fast handling (e.g. "if (LIKELY(fmt <= kFmtBitBlt)) ...").
- kFmtRegW = 0, // Word register (w) or wzr.
- kFmtRegX, // Extended word register (x) or xzr.
- kFmtRegR, // Register with same width as the instruction or zr.
- kFmtRegWOrSp, // Word register (w) or wsp.
- kFmtRegXOrSp, // Extended word register (x) or sp.
- kFmtRegROrSp, // Register with same width as the instruction or sp.
- kFmtRegS, // Single FP reg.
- kFmtRegD, // Double FP reg.
- kFmtRegF, // Single/double FP reg depending on the instruction width.
- kFmtBitBlt, // Bit string using end/start.
+ kFmtRegW = 0, // Word register (w) or wzr.
+ kFmtRegX, // Extended word register (x) or xzr.
+ kFmtRegR, // Register with same width as the instruction or zr.
+ kFmtRegWOrSp, // Word register (w) or wsp.
+ kFmtRegXOrSp, // Extended word register (x) or sp.
+ kFmtRegROrSp, // Register with same width as the instruction or sp.
+ kFmtRegS, // Single FP reg.
+ kFmtRegD, // Double FP reg.
+ kFmtRegF, // Single/double FP reg depending on the instruction width.
+ kFmtBitBlt, // Bit string using end/start.
// Less likely formats.
- kFmtUnused, // Unused field and marks end of formats.
- kFmtImm21, // Sign-extended immediate using [23..5,30..29].
- kFmtShift, // Register shift, 9-bit at [23..21, 15..10]..
- kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
- kFmtSkip, // Unused field, but continue to next.
+ kFmtUnused, // Unused field and marks end of formats.
+ kFmtImm6Shift, // Shift immediate, 6-bit at [31, 23..19].
+ kFmtImm21, // Sign-extended immediate using [23..5,30..29].
+ kFmtShift, // Register shift, 9-bit at [23..21, 15..10]..
+ kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
+ kFmtSkip, // Unused field, but continue to next.
};
// Struct used to define the snippet positions for each A64 opcode.
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 15c89f2..5115246 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -89,6 +89,7 @@
* M -> 16-bit shift expression ("" or ", lsl #16" or ", lsl #32"...)
 * B -> dmb option string (sy, st, ish, ishst, nsh, nshst)
* H -> operand shift
+ * h -> 6-bit shift immediate
* T -> register shift (either ", lsl #0" or ", lsl #12")
* e -> register extend (e.g. uxtb #1)
* o -> register shift (e.g. lsl #1) for Word registers
@@ -614,10 +615,24 @@
kFmtRegR, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
"subs", "!0r, !1R, #!2d", kFixupNone),
- ENCODING_MAP(WIDE(kA64Tst3rro), SF_VARIANTS(0x6a000000),
+ ENCODING_MAP(WIDE(kA64Tst2rl), SF_VARIANTS(0x7200001f),
+ kFmtRegR, 9, 5, kFmtBitBlt, 22, 10, kFmtUnused, -1, -1,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | SETS_CCODES,
+ "tst", "!0r, !1l", kFixupNone),
+ ENCODING_MAP(WIDE(kA64Tst3rro), SF_VARIANTS(0x6a00001f),
kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtShift, -1, -1,
- kFmtUnused, -1, -1, IS_QUAD_OP | REG_USE01 | SETS_CCODES,
+ kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
"tst", "!0r, !1r!2o", kFixupNone),
+ // NOTE: Tbz/Tbnz do not require SETS_CCODES, but they may be replaced by other LIRs
+ // which require SETS_CCODES in the fix-up stage.
+ ENCODING_MAP(WIDE(kA64Tbnz3rht), CUSTOM_VARIANTS(0x37000000, 0x37000000),
+ kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
+ "tbnz", "!0r, #!1h, !2t", kFixupTBxZ),
+ ENCODING_MAP(WIDE(kA64Tbz3rht), CUSTOM_VARIANTS(0x36000000, 0x36000000),
+ kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
+ IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
+ "tbz", "!0r, #!1h, !2t", kFixupTBxZ),
ENCODING_MAP(WIDE(kA64Ubfm4rrdd), SF_N_VARIANTS(0x53000000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE1,
@@ -787,6 +802,11 @@
value |= ((operand & 0x1ffffc) >> 2) << 5;
bits |= value;
break;
+ case kFmtImm6Shift:
+ value = (operand & 0x1f) << 19;
+ value |= ((operand & 0x20) >> 5) << 31;
+ bits |= value;
+ break;
default:
LOG(FATAL) << "Bad fmt for arg. " << i << " in " << encoder->name
<< " (" << kind << ")";
@@ -827,11 +847,6 @@
*/
int generation = 0;
while (true) {
- // TODO(Arm64): check whether passes and offset adjustments are really necessary.
- // Currently they aren't, as - in the fixups below - LIR are never inserted.
- // Things can be different if jump ranges above 1 MB need to be supported.
- // If they are not, then we can get rid of the assembler retry logic.
-
offset_adjustment = 0;
AssemblerStatus res = kSuccess; // Assume success
generation ^= 1;
@@ -839,13 +854,9 @@
lir = first_fixup_;
prev_lir = NULL;
while (lir != NULL) {
- /*
- * NOTE: the lir being considered here will be encoded following the switch (so long as
- * we're not in a retry situation). However, any new non-pc_rel instructions inserted
- * due to retry must be explicitly encoded at the time of insertion. Note that
- * inserted instructions don't need use/def flags, but do need size and pc-rel status
- * properly updated.
- */
+ // NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
+ // the time of insertion. Note that inserted instructions don't need use/def flags, but do
+ // need size and pc-rel status properly updated.
lir->offset += offset_adjustment;
// During pass, allows us to tell whether a node has been updated with offset_adjustment yet.
lir->flags.generation = generation;
@@ -861,7 +872,8 @@
CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
int32_t delta = target - pc;
- if (!((delta & 0x3) == 0 && IS_SIGNED_IMM19(delta >> 2))) {
+ DCHECK_EQ(delta & 0x3, 0);
+ if (!IS_SIGNED_IMM19(delta >> 2)) {
LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
}
lir->operands[0] = delta >> 2;
@@ -876,12 +888,75 @@
CodeOffset target = target_lir->offset +
((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
int32_t delta = target - pc;
- if (!((delta & 0x3) == 0 && IS_SIGNED_IMM19(delta >> 2))) {
+ DCHECK_EQ(delta & 0x3, 0);
+ if (!IS_SIGNED_IMM19(delta >> 2)) {
LOG(FATAL) << "Invalid jump range in kFixupLoad";
}
lir->operands[1] = delta >> 2;
break;
}
+ case kFixupTBxZ: {
+ int16_t opcode = lir->opcode;
+ RegStorage reg(lir->operands[0] | RegStorage::kValid);
+ int32_t imm = lir->operands[1];
+ DCHECK_EQ(IS_WIDE(opcode), reg.Is64Bit());
+ DCHECK_LT(imm, 64);
+ if (imm >= 32) {
+ DCHECK(IS_WIDE(opcode));
+ } else if (kIsDebugBuild && IS_WIDE(opcode)) {
+ // "tbz/tbnz x0, #imm(<32)" is the same with "tbz/tbnz w0, #imm(<32)", but GCC/oatdump
+ // will disassemble it as "tbz/tbnz w0, #imm(<32)". So unwide the LIR to make the
+ // compiler log behave the same with those disassembler in debug build.
+ // This will also affect tst instruction if it need to be replaced, but there is no
+ // performance difference between "tst Xt" and "tst Wt".
+ lir->opcode = UNWIDE(opcode);
+ lir->operands[0] = As32BitReg(reg).GetReg();
+ }
+
+ // Fix-up branch offset.
+ LIR *target_lir = lir->target;
+ DCHECK(target_lir);
+ CodeOffset pc = lir->offset;
+ CodeOffset target = target_lir->offset +
+ ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
+ int32_t delta = target - pc;
+ DCHECK_EQ(delta & 0x3, 0);
+ // Check if branch offset can be encoded in tbz/tbnz.
+ if (!IS_SIGNED_IMM14(delta >> 2)) {
+ DexOffset dalvik_offset = lir->dalvik_offset;
+ int16_t opcode = lir->opcode;
+ LIR* target = lir->target;
+ // "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)".
+ offset_adjustment -= lir->flags.size;
+ int32_t imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
+ DCHECK_NE(imm, -1);
+ lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl;
+ lir->operands[1] = imm;
+ lir->target = nullptr;
+ lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup;
+ lir->flags.size = EncodingMap[kA64Tst2rl].size;
+ offset_adjustment += lir->flags.size;
+ // Insert "beq/bneq label".
+ opcode = UNWIDE(opcode);
+ DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht);
+ LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct,
+ opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target);
+ InsertLIRAfter(lir, new_lir);
+ new_lir->offset = lir->offset + lir->flags.size;
+ new_lir->flags.generation = generation;
+ new_lir->flags.fixup = EncodingMap[kA64B2ct].fixup;
+ new_lir->flags.size = EncodingMap[kA64B2ct].size;
+ offset_adjustment += new_lir->flags.size;
+ // The lir is no longer pc-relative; unlink it and link in new_lir.
+ ReplaceFixup(prev_lir, lir, new_lir);
+ prev_lir = new_lir; // Continue with the new instruction.
+ lir = new_lir->u.a.pcrel_next;
+ res = kRetryAll;
+ continue;
+ }
+ lir->operands[2] = delta >> 2;
+ break;
+ }
case kFixupAdr: {
LIR* target_lir = lir->target;
int32_t delta;
@@ -910,6 +985,7 @@
}
if (res == kSuccess) {
+ DCHECK_EQ(offset_adjustment, 0);
break;
} else {
assembler_retries++;
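
kFixupTBxZ exists because tbz/tbnz only encode a signed 14-bit word offset (about +/-32 KiB). When the target is out of range, the fixup rewrites "tbz/tbnz Rt, #bit, label" into "tst Rt, #(1 << bit)" plus a conditional branch (b.eq for tbz, b.ne for tbnz) with 19-bit range, and the assembler retries. A sketch of that decision, with hypothetical helper names:

    #include <cstdint>

    // Does a signed value fit in 'bits' bits?
    bool IsSignedImm(int bits, int32_t value) {
      return value >= -(1 << (bits - 1)) && value < (1 << (bits - 1));
    }

    enum class Emit { kTbz, kTstPlusBranch };

    // Branch offsets are word-aligned, so tbz/tbnz store delta >> 2 in a
    // 14-bit field; fall back to tst + b.cond when that does not fit.
    Emit ChooseTbzEncoding(int32_t delta_bytes) {
      return IsSignedImm(14, delta_bytes >> 2) ? Emit::kTbz : Emit::kTstPlusBranch;
    }
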
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index 0538c31..eddc3a3 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -208,9 +208,9 @@
OpRegRegImm(kOpAdd, rs_x2, rs_x0, mirror::Object::MonitorOffset().Int32Value());
NewLIR2(kA64Ldxr2rX, rw3, rx2);
MarkPossibleNullPointerException(opt_flags);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_x1, 0, NULL);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_w3, 0, NULL);
NewLIR3(kA64Stxr3wrX, rw3, rw1, rx2);
- LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_x1, 0, NULL);
+ LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_w3, 0, NULL);
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
not_unlocked_branch->target = slow_path_target;
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index d00c57d..d1b9c81 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -271,8 +271,12 @@
ArmOpcode opcode = kA64Cbz2rt;
ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
+ } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
+ ArmOpcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
+ ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+ int value = reg.Is64Bit() ? 63 : 31;
+ branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
}
- // TODO: Use tbz/tbnz for < 0 or >= 0.
}
if (branch == nullptr) {
@@ -856,16 +860,14 @@
OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
// Copy one element.
- OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 2);
- LIR* jmp_to_copy_two = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ LIR* jmp_to_copy_two = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 1, 0);
OpRegImm(kOpSub, rs_length, 2);
LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
// Copy two elements.
LIR *copy_two = NewLIR0(kPseudoTargetLabel);
- OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 4);
- LIR* jmp_to_copy_four = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+ LIR* jmp_to_copy_four = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 2, 0);
OpRegImm(kOpSub, rs_length, 4);
LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
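
The new kCondLt/kCondGe branch lowering works because a signed comparison against zero only needs the sign bit: tbnz reg, #63 (or #31 for w registers) branches when the value is negative, tbz when it is non-negative. In C++ terms:

    #include <cstdint>

    // "x < 0" is a single-bit test of the sign bit -- what the lowering
    // to "tbnz reg, #63, target" relies on.
    bool IsNegative64(int64_t x) {
      return ((static_cast<uint64_t>(x) >> 63) & 1) != 0;
    }
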
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 9b4546a..685f8d5 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -504,6 +504,9 @@
else
strcpy(tbuf, ", DecodeError3");
break;
+ case 'h':
+ snprintf(tbuf, arraysize(tbuf), "%d", operand);
+ break;
default:
strcpy(tbuf, "DecodeError1");
break;
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index be79b63..ebebe70 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -15,6 +15,7 @@
*/
#include "dex/compiler_internals.h"
+#include "driver/compiler_options.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "gc_map_builder.h"
@@ -648,15 +649,19 @@
void Mir2Lir::CreateMappingTables() {
+ bool generate_src_map = cu_->compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols();
+
uint32_t pc2dex_data_size = 0u;
uint32_t pc2dex_entries = 0u;
uint32_t pc2dex_offset = 0u;
uint32_t pc2dex_dalvik_offset = 0u;
+ uint32_t pc2dex_src_entries = 0u;
uint32_t dex2pc_data_size = 0u;
uint32_t dex2pc_entries = 0u;
uint32_t dex2pc_offset = 0u;
uint32_t dex2pc_dalvik_offset = 0u;
for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ pc2dex_src_entries++;
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
pc2dex_entries += 1;
DCHECK(pc2dex_offset <= tgt_lir->offset);
@@ -677,6 +682,10 @@
}
}
+ if (generate_src_map) {
+ src_mapping_table_.reserve(pc2dex_src_entries);
+ }
+
uint32_t total_entries = pc2dex_entries + dex2pc_entries;
uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
@@ -692,6 +701,10 @@
dex2pc_offset = 0u;
dex2pc_dalvik_offset = 0u;
for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ if (generate_src_map && !tgt_lir->flags.is_nop) {
+ src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
+ static_cast<int32_t>(tgt_lir->dalvik_offset)}));
+ }
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
DCHECK(pc2dex_offset <= tgt_lir->offset);
write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
@@ -1088,7 +1101,7 @@
std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnFrameDescriptionEntry());
CompiledMethod* result =
new CompiledMethod(cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
- core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
+ core_spill_mask_, fp_spill_mask_, &src_mapping_table_, encoded_mapping_table_,
vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
return result;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index ea56989..95c1262 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -220,9 +220,9 @@
int dest_reg_class) {
// Implement as a branch-over.
// TODO: Conditional move?
- LoadConstant(rs_dest, false_val); // Favors false.
- LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
LoadConstant(rs_dest, true_val);
+ LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+ LoadConstant(rs_dest, false_val);
LIR* target_label = NewLIR0(kPseudoTargetLabel);
ne_branchover->target = target_label;
}
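
The old sequence produced the inverted result: it loaded false_val, branched over when the condition held (keeping false_val), and only otherwise reached the true_val load. The fixed order loads true_val first and branches over the false_val load when the condition holds. Its semantics, modeled in C++:

    #include <cstdint>

    // Branch-over select, as now emitted: dest = cond ? true_val : false_val.
    int32_t SelectConst32(bool cond_holds, int32_t true_val, int32_t false_val) {
      int32_t dest = true_val;  // LoadConstant(rs_dest, true_val)
      if (!cond_holds) {        // OpCmpBranch(...), taken when the condition holds
        dest = false_val;       // LoadConstant(rs_dest, false_val)
      }
      return dest;              // ne_branchover->target lands here
    }
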
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 4b8f794..573bd91 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -1715,6 +1715,8 @@
*/
int live_sreg_;
CodeBuffer code_buffer_;
+ // The source mapping table data (pc -> dex). More entries than in encoded_mapping_table_.
+ SrcMap src_mapping_table_;
// The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
std::vector<uint8_t> encoded_mapping_table_;
ArenaVector<uint32_t> core_vmap_table_;
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index 93b7999..6173163 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -263,8 +263,10 @@
{ kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0, false }, "Cmc", "" },
{ kX86Shld32RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { 0, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32RRI", "!0r,!1r,!2d" },
+ { kX86Shld32RRC, kShiftRegRegCl, IS_TERTIARY_OP | REG_DEF0_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0x0F, 0xA5, 0, 0, 0, 0, false }, "Shld32RRC", "!0r,!1r,cl" },
{ kX86Shld32MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32MRI", "[!0r+!1d],!2r,!3d" },
{ kX86Shrd32RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { 0, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32RRI", "!0r,!1r,!2d" },
+ { kX86Shrd32RRC, kShiftRegRegCl, IS_TERTIARY_OP | REG_DEF0_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0x0F, 0xAD, 0, 0, 0, 0, false }, "Shrd32RRC", "!0r,!1r,cl" },
{ kX86Shrd32MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0, 0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32MRI", "[!0r+!1d],!2r,!3d" },
{ kX86Shld64RRI, kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01 | SETS_CCODES, { REX_W, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64RRI", "!0r,!1r,!2d" },
{ kX86Shld64MRI, kMemRegImm, IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W, 0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64MRI", "[!0r+!1d],!2r,!3d" },
@@ -591,6 +593,7 @@
case kShiftRegCl: return true;
case kRegCond: return true;
case kRegRegCond: return true;
+ case kShiftRegRegCl: return true;
case kJmp:
switch (entry->opcode) {
case kX86JmpR: return true;
@@ -768,6 +771,9 @@
DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[4]));
return ComputeSize(entry, lir->operands[4], lir->operands[1], lir->operands[0],
lir->operands[3]);
+ case kShiftRegRegCl: // lir operands - 0: reg1, 1: reg2, 2: cl
+ DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[2]));
+ return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], 0);
case kRegCond: // lir operands - 0: reg, 1: cond
return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], 0);
case kMemCond: // lir operands - 0: base, 1: disp, 2: cond
@@ -1336,6 +1342,19 @@
DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
+void X86Mir2Lir::EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t raw_cl) {
+ DCHECK_EQ(false, entry->skeleton.r8_form);
+ DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(raw_cl));
+ EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
+ uint8_t low_reg1 = LowRegisterBits(raw_reg1);
+ uint8_t low_reg2 = LowRegisterBits(raw_reg2);
+ uint8_t modrm = (3 << 6) | (low_reg1 << 3) | low_reg2;
+ code_buffer_.push_back(modrm);
+ DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+ DCHECK_EQ(0, entry->skeleton.ax_opcode);
+ DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
void X86Mir2Lir::EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp,
int32_t imm) {
DCHECK_EQ(false, entry->skeleton.r8_form);
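
EmitShiftRegRegCl assembles the ModRM byte directly: mod = 0b11 selects the register-direct form, the reg field carries the fill source and r/m the destination, which is why the call site (below) passes operands[1] before operands[0]. A hedged sketch of the byte layout:

    #include <cstdint>

    // ModRM = mod[7:6] | reg[5:3] | rm[2:0]; mod == 3 means register-direct.
    // For shld/shrd r/m32, r32, CL the destination goes in r/m and the fill
    // source in reg.
    uint8_t ModRMRegReg(uint8_t reg_field, uint8_t rm_field) {
      return static_cast<uint8_t>((3u << 6) | ((reg_field & 7u) << 3) | (rm_field & 7u));
    }
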
@@ -1829,6 +1848,9 @@
case kShiftMemCl: // lir operands - 0: base, 1:displacement, 2: cl
EmitShiftMemCl(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
break;
+ case kShiftRegRegCl: // lir operands - 0: reg1, 1: reg2, 2: cl
+ EmitShiftRegRegCl(entry, lir->operands[1], lir->operands[0], lir->operands[2]);
+ break;
case kRegCond: // lir operands - 0: reg, 1: condition
EmitRegCond(entry, lir->operands[0], lir->operands[1]);
break;
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 1f5b350..7d1e20e 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -457,6 +457,8 @@
void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
+ void EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
+ int32_t raw_cl);
void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
@@ -478,8 +480,10 @@
void GenConstWide(RegLocation rl_dest, int64_t value);
void GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir);
void GenShiftByteVector(BasicBlock *bb, MIR *mir);
- void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
- void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
+ void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
+ uint32_t m4);
+ void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
+ uint32_t m3, uint32_t m4);
void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
@@ -551,7 +555,8 @@
void GenMoveVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed multiply of units in two vector registers: vB = vB .* @note vC using vA to know the type of the vector.
+ * @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
+ * the type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -561,7 +566,8 @@
void GenMultiplyVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
+ * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -571,7 +577,8 @@
void GenAddVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
+ * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -581,7 +588,8 @@
void GenSubtractVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
+ * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -591,7 +599,8 @@
void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
+ * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
+ * know the type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -601,7 +610,8 @@
void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
+ * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
+ * to know the type of the vector.
 * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -611,7 +621,8 @@
void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
+ * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
+ * type of the vector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
@@ -619,7 +630,8 @@
void GenAndVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
+ * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
@@ -629,7 +641,8 @@
void GenOrVector(BasicBlock *bb, MIR *mir);
/*
- * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
+ * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
+ * type of the vector.
* @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index afa2ae2..cc51538 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -793,8 +793,115 @@
bool X86Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
- if (is_long && cu_->instruction_set == kX86) {
- return false;
+ if (is_long && !cu_->target64) {
+ /*
+ * We want to implement the following algorithm
+ * mov eax, low part of arg1
+ * mov edx, high part of arg1
+ * mov ebx, low part of arg2
+ * mov ecx, high part of arg2
+ * mov edi, eax
+ * sub edi, ebx
+ * mov edi, edx
+ * sbb edi, ecx
+ * is_min ? "cmovgel eax, ebx" : "cmovll eax, ebx"
+ * is_min ? "cmovgel edx, ecx" : "cmovll edx, ecx"
+ *
+ * The algorithm above needs 5 registers: a pair for the first operand
+ * (which later will be used as result), a pair for the second operand
+ * and a temp register (e.g. 'edi') for intermediate calculations.
+ * Ideally we have 6 GP caller-save registers in 32-bit mode. They are:
+ * 'eax', 'ebx', 'ecx', 'edx', 'esi' and 'edi'. So there should
+ * always be enough registers to operate on. Practically, there is a
+ * pair of registers, 'edi' and 'esi', which hold promoted values and
+ * sometimes should be treated as callee-save. If one of the operands
+ * is in the promoted registers then we have enough registers to
+ * operate on. Otherwise there is a lack of resources and we have to
+ * save 'edi' before the calculations and restore it afterwards.
+ */
+
+ RegLocation rl_src1 = info->args[0];
+ RegLocation rl_src2 = info->args[2];
+ RegLocation rl_dest = InlineTargetWide(info);
+ int res_vreg, src1_vreg, src2_vreg;
+
+ /*
+ * If the result register is the same as the second element, then we
+ * need to be careful. The reason is that the first copy will
+ * inadvertently clobber the second element with the first one thus
+ * yielding the wrong result. Thus we do a swap in that case.
+ */
+ res_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
+ src2_vreg = mir_graph_->SRegToVReg(rl_src2.s_reg_low);
+ if (res_vreg == src2_vreg) {
+ std::swap(rl_src1, rl_src2);
+ }
+
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+ // Pick the first integer as min/max.
+ OpRegCopyWide(rl_result.reg, rl_src1.reg);
+
+ /*
+ * If the integers are both in the same register, then there is
+ * nothing else to do because they are equal and we have already
+ * moved one into the result.
+ */
+ src1_vreg = mir_graph_->SRegToVReg(rl_src1.s_reg_low);
+ src2_vreg = mir_graph_->SRegToVReg(rl_src2.s_reg_low);
+ if (src1_vreg == src2_vreg) {
+ StoreValueWide(rl_dest, rl_result);
+ return true;
+ }
+
+ // Free registers to make some room for the second operand.
+ // But don't try to free ourselves or promoted registers.
+ if (res_vreg != src1_vreg &&
+ IsTemp(rl_src1.reg.GetLow()) && IsTemp(rl_src1.reg.GetHigh())) {
+ FreeTemp(rl_src1.reg);
+ }
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+ // Do we have a free register for intermediate calculations?
+ RegStorage tmp = AllocTemp(false);
+ if (tmp == RegStorage::InvalidReg()) {
+ /*
+ * No, will use 'edi'.
+ *
+ * As mentioned above we have 4 temporary and 2 promotable
+ * caller-save registers. Therefore, we assume that a free
+ * register can be allocated only if 'esi' and 'edi' are
+ * already used as operands. If number of promotable registers
+ * increases from 2 to 4 then our assumption fails and operand
+ * data is corrupted.
+ * Let's DCHECK it.
+ */
+ DCHECK(IsTemp(rl_src2.reg.GetLow()) &&
+ IsTemp(rl_src2.reg.GetHigh()) &&
+ IsTemp(rl_result.reg.GetLow()) &&
+ IsTemp(rl_result.reg.GetHigh()));
+ tmp = rs_rDI;
+ NewLIR1(kX86Push32R, tmp.GetReg());
+ }
+
+ // Now we are ready to do calculations.
+ OpRegReg(kOpMov, tmp, rl_result.reg.GetLow());
+ OpRegReg(kOpSub, tmp, rl_src2.reg.GetLow());
+ OpRegReg(kOpMov, tmp, rl_result.reg.GetHigh());
+ OpRegReg(kOpSbc, tmp, rl_src2.reg.GetHigh());
+
+ // Pop 'edi' here to break up the dependency chain a bit.
+ if (tmp == rs_rDI) {
+ NewLIR1(kX86Pop32R, tmp.GetReg());
+ }
+
+ // Conditionally move the other integer into the destination register.
+ ConditionCode cc = is_min ? kCondGe : kCondLt;
+ OpCondRegReg(kOpCmov, cc, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+ OpCondRegReg(kOpCmov, cc, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
+ StoreValueWide(rl_dest, rl_result);
+ return true;
}
// Get the two arguments to the invoke and place them in GP registers.
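
The sub/sbb pair above performs a full 64-bit signed comparison out of 32-bit halves: the low-word subtract produces a borrow, the high-word subtract-with-borrow folds it in, and the resulting sign decides the comparison that the two cmovs act on. The arithmetic, modeled in C++ (a sketch of the flag computation, not the LIR):

    #include <cstdint>

    // 64-bit signed "a < b" via 32-bit halves, as sub/sbb computes it.
    bool SignedLess64(int32_t a_hi, uint32_t a_lo, int32_t b_hi, uint32_t b_lo) {
      uint32_t borrow = (a_lo < b_lo) ? 1u : 0u;       // sub tmp, b_lo -> CF
      int64_t hi_diff =
          static_cast<int64_t>(a_hi) - b_hi - borrow;  // sbb tmp, b_hi
      return hi_diff < 0;                              // the "less than" flags
    }

With the result pair preloaded from the first operand, cmovge (for min) or cmovl (for max) then overwrites it with the second operand exactly when the first one loses.
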
@@ -3033,7 +3140,53 @@
void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift) {
if (!cu_->target64) {
- Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+ // Long shift operations in 32-bit mode: use shld or shrd to create a 32-bit register filled
+ // from the other half, then shift the other half. If the shift amount is less than 32 we're
+ // done; otherwise move one register to the other and place zero or sign bits in the vacated one.
+ LIR* branch;
+ FlushAllRegs();
+ LockCallTemps();
+ LoadValueDirectFixed(rl_shift, rs_rCX);
+ RegStorage r_tmp = RegStorage::MakeRegPair(rs_rAX, rs_rDX);
+ LoadValueDirectWideFixed(rl_src1, r_tmp);
+ switch (opcode) {
+ case Instruction::SHL_LONG:
+ case Instruction::SHL_LONG_2ADDR:
+ NewLIR3(kX86Shld32RRC, r_tmp.GetHighReg(), r_tmp.GetLowReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Sal32RC, r_tmp.GetLowReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+ branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+ OpRegCopy(r_tmp.GetHigh(), r_tmp.GetLow());
+ LoadConstant(r_tmp.GetLow(), 0);
+ branch->target = NewLIR0(kPseudoTargetLabel);
+ break;
+ case Instruction::SHR_LONG:
+ case Instruction::SHR_LONG_2ADDR:
+ NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Sar32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+ branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+ OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
+ NewLIR2(kX86Sar32RI, r_tmp.GetHighReg(), 31);
+ branch->target = NewLIR0(kPseudoTargetLabel);
+ break;
+ case Instruction::USHR_LONG:
+ case Instruction::USHR_LONG_2ADDR:
+ NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(),
+ rs_rCX.GetReg());
+ NewLIR2(kX86Shr32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
+ NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+ branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+ OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
+ LoadConstant(r_tmp.GetHigh(), 0);
+ branch->target = NewLIR0(kPseudoTargetLabel);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected case: " << opcode;
+ return;
+ }
+ RegLocation rl_result = LocCReturnWide();
+ StoreValueWide(rl_dest, rl_result);
return;
}
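
shld/shrd are what make the 32-bit long shifts work: shld fills the bits vacated in the high word from the low word while the low word is shifted separately, and since the CL-count forms only honor cl modulo 32, the "test cl, 32" patches up counts of 32..63 by moving one word into the other. The SHL_LONG case in plain C++ (a model assuming 0 <= n < 64):

    #include <cstdint>

    // 64-bit logical left shift from 32-bit halves, mirroring the emitted
    // shld/sal/test/jz sequence.
    void Shl64(uint32_t& lo, uint32_t& hi, unsigned n) {
      unsigned s = n & 31;                  // hardware uses cl mod 32
      if (s != 0) {                         // shld with count 0 is a no-op
        hi = (hi << s) | (lo >> (32 - s));  // shld hi, lo, cl
      }
      lo <<= s;                             // sal lo, cl
      if (n & 32) {                         // test cl, 32 ; jz done
        hi = lo;                            // mov hi, lo
        lo = 0;                             // mov lo, 0
      }
    }
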
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 95941e0..8c6aa5f 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -760,10 +760,7 @@
RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
// X86_64 can handle any size.
if (cu_->target64) {
- if (size == kReference) {
- return kRefReg;
- }
- return kCoreReg;
+ return RegClassBySize(size);
}
if (UNLIKELY(is_volatile)) {
@@ -1216,7 +1213,7 @@
RegLocation rl_obj = info->args[0];
RegLocation rl_char = info->args[1];
RegLocation rl_start; // Note: only present in III flavor or IndexOf.
- // RBX is callee-save register in 64-bit mode.
+ // RBX is promotable in 64-bit mode.
RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
int start_value = -1;
@@ -1236,23 +1233,7 @@
// EBX or R11: temporary during execution (depending on mode).
// REP SCASW: search instruction.
- FlushReg(rs_rAX);
- Clobber(rs_rAX);
- LockTemp(rs_rAX);
- FlushReg(rs_rCX);
- Clobber(rs_rCX);
- LockTemp(rs_rCX);
- FlushReg(rs_rDX);
- Clobber(rs_rDX);
- LockTemp(rs_rDX);
- FlushReg(rs_tmp);
- Clobber(rs_tmp);
- LockTemp(rs_tmp);
- if (cu_->target64) {
- FlushReg(rs_rDI);
- Clobber(rs_rDI);
- LockTemp(rs_rDI);
- }
+ FlushAllRegs();
RegLocation rl_return = GetReturn(kCoreReg);
RegLocation rl_dest = InlineTarget(info);
@@ -1294,7 +1275,7 @@
MarkPossibleNullPointerException(0);
if (!cu_->target64) {
- // EDI is callee-save register in 32-bit mode.
+ // EDI is promotable in 32-bit mode.
NewLIR1(kX86Push32R, rs_rDI.GetReg());
}
@@ -1332,10 +1313,12 @@
if (!cu_->target64 && rl_start.location != kLocPhysReg) {
// Load the start index from stack, remembering that we pushed EDI.
int displacement = SRegOffset(rl_start.s_reg_low) + sizeof(uint32_t);
- {
- ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
- Load32Disp(rs_rX86_SP, displacement, rs_rDI);
- }
+ ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
+ Load32Disp(rs_rX86_SP, displacement, rs_rDI);
+ // Dalvik register annotation in LoadBaseIndexedDisp() used wrong offset. Fix it.
+ DCHECK(!DECODE_ALIAS_INFO_WIDE(last_lir_insn_->flags.alias_info));
+ int reg_id = DECODE_ALIAS_INFO_REG(last_lir_insn_->flags.alias_info) - 1;
+ AnnotateDalvikRegAccess(last_lir_insn_, reg_id, true, false);
} else {
LoadValueDirectFixed(rl_start, rs_rDI);
}
@@ -1399,15 +1382,6 @@
}
StoreValue(rl_dest, rl_return);
-
- FreeTemp(rs_rAX);
- FreeTemp(rs_rCX);
- FreeTemp(rs_rDX);
- FreeTemp(rs_tmp);
- if (cu_->target64) {
- FreeTemp(rs_rDI);
- }
-
return true;
}
@@ -1439,8 +1413,8 @@
// Generate the FDE for the method.
DCHECK_NE(data_offset_, 0U);
- WriteFDEHeader(cfi_info);
- WriteFDEAddressRange(cfi_info, data_offset_);
+ WriteFDEHeader(cfi_info, cu_->target64);
+ WriteFDEAddressRange(cfi_info, data_offset_, cu_->target64);
// The instructions in the FDE.
if (stack_decrement_ != nullptr) {
@@ -1500,7 +1474,7 @@
}
PadCFI(cfi_info);
- WriteCFILength(cfi_info);
+ WriteCFILength(cfi_info, cu_->target64);
return cfi_info;
}
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 500c6b8..9620cd1 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -484,8 +484,10 @@
#undef BinaryShiftOpcode
kX86Cmc,
kX86Shld32RRI,
+ kX86Shld32RRC,
kX86Shld32MRI,
kX86Shrd32RRI,
+ kX86Shrd32RRC,
kX86Shrd32MRI,
kX86Shld64RRI,
kX86Shld64MRI,
@@ -675,6 +677,7 @@
kMemRegImm, // MRI instruction kinds.
kShiftRegImm, kShiftMemImm, kShiftArrayImm, // Shift opcode with immediate.
kShiftRegCl, kShiftMemCl, kShiftArrayCl, // Shift opcode with register CL.
+ kShiftRegRegCl,
// kRegRegReg, kRegRegMem, kRegRegArray, // RRR, RRM, RRA instruction kinds.
kRegCond, kMemCond, kArrayCond, // R, M, A instruction kinds following by a condition.
kRegRegCond, // RR instruction kind followed by a condition.
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index e26745a..4a55de6 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
#include "dataflow_iterator-inl.h"
#include "utils/scoped_arena_containers.h"
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 645fc1c..3915381 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -354,6 +354,7 @@
compiler_get_method_code_addr_(NULL),
support_boot_image_fixup_(instruction_set != kMips),
dedupe_code_("dedupe code"),
+ dedupe_src_mapping_table_("dedupe source mapping table"),
dedupe_mapping_table_("dedupe mapping table"),
dedupe_vmap_table_("dedupe vmap table"),
dedupe_gc_map_("dedupe gc map"),
@@ -390,6 +391,10 @@
return dedupe_code_.Add(Thread::Current(), code);
}
+SrcMap* CompilerDriver::DeduplicateSrcMappingTable(const SrcMap& src_map) {
+ return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
+}
+
std::vector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const std::vector<uint8_t>& code) {
return dedupe_mapping_table_.Add(Thread::Current(), code);
}
@@ -1524,7 +1529,13 @@
return true;
}
- return SkipClassCheckClassPath(descriptor, dex_file, dex_files);
+ if (dex_files.size() > 1) {
+ // Multi-dex compilation, only take first class.
+ return SkipClassCheckClassPath(descriptor, dex_file, dex_files);
+ } else {
+ // Single dex, take everything.
+ return false;
+ }
}
// A fast version of SkipClass above if the class pointer is available
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 233c4f8..d8f318b 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -601,6 +601,7 @@
LOCKS_EXCLUDED(compiled_classes_lock_);
std::vector<uint8_t>* DeduplicateCode(const std::vector<uint8_t>& code);
+ SrcMap* DeduplicateSrcMappingTable(const SrcMap& src_map);
std::vector<uint8_t>* DeduplicateMappingTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateVMapTable(const std::vector<uint8_t>& code);
std::vector<uint8_t>* DeduplicateGCMap(const std::vector<uint8_t>& code);
@@ -770,14 +771,15 @@
bool support_boot_image_fixup_;
// DeDuplication data structures, these own the corresponding byte arrays.
+ template <typename ByteArray>
class DedupeHashFunc {
public:
- size_t operator()(const std::vector<uint8_t>& array) const {
+ size_t operator()(const ByteArray& array) const {
// For small arrays compute a hash using every byte.
static const size_t kSmallArrayThreshold = 16;
size_t hash = 0x811c9dc5;
if (array.size() <= kSmallArrayThreshold) {
- for (uint8_t b : array) {
+ for (auto b : array) {
hash = (hash * 16777619) ^ b;
}
} else {
@@ -803,11 +805,13 @@
return hash;
}
};
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_code_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_mapping_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_vmap_table_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_gc_map_;
- DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_cfi_info_;
+
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_code_;
+ DedupeSet<SrcMap, size_t, DedupeHashFunc<SrcMap>, 4> dedupe_src_mapping_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_mapping_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_vmap_table_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_gc_map_;
+ DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_cfi_info_;
DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
};
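
DedupeHashFunc is templated so one functor can key both std::vector<uint8_t> and the new SrcMap, whose elements convert to a byte through SrcMapElem's operator uint8_t(). The hash is FNV-1-shaped (offset basis 0x811c9dc5, prime 16777619), visiting every element of small arrays and sampling larger ones. The small-array path as a standalone sketch:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // FNV-1-style hash over any container whose elements convert to a byte
    // (the real functor switches to sparse sampling past 16 elements).
    template <typename ByteArray>
    size_t HashBytes(const ByteArray& array) {
      size_t hash = 0x811c9dc5;  // FNV offset basis
      for (auto b : array) {
        hash = (hash * 16777619) ^ static_cast<uint8_t>(b);  // FNV prime
      }
      return hash;
    }
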
diff --git a/compiler/elf_fixup.cc b/compiler/elf_fixup.cc
index 60f76ef..bbfbc6e 100644
--- a/compiler/elf_fixup.cc
+++ b/compiler/elf_fixup.cc
@@ -38,28 +38,32 @@
Elf32_Off base_address = oat_data_begin - oatdata_address;
if (!FixupDynamic(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup .dynamic in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .dynamic in " << file->GetPath();
+ return false;
}
if (!FixupSectionHeaders(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup section headers in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup section headers in " << file->GetPath();
+ return false;
}
if (!FixupProgramHeaders(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup program headers in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup program headers in " << file->GetPath();
+ return false;
}
if (!FixupSymbols(*elf_file.get(), base_address, true)) {
- LOG(WARNING) << "Failed fo fixup .dynsym in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .dynsym in " << file->GetPath();
+ return false;
}
if (!FixupSymbols(*elf_file.get(), base_address, false)) {
- LOG(WARNING) << "Failed fo fixup .symtab in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .symtab in " << file->GetPath();
+ return false;
}
if (!FixupRelocations(*elf_file.get(), base_address)) {
- LOG(WARNING) << "Failed fo fixup .rel.dyn in " << file->GetPath();
- return false;
+ LOG(WARNING) << "Failed to fixup .rel.dyn in " << file->GetPath();
+ return false;
+ }
+ if (!elf_file->FixupDebugSections(base_address)) {
+ LOG(WARNING) << "Failed to fixup debug sections in " << file->GetPath();
+ return false;
}
return true;
}
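Every fixup pass here, including the new FixupDebugSections, applies the same rebasing: each stored virtual address is shifted by the delta between where the oat data was linked and where it actually begins. A hedged sketch of the per-header step (the real passes also validate offsets and walk every header):

    #include <elf.h>

    // Rebase one section header by the load delta. sh_addr == 0 marks a
    // non-allocated section that must stay untouched.
    void RebaseSectionHeader(Elf32_Shdr* shdr, Elf32_Off base_address) {
      if (shdr->sh_addr != 0) {
        shdr->sh_addr += base_address;
      }
    }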
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index bb5f7e0..e45eb61 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -16,11 +16,14 @@
#include "elf_writer_quick.h"
+#include <unordered_map>
+
#include "base/logging.h"
#include "base/unix_file/fd_file.h"
#include "buffered_output_stream.h"
#include "driver/compiler_driver.h"
#include "dwarf.h"
+#include "elf_file.h"
#include "elf_utils.h"
#include "file_output_stream.h"
#include "globals.h"
@@ -39,6 +42,30 @@
return ((binding) << 4) + ((type) & 0xf);
}
+static void PushByte(std::vector<uint8_t>* buf, int data) {
+ buf->push_back(data & 0xff);
+}
+
+static uint32_t PushStr(std::vector<uint8_t>* buf, const char* str, const char* def = nullptr) {
+ if (str == nullptr) {
+ str = def;
+ }
+
+ uint32_t offset = buf->size();
+ for (size_t i = 0; str[i] != '\0'; ++i) {
+ buf->push_back(str[i]);
+ }
+ buf->push_back('\0');
+ return offset;
+}
+
+static uint32_t PushStr(std::vector<uint8_t>* buf, const std::string &str) {
+ uint32_t offset = buf->size();
+ buf->insert(buf->end(), str.begin(), str.end());
+ buf->push_back('\0');
+ return offset;
+}
+
static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
(*buf)[offset+0] = data;
(*buf)[offset+1] = data >> 8;
@@ -51,7 +78,7 @@
buf->push_back((data >> 8) & 0xff);
}
-bool ElfWriterQuick::ElfBuilder::Write() {
+bool ElfWriterQuick::ElfBuilder::Init() {
// The basic layout of the elf file. Order may be different in final output.
// +-------------------------+
// | Elf32_Ehdr |
@@ -120,16 +147,19 @@
// | .debug_str\0 | (Optional)
// | .debug_info\0 | (Optional)
// | .eh_frame\0 | (Optional)
+ // | .debug_line\0 | (Optional)
// | .debug_abbrev\0 | (Optional)
// +-------------------------+ (Optional)
- // | .debug_str | (Optional)
- // +-------------------------+ (Optional)
// | .debug_info | (Optional)
// +-------------------------+ (Optional)
+ // | .debug_abbrev | (Optional)
+ // +-------------------------+ (Optional)
// | .eh_frame | (Optional)
// +-------------------------+ (Optional)
- // | .debug_abbrev | (Optional)
- // +-------------------------+
+ // | .debug_line | (Optional)
+ // +-------------------------+ (Optional)
+ // | .debug_str | (Optional)
+ // +-------------------------+ (Optional)
// | Elf32_Shdr NULL |
// | Elf32_Shdr .dynsym |
// | Elf32_Shdr .dynstr |
@@ -138,173 +168,117 @@
// | Elf32_Shdr .rodata |
// | Elf32_Shdr .dynamic |
// | Elf32_Shdr .shstrtab |
- // | Elf32_Shdr .debug_str | (Optional)
// | Elf32_Shdr .debug_info | (Optional)
- // | Elf32_Shdr .eh_frame | (Optional)
// | Elf32_Shdr .debug_abbrev| (Optional)
+ // | Elf32_Shdr .eh_frame | (Optional)
+ // | Elf32_Shdr .debug_line | (Optional)
+ // | Elf32_Shdr .debug_str | (Optional)
// +-------------------------+
-
if (fatal_error_) {
return false;
}
// Step 1. Figure out all the offsets.
- // What phdr is.
- uint32_t phdr_offset = sizeof(Elf32_Ehdr);
- const uint8_t PH_PHDR = 0;
- const uint8_t PH_LOAD_R__ = 1;
- const uint8_t PH_LOAD_R_X = 2;
- const uint8_t PH_LOAD_RW_ = 3;
- const uint8_t PH_DYNAMIC = 4;
- const uint8_t PH_NUM = 5;
- uint32_t phdr_size = sizeof(Elf32_Phdr) * PH_NUM;
if (debug_logging_) {
- LOG(INFO) << "phdr_offset=" << phdr_offset << std::hex << " " << phdr_offset;
- LOG(INFO) << "phdr_size=" << phdr_size << std::hex << " " << phdr_size;
+ LOG(INFO) << "phdr_offset=" << PHDR_OFFSET << std::hex << " " << PHDR_OFFSET;
+ LOG(INFO) << "phdr_size=" << PHDR_SIZE << std::hex << " " << PHDR_SIZE;
}
- Elf32_Phdr program_headers[PH_NUM];
- memset(&program_headers, 0, sizeof(program_headers));
- program_headers[PH_PHDR].p_type = PT_PHDR;
- program_headers[PH_PHDR].p_offset = phdr_offset;
- program_headers[PH_PHDR].p_vaddr = phdr_offset;
- program_headers[PH_PHDR].p_paddr = phdr_offset;
- program_headers[PH_PHDR].p_filesz = sizeof(program_headers);
- program_headers[PH_PHDR].p_memsz = sizeof(program_headers);
- program_headers[PH_PHDR].p_flags = PF_R;
- program_headers[PH_PHDR].p_align = sizeof(Elf32_Word);
- program_headers[PH_LOAD_R__].p_type = PT_LOAD;
- program_headers[PH_LOAD_R__].p_offset = 0;
- program_headers[PH_LOAD_R__].p_vaddr = 0;
- program_headers[PH_LOAD_R__].p_paddr = 0;
- program_headers[PH_LOAD_R__].p_flags = PF_R;
+ memset(&program_headers_, 0, sizeof(program_headers_));
+ program_headers_[PH_PHDR].p_type = PT_PHDR;
+ program_headers_[PH_PHDR].p_offset = PHDR_OFFSET;
+ program_headers_[PH_PHDR].p_vaddr = PHDR_OFFSET;
+ program_headers_[PH_PHDR].p_paddr = PHDR_OFFSET;
+ program_headers_[PH_PHDR].p_filesz = sizeof(program_headers_);
+ program_headers_[PH_PHDR].p_memsz = sizeof(program_headers_);
+ program_headers_[PH_PHDR].p_flags = PF_R;
+ program_headers_[PH_PHDR].p_align = sizeof(Elf32_Word);
- program_headers[PH_LOAD_R_X].p_type = PT_LOAD;
- program_headers[PH_LOAD_R_X].p_flags = PF_R | PF_X;
+ program_headers_[PH_LOAD_R__].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_R__].p_offset = 0;
+ program_headers_[PH_LOAD_R__].p_vaddr = 0;
+ program_headers_[PH_LOAD_R__].p_paddr = 0;
+ program_headers_[PH_LOAD_R__].p_flags = PF_R;
- program_headers[PH_LOAD_RW_].p_type = PT_LOAD;
- program_headers[PH_LOAD_RW_].p_flags = PF_R | PF_W;
+ program_headers_[PH_LOAD_R_X].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_R_X].p_flags = PF_R | PF_X;
- program_headers[PH_DYNAMIC].p_type = PT_DYNAMIC;
- program_headers[PH_DYNAMIC].p_flags = PF_R | PF_W;
+ program_headers_[PH_LOAD_RW_].p_type = PT_LOAD;
+ program_headers_[PH_LOAD_RW_].p_flags = PF_R | PF_W;
+
+ program_headers_[PH_DYNAMIC].p_type = PT_DYNAMIC;
+ program_headers_[PH_DYNAMIC].p_flags = PF_R | PF_W;
// Get the dynstr string.
- std::string dynstr(dynsym_builder_.GenerateStrtab());
+ dynstr_ = dynsym_builder_.GenerateStrtab();
// Add the SONAME to the dynstr.
- uint32_t dynstr_soname_offset = dynstr.size();
+ dynstr_soname_offset_ = dynstr_.size();
std::string file_name(elf_file_->GetPath());
size_t directory_separator_pos = file_name.rfind('/');
if (directory_separator_pos != std::string::npos) {
file_name = file_name.substr(directory_separator_pos + 1);
}
- dynstr += file_name;
- dynstr += '\0';
+ dynstr_ += file_name;
+ dynstr_ += '\0';
if (debug_logging_) {
- LOG(INFO) << "dynstr size (bytes) =" << dynstr.size()
- << std::hex << " " << dynstr.size();
+ LOG(INFO) << "dynstr size (bytes) =" << dynstr_.size()
+ << std::hex << " " << dynstr_.size();
LOG(INFO) << "dynsym size (elements)=" << dynsym_builder_.GetSize()
<< std::hex << " " << dynsym_builder_.GetSize();
}
- // get the strtab
- std::string strtab;
- if (IncludingDebugSymbols()) {
- strtab = symtab_builder_.GenerateStrtab();
- if (debug_logging_) {
- LOG(INFO) << "strtab size (bytes) =" << strtab.size()
- << std::hex << " " << strtab.size();
- LOG(INFO) << "symtab size (elements) =" << symtab_builder_.GetSize()
- << std::hex << " " << symtab_builder_.GetSize();
- }
- }
-
// Get the section header string table.
- std::vector<Elf32_Shdr*> section_ptrs;
- std::string shstrtab;
- shstrtab += '\0';
+ shstrtab_ += '\0';
// Setup sym_undef
- Elf32_Shdr null_hdr;
- memset(&null_hdr, 0, sizeof(null_hdr));
- null_hdr.sh_type = SHT_NULL;
- null_hdr.sh_link = SHN_UNDEF;
- section_ptrs.push_back(&null_hdr);
+ memset(&null_hdr_, 0, sizeof(null_hdr_));
+ null_hdr_.sh_type = SHT_NULL;
+ null_hdr_.sh_link = SHN_UNDEF;
+ section_ptrs_.push_back(&null_hdr_);
- uint32_t section_index = 1;
+ section_index_ = 1;
// setup .dynsym
- section_ptrs.push_back(&dynsym_builder_.section_);
- AssignSectionStr(&dynsym_builder_, &shstrtab);
- dynsym_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&dynsym_builder_.section_);
+ AssignSectionStr(&dynsym_builder_, &shstrtab_);
+ dynsym_builder_.section_index_ = section_index_++;
// Setup .dynstr
- section_ptrs.push_back(&dynsym_builder_.strtab_.section_);
- AssignSectionStr(&dynsym_builder_.strtab_, &shstrtab);
- dynsym_builder_.strtab_.section_index_ = section_index++;
+ section_ptrs_.push_back(&dynsym_builder_.strtab_.section_);
+ AssignSectionStr(&dynsym_builder_.strtab_, &shstrtab_);
+ dynsym_builder_.strtab_.section_index_ = section_index_++;
// Setup .hash
- section_ptrs.push_back(&hash_builder_.section_);
- AssignSectionStr(&hash_builder_, &shstrtab);
- hash_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&hash_builder_.section_);
+ AssignSectionStr(&hash_builder_, &shstrtab_);
+ hash_builder_.section_index_ = section_index_++;
// Setup .rodata
- section_ptrs.push_back(&rodata_builder_.section_);
- AssignSectionStr(&rodata_builder_, &shstrtab);
- rodata_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&rodata_builder_.section_);
+ AssignSectionStr(&rodata_builder_, &shstrtab_);
+ rodata_builder_.section_index_ = section_index_++;
// Setup .text
- section_ptrs.push_back(&text_builder_.section_);
- AssignSectionStr(&text_builder_, &shstrtab);
- text_builder_.section_index_ = section_index++;
+ section_ptrs_.push_back(&text_builder_.section_);
+ AssignSectionStr(&text_builder_, &shstrtab_);
+ text_builder_.section_index_ = section_index_++;
// Setup .dynamic
- section_ptrs.push_back(&dynamic_builder_.section_);
- AssignSectionStr(&dynamic_builder_, &shstrtab);
- dynamic_builder_.section_index_ = section_index++;
-
- if (IncludingDebugSymbols()) {
- // Setup .symtab
- section_ptrs.push_back(&symtab_builder_.section_);
- AssignSectionStr(&symtab_builder_, &shstrtab);
- symtab_builder_.section_index_ = section_index++;
-
- // Setup .strtab
- section_ptrs.push_back(&symtab_builder_.strtab_.section_);
- AssignSectionStr(&symtab_builder_.strtab_, &shstrtab);
- symtab_builder_.strtab_.section_index_ = section_index++;
- }
- ElfRawSectionBuilder* it = other_builders_.data();
- for (uint32_t cnt = 0; cnt < other_builders_.size(); ++it, ++cnt) {
- // Setup all the other sections.
- section_ptrs.push_back(&it->section_);
- AssignSectionStr(it, &shstrtab);
- it->section_index_ = section_index++;
- }
-
- // Setup shstrtab
- section_ptrs.push_back(&shstrtab_builder_.section_);
- AssignSectionStr(&shstrtab_builder_, &shstrtab);
- shstrtab_builder_.section_index_ = section_index++;
-
- if (debug_logging_) {
- LOG(INFO) << ".shstrtab size (bytes) =" << shstrtab.size()
- << std::hex << " " << shstrtab.size();
- LOG(INFO) << "section list size (elements)=" << section_ptrs.size()
- << std::hex << " " << section_ptrs.size();
- }
+ section_ptrs_.push_back(&dynamic_builder_.section_);
+ AssignSectionStr(&dynamic_builder_, &shstrtab_);
+ dynamic_builder_.section_index_ = section_index_++;
// Fill in the hash section.
- std::vector<Elf32_Word> hash = dynsym_builder_.GenerateHashContents();
+ hash_ = dynsym_builder_.GenerateHashContents();
if (debug_logging_) {
- LOG(INFO) << ".hash size (bytes)=" << hash.size() * sizeof(Elf32_Word)
- << std::hex << " " << hash.size() * sizeof(Elf32_Word);
+ LOG(INFO) << ".hash size (bytes)=" << hash_.size() * sizeof(Elf32_Word)
+ << std::hex << " " << hash_.size() * sizeof(Elf32_Word);
}
- Elf32_Word base_offset = sizeof(Elf32_Ehdr) + sizeof(program_headers);
- std::vector<ElfFilePiece> pieces;
+ Elf32_Word base_offset = sizeof(Elf32_Ehdr) + sizeof(program_headers_);
// Get the layout in the sections.
//
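The layout computations below all funnel through NextOffset, which places a section at the end of the previous one, rounded up to the new section's alignment. A minimal sketch under the assumption (required by ELF) that sh_addralign is zero, one, or a power of two:

    #include <elf.h>

    Elf32_Word AlignUp(Elf32_Word value, Elf32_Word alignment) {
      return (value + alignment - 1) & ~(alignment - 1);  // Power-of-two only.
    }

    Elf32_Word NextSectionOffset(const Elf32_Shdr& cur, const Elf32_Shdr& prev) {
      Elf32_Word end = prev.sh_offset + prev.sh_size;
      return (cur.sh_addralign > 1) ? AlignUp(end, cur.sh_addralign) : end;
    }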
@@ -318,14 +292,14 @@
dynsym_builder_.strtab_.section_.sh_offset = NextOffset(dynsym_builder_.strtab_.section_,
dynsym_builder_.section_);
dynsym_builder_.strtab_.section_.sh_addr = dynsym_builder_.strtab_.section_.sh_offset;
- dynsym_builder_.strtab_.section_.sh_size = dynstr.size();
+ dynsym_builder_.strtab_.section_.sh_size = dynstr_.size();
dynsym_builder_.strtab_.section_.sh_link = dynsym_builder_.strtab_.GetLink();
// Get the layout of the hash section
hash_builder_.section_.sh_offset = NextOffset(hash_builder_.section_,
dynsym_builder_.strtab_.section_);
hash_builder_.section_.sh_addr = hash_builder_.section_.sh_offset;
- hash_builder_.section_.sh_size = hash.size() * sizeof(Elf32_Word);
+ hash_builder_.section_.sh_size = hash_.size() * sizeof(Elf32_Word);
hash_builder_.section_.sh_link = hash_builder_.GetLink();
// Get the layout of the rodata section.
@@ -349,7 +323,70 @@
dynamic_builder_.section_.sh_size = dynamic_builder_.GetSize() * sizeof(Elf32_Dyn);
dynamic_builder_.section_.sh_link = dynamic_builder_.GetLink();
+ if (debug_logging_) {
+ LOG(INFO) << "dynsym off=" << dynsym_builder_.section_.sh_offset
+ << " dynsym size=" << dynsym_builder_.section_.sh_size;
+ LOG(INFO) << "dynstr off=" << dynsym_builder_.strtab_.section_.sh_offset
+ << " dynstr size=" << dynsym_builder_.strtab_.section_.sh_size;
+ LOG(INFO) << "hash off=" << hash_builder_.section_.sh_offset
+ << " hash size=" << hash_builder_.section_.sh_size;
+ LOG(INFO) << "rodata off=" << rodata_builder_.section_.sh_offset
+ << " rodata size=" << rodata_builder_.section_.sh_size;
+ LOG(INFO) << "text off=" << text_builder_.section_.sh_offset
+ << " text size=" << text_builder_.section_.sh_size;
+ LOG(INFO) << "dynamic off=" << dynamic_builder_.section_.sh_offset
+ << " dynamic size=" << dynamic_builder_.section_.sh_size;
+ }
+
+ return true;
+}
+
+bool ElfWriterQuick::ElfBuilder::Write() {
+ std::vector<ElfFilePiece> pieces;
Elf32_Shdr prev = dynamic_builder_.section_;
+ std::string strtab;
+
+ if (IncludingDebugSymbols()) {
+ // Setup .symtab
+ section_ptrs_.push_back(&symtab_builder_.section_);
+ AssignSectionStr(&symtab_builder_, &shstrtab_);
+ symtab_builder_.section_index_ = section_index_++;
+
+ // Setup .strtab
+ section_ptrs_.push_back(&symtab_builder_.strtab_.section_);
+ AssignSectionStr(&symtab_builder_.strtab_, &shstrtab_);
+ symtab_builder_.strtab_.section_index_ = section_index_++;
+
+ strtab = symtab_builder_.GenerateStrtab();
+ if (debug_logging_) {
+ LOG(INFO) << "strtab size (bytes) =" << strtab.size()
+ << std::hex << " " << strtab.size();
+ LOG(INFO) << "symtab size (elements) =" << symtab_builder_.GetSize()
+ << std::hex << " " << symtab_builder_.GetSize();
+ }
+ }
+
+ // Setup all the other sections.
+ for (ElfRawSectionBuilder *builder = other_builders_.data(),
+ *end = builder + other_builders_.size();
+ builder != end; ++builder) {
+ section_ptrs_.push_back(&builder->section_);
+ AssignSectionStr(builder, &shstrtab_);
+ builder->section_index_ = section_index_++;
+ }
+
+ // Setup shstrtab
+ section_ptrs_.push_back(&shstrtab_builder_.section_);
+ AssignSectionStr(&shstrtab_builder_, &shstrtab_);
+ shstrtab_builder_.section_index_ = section_index_++;
+
+ if (debug_logging_) {
+ LOG(INFO) << ".shstrtab size (bytes) =" << shstrtab_.size()
+ << std::hex << " " << shstrtab_.size();
+ LOG(INFO) << "section list size (elements)=" << section_ptrs_.size()
+ << std::hex << " " << section_ptrs_.size();
+ }
+
if (IncludingDebugSymbols()) {
// Get the layout of the symtab section.
symtab_builder_.section_.sh_offset = NextOffset(symtab_builder_.section_,
@@ -367,27 +404,14 @@
symtab_builder_.strtab_.section_.sh_link = symtab_builder_.strtab_.GetLink();
prev = symtab_builder_.strtab_.section_;
- }
- if (debug_logging_) {
- LOG(INFO) << "dynsym off=" << dynsym_builder_.section_.sh_offset
- << " dynsym size=" << dynsym_builder_.section_.sh_size;
- LOG(INFO) << "dynstr off=" << dynsym_builder_.strtab_.section_.sh_offset
- << " dynstr size=" << dynsym_builder_.strtab_.section_.sh_size;
- LOG(INFO) << "hash off=" << hash_builder_.section_.sh_offset
- << " hash size=" << hash_builder_.section_.sh_size;
- LOG(INFO) << "rodata off=" << rodata_builder_.section_.sh_offset
- << " rodata size=" << rodata_builder_.section_.sh_size;
- LOG(INFO) << "text off=" << text_builder_.section_.sh_offset
- << " text size=" << text_builder_.section_.sh_size;
- LOG(INFO) << "dynamic off=" << dynamic_builder_.section_.sh_offset
- << " dynamic size=" << dynamic_builder_.section_.sh_size;
- if (IncludingDebugSymbols()) {
+ if (debug_logging_) {
LOG(INFO) << "symtab off=" << symtab_builder_.section_.sh_offset
<< " symtab size=" << symtab_builder_.section_.sh_size;
LOG(INFO) << "strtab off=" << symtab_builder_.strtab_.section_.sh_offset
<< " strtab size=" << symtab_builder_.strtab_.section_.sh_size;
}
}
+
// Get the layout of the extra sections. (This will deal with the debug
// sections if they are there)
for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
@@ -403,10 +427,11 @@
<< " " << it->name_ << " size=" << it->section_.sh_size;
}
}
+
// Get the layout of the shstrtab section
shstrtab_builder_.section_.sh_offset = NextOffset(shstrtab_builder_.section_, prev);
shstrtab_builder_.section_.sh_addr = 0;
- shstrtab_builder_.section_.sh_size = shstrtab.size();
+ shstrtab_builder_.section_.sh_size = shstrtab_.size();
shstrtab_builder_.section_.sh_link = shstrtab_builder_.GetLink();
if (debug_logging_) {
LOG(INFO) << "shstrtab off=" << shstrtab_builder_.section_.sh_offset
@@ -430,58 +455,58 @@
// Setup the dynamic section.
// This will add the two values we could not know until now, namely the size
// and the soname_offset.
- std::vector<Elf32_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr.size(),
- dynstr_soname_offset);
+ std::vector<Elf32_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr_.size(),
+ dynstr_soname_offset_);
CHECK_EQ(dynamic.size() * sizeof(Elf32_Dyn), dynamic_builder_.section_.sh_size);
// Finish setup of the program headers now that we know the layout of the
// whole file.
Elf32_Word load_r_size = rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size;
- program_headers[PH_LOAD_R__].p_filesz = load_r_size;
- program_headers[PH_LOAD_R__].p_memsz = load_r_size;
- program_headers[PH_LOAD_R__].p_align = rodata_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R__].p_filesz = load_r_size;
+ program_headers_[PH_LOAD_R__].p_memsz = load_r_size;
+ program_headers_[PH_LOAD_R__].p_align = rodata_builder_.section_.sh_addralign;
Elf32_Word load_rx_size = text_builder_.section_.sh_size;
- program_headers[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
- program_headers[PH_LOAD_R_X].p_vaddr = text_builder_.section_.sh_offset;
- program_headers[PH_LOAD_R_X].p_paddr = text_builder_.section_.sh_offset;
- program_headers[PH_LOAD_R_X].p_filesz = load_rx_size;
- program_headers[PH_LOAD_R_X].p_memsz = load_rx_size;
- program_headers[PH_LOAD_R_X].p_align = text_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_R_X].p_vaddr = text_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_R_X].p_paddr = text_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_R_X].p_filesz = load_rx_size;
+ program_headers_[PH_LOAD_R_X].p_memsz = load_rx_size;
+ program_headers_[PH_LOAD_R_X].p_align = text_builder_.section_.sh_addralign;
- program_headers[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers[PH_LOAD_RW_].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_LOAD_RW_].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers[PH_LOAD_RW_].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers[PH_LOAD_RW_].p_align = dynamic_builder_.section_.sh_addralign;
+ program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_RW_].p_vaddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_RW_].p_paddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_LOAD_RW_].p_memsz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_LOAD_RW_].p_align = dynamic_builder_.section_.sh_addralign;
- program_headers[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
- program_headers[PH_DYNAMIC].p_vaddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_DYNAMIC].p_paddr = dynamic_builder_.section_.sh_offset;
- program_headers[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
- program_headers[PH_DYNAMIC].p_memsz = dynamic_builder_.section_.sh_size;
- program_headers[PH_DYNAMIC].p_align = dynamic_builder_.section_.sh_addralign;
+ program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_DYNAMIC].p_vaddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_DYNAMIC].p_paddr = dynamic_builder_.section_.sh_offset;
+ program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_DYNAMIC].p_memsz = dynamic_builder_.section_.sh_size;
+ program_headers_[PH_DYNAMIC].p_align = dynamic_builder_.section_.sh_addralign;
// Finish setup of the Ehdr values.
- elf_header_.e_phoff = phdr_offset;
+ elf_header_.e_phoff = PHDR_OFFSET;
elf_header_.e_shoff = sections_offset;
elf_header_.e_phnum = PH_NUM;
- elf_header_.e_shnum = section_ptrs.size();
+ elf_header_.e_shnum = section_ptrs_.size();
elf_header_.e_shstrndx = shstrtab_builder_.section_index_;
// Add the rest of the pieces to the list.
pieces.push_back(ElfFilePiece("Elf Header", 0, &elf_header_, sizeof(elf_header_)));
- pieces.push_back(ElfFilePiece("Program headers", phdr_offset,
- &program_headers, sizeof(program_headers)));
+ pieces.push_back(ElfFilePiece("Program headers", PHDR_OFFSET,
+ &program_headers_, sizeof(program_headers_)));
pieces.push_back(ElfFilePiece(".dynamic", dynamic_builder_.section_.sh_offset,
dynamic.data(), dynamic_builder_.section_.sh_size));
pieces.push_back(ElfFilePiece(".dynsym", dynsym_builder_.section_.sh_offset,
dynsym.data(), dynsym.size() * sizeof(Elf32_Sym)));
pieces.push_back(ElfFilePiece(".dynstr", dynsym_builder_.strtab_.section_.sh_offset,
- dynstr.c_str(), dynstr.size()));
+ dynstr_.c_str(), dynstr_.size()));
pieces.push_back(ElfFilePiece(".hash", hash_builder_.section_.sh_offset,
- hash.data(), hash.size() * sizeof(Elf32_Word)));
+ hash_.data(), hash_.size() * sizeof(Elf32_Word)));
pieces.push_back(ElfFilePiece(".rodata", rodata_builder_.section_.sh_offset,
nullptr, rodata_builder_.section_.sh_size));
pieces.push_back(ElfFilePiece(".text", text_builder_.section_.sh_offset,
@@ -493,13 +518,13 @@
strtab.c_str(), strtab.size()));
}
pieces.push_back(ElfFilePiece(".shstrtab", shstrtab_builder_.section_.sh_offset,
- &shstrtab[0], shstrtab.size()));
- for (uint32_t i = 0; i < section_ptrs.size(); ++i) {
+ &shstrtab_[0], shstrtab_.size()));
+ for (uint32_t i = 0; i < section_ptrs_.size(); ++i) {
// Just add all the sections individually since they are all over the
// place on the heap/stack.
Elf32_Word cur_off = sections_offset + i * sizeof(Elf32_Shdr);
pieces.push_back(ElfFilePiece("section table piece", cur_off,
- section_ptrs[i], sizeof(Elf32_Shdr)));
+ section_ptrs_[i], sizeof(Elf32_Shdr)));
}
if (!WriteOutFile(pieces)) {
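WriteOutFile consumes the pieces gathered above: each ElfFilePiece pairs an absolute file offset with a data pointer and size, and the writer emits each piece at its offset. A hedged sketch of that pattern (names are illustrative, not ART's API):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct FilePiece {
      const char* name;   // For error messages.
      long offset;        // Absolute offset in the output file.
      const void* data;   // May be null for reserved ranges (e.g. .rodata above).
      size_t size;
    };

    bool WritePieces(std::FILE* f, const std::vector<FilePiece>& pieces) {
      for (const FilePiece& p : pieces) {
        if (p.data == nullptr) continue;  // Space is reserved, nothing to write.
        if (std::fseek(f, p.offset, SEEK_SET) != 0) return false;
        if (std::fwrite(p.data, 1, p.size, f) != p.size) return false;
      }
      return true;
    }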
@@ -664,7 +689,7 @@
// Lets say the state is something like this.
// +--------+ +--------+ +-----------+
// | symtab | | bucket | | chain |
- // | nullptr | | 1 | | STN_UNDEF |
+ // | null | | 1 | | STN_UNDEF |
// | <sym1> | | 4 | | 2 |
// | <sym2> | | | | 5 |
// | <sym3> | | | | STN_UNDEF |
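The bucket and chain arrays sketched here are indexed with the classic System V ELF hash of the symbol name, reduced modulo the bucket count:

    #include <cstdint>

    // System V ABI ELF hash; the bucket index is ElfHash(name) % nbucket.
    uint32_t ElfHash(const char* name) {
      uint32_t h = 0;
      for (const unsigned char* p = reinterpret_cast<const unsigned char*>(name);
           *p != '\0'; ++p) {
        h = (h << 4) + *p;
        uint32_t g = h & 0xf0000000u;
        if (g != 0) {
          h ^= g >> 24;
        }
        h &= ~g;  // No-op when g == 0.
      }
      return h;
    }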
@@ -836,13 +861,24 @@
}
std::vector<uint8_t>* ConstructCIEFrameX86(bool is_x86_64) {
- std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
+ std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
// Length (will be filled in later in this routine).
- PushWord(cfi_info, 0);
+ if (is_x86_64) {
+ PushWord(cfi_info, 0xffffffff); // Indicates the 64-bit DWARF format.
+ PushWord(cfi_info, 0);
+ PushWord(cfi_info, 0);
+ } else {
+ PushWord(cfi_info, 0);
+ }
// CIE id: always 0.
- PushWord(cfi_info, 0);
+ if (is_x86_64) {
+ PushWord(cfi_info, 0);
+ PushWord(cfi_info, 0);
+ } else {
+ PushWord(cfi_info, 0);
+ }
// Version: always 1.
cfi_info->push_back(0x01);
@@ -874,8 +910,14 @@
// Augmentation length: 1.
cfi_info->push_back(1);
- // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
- cfi_info->push_back(0x03);
+ // Augmentation data.
+ if (is_x86_64) {
+ // 0x04 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata8).
+ cfi_info->push_back(0x04);
+ } else {
+ // 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
+ cfi_info->push_back(0x03);
+ }
// Initial instructions.
if (is_x86_64) {
@@ -905,11 +947,13 @@
}
// Set the length of the CIE inside the generated bytes.
- uint32_t length = cfi_info->size() - 4;
- (*cfi_info)[0] = length;
- (*cfi_info)[1] = length >> 8;
- (*cfi_info)[2] = length >> 16;
- (*cfi_info)[3] = length >> 24;
+ if (is_x86_64) {
+ uint32_t length = cfi_info->size() - 12;
+ UpdateWord(cfi_info, 4, length);
+ } else {
+ uint32_t length = cfi_info->size() - 4;
+ UpdateWord(cfi_info, 0, length);
+ }
return cfi_info;
}
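The size() - 12 versus size() - 4 above is DWARF's initial-length rule: a 32-bit unit starts with a 4-byte length, a 64-bit unit starts with the escape 0xffffffff followed by an 8-byte length, and the length always excludes the length field itself. A sketch of the consumer's view (assuming a little-endian buffer, matching the PushWord encoding here):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Reads a DWARF initial length; returns the number of bytes consumed.
    size_t ReadInitialLength(const uint8_t* p, uint64_t* length, bool* is_64bit) {
      uint32_t first;
      std::memcpy(&first, p, sizeof(first));
      if (first == 0xffffffffu) {
        *is_64bit = true;
        std::memcpy(length, p + 4, sizeof(*length));  // 8-byte length follows.
        return 12;
      }
      *is_64bit = false;
      *length = first;
      return 4;
    }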
@@ -940,8 +984,12 @@
compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
debug);
+ if (!builder.Init()) {
+ return false;
+ }
+
if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
- WriteDebugSymbols(builder, oat_writer);
+ WriteDebugSymbols(&builder, oat_writer);
}
if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation()) {
@@ -954,15 +1002,17 @@
return builder.Write();
}
-void ElfWriterQuick::WriteDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer) {
+void ElfWriterQuick::WriteDebugSymbols(ElfBuilder* builder, OatWriter* oat_writer) {
std::unique_ptr<std::vector<uint8_t>> cfi_info(
ConstructCIEFrame(compiler_driver_->GetInstructionSet()));
+ Elf32_Addr text_section_address = builder->text_builder_.section_.sh_addr;
+
// Iterate over the compiled methods.
const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
- ElfSymtabBuilder* symtab = &builder.symtab_builder_;
+ ElfSymtabBuilder* symtab = &builder->symtab_builder_;
for (auto it = method_info.begin(); it != method_info.end(); ++it) {
- symtab->AddSymbol(it->method_name_, &builder.text_builder_, it->low_pc_, true,
+ symtab->AddSymbol(it->method_name_, &builder->text_builder_, it->low_pc_, true,
it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
// Include CFI for compiled method, if possible.
@@ -976,96 +1026,314 @@
int cur_offset = cfi_info->size();
cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
- // Set the 'CIE_pointer' field to cur_offset+4.
- uint32_t CIE_pointer = cur_offset + 4;
- uint32_t offset_to_update = cur_offset + sizeof(uint32_t);
- (*cfi_info)[offset_to_update+0] = CIE_pointer;
- (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
- (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
- (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+ bool is_64bit = *(reinterpret_cast<const uint32_t*>(fde->data())) == 0xffffffff;
- // Set the 'initial_location' field to address the start of the method.
- offset_to_update = cur_offset + 2*sizeof(uint32_t);
- const uint32_t quick_code_start = it->low_pc_;
- (*cfi_info)[offset_to_update+0] = quick_code_start;
- (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
- (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
- (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+ // Set the 'CIE_pointer' field.
+ uint64_t CIE_pointer = cur_offset + (is_64bit ? 12 : 4);
+ uint64_t offset_to_update = CIE_pointer;
+ if (is_64bit) {
+ (*cfi_info)[offset_to_update+0] = CIE_pointer;
+ (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+ (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+ (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+ (*cfi_info)[offset_to_update+4] = CIE_pointer >> 32;
+ (*cfi_info)[offset_to_update+5] = CIE_pointer >> 40;
+ (*cfi_info)[offset_to_update+6] = CIE_pointer >> 48;
+ (*cfi_info)[offset_to_update+7] = CIE_pointer >> 56;
+ } else {
+ (*cfi_info)[offset_to_update+0] = CIE_pointer;
+ (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+ (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+ (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+ }
+
+ // Set the 'initial_location' field.
+ offset_to_update += is_64bit ? 8 : 4;
+ if (is_64bit) {
+ const uint64_t quick_code_start = it->low_pc_ + text_section_address;
+ (*cfi_info)[offset_to_update+0] = quick_code_start;
+ (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
+ (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
+ (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+ (*cfi_info)[offset_to_update+4] = quick_code_start >> 32;
+ (*cfi_info)[offset_to_update+5] = quick_code_start >> 40;
+ (*cfi_info)[offset_to_update+6] = quick_code_start >> 48;
+ (*cfi_info)[offset_to_update+7] = quick_code_start >> 56;
+ } else {
+ const uint32_t quick_code_start = it->low_pc_ + text_section_address;
+ (*cfi_info)[offset_to_update+0] = quick_code_start;
+ (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
+ (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
+ (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+ }
}
}
}
- if (cfi_info.get() != nullptr) {
- // Now lay down the Elf sections.
- ElfRawSectionBuilder debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
- ElfRawSectionBuilder eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
- eh_frame.SetBuffer(std::move(*cfi_info.get()));
+ bool hasCFI = (cfi_info.get() != nullptr);
+ bool hasLineInfo = false;
+ for (auto& dbg_info : oat_writer->GetCFIMethodInfo()) {
+ if (dbg_info.dbgstream_ != nullptr &&
+ !dbg_info.compiled_method_->GetSrcMappingTable().empty()) {
+ hasLineInfo = true;
+ break;
+ }
+ }
- FillInCFIInformation(oat_writer, debug_info.GetBuffer(), debug_abbrev.GetBuffer(),
- debug_str.GetBuffer());
- builder.RegisterRawSection(debug_info);
- builder.RegisterRawSection(debug_abbrev);
- builder.RegisterRawSection(eh_frame);
- builder.RegisterRawSection(debug_str);
+ if (hasLineInfo || hasCFI) {
+ ElfRawSectionBuilder debug_info(".debug_info", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ ElfRawSectionBuilder debug_str(".debug_str", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+ ElfRawSectionBuilder debug_line(".debug_line", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+
+ FillInCFIInformation(oat_writer, debug_info.GetBuffer(),
+ debug_abbrev.GetBuffer(), debug_str.GetBuffer(),
+ hasLineInfo ? debug_line.GetBuffer() : nullptr,
+ text_section_address);
+
+ builder->RegisterRawSection(debug_info);
+ builder->RegisterRawSection(debug_abbrev);
+
+ if (hasCFI) {
+ ElfRawSectionBuilder eh_frame(".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+ eh_frame.SetBuffer(std::move(*cfi_info.get()));
+ builder->RegisterRawSection(eh_frame);
+ }
+
+ if (hasLineInfo) {
+ builder->RegisterRawSection(debug_line);
+ }
+
+ builder->RegisterRawSection(debug_str);
+ }
+}
+
+class LineTableGenerator FINAL : public Leb128Encoder {
+ public:
+ LineTableGenerator(int line_base, int line_range, int opcode_base,
+ std::vector<uint8_t>* data, uintptr_t current_address,
+ size_t current_line)
+ : Leb128Encoder(data), line_base_(line_base), line_range_(line_range),
+ opcode_base_(opcode_base), current_address_(current_address),
+ current_line_(current_line) {}
+
+ void PutDelta(unsigned delta_addr, int delta_line) {
+ current_line_ += delta_line;
+ current_address_ += delta_addr;
+
+ if (delta_line >= line_base_ && delta_line < line_base_ + line_range_) {
+ unsigned special_opcode = (delta_line - line_base_) +
+ (line_range_ * delta_addr) + opcode_base_;
+ if (special_opcode <= 255) {
+ PushByte(data_, special_opcode);
+ return;
+ }
+ }
+
+ // Generate standard opcode for address advance
+ if (delta_addr != 0) {
+ PushByte(data_, DW_LNS_advance_pc);
+ PushBackUnsigned(delta_addr);
+ }
+
+ // Generate standard opcode for line delta
+ if (delta_line != 0) {
+ PushByte(data_, DW_LNS_advance_line);
+ PushBackSigned(delta_line);
+ }
+
+ // Generate standard opcode for a new line table entry
+ PushByte(data_, DW_LNS_copy);
+ }
+
+ void SetAddr(uintptr_t addr) {
+ if (current_address_ == addr) {
+ return;
+ }
+
+ current_address_ = addr;
+
+ PushByte(data_, 0); // extended opcode:
+ PushByte(data_, 1 + 4); // length: opcode_size + address_size
+ PushByte(data_, DW_LNE_set_address);
+ PushWord(data_, addr);
+ }
+
+ void SetLine(unsigned line) {
+ int delta_line = line - current_line_;
+ if (delta_line) {
+ current_line_ = line;
+ PushByte(data_, DW_LNS_advance_line);
+ PushBackSigned(delta_line);
+ }
+ }
+
+ void SetFile(unsigned file_index) {
+ PushByte(data_, DW_LNS_set_file);
+ PushBackUnsigned(file_index);
+ }
+
+ void EndSequence() {
+ // End of Line Table Program
+ // 0(=ext), 1(len), DW_LNE_end_sequence
+ PushByte(data_, 0);
+ PushByte(data_, 1);
+ PushByte(data_, DW_LNE_end_sequence);
+ }
+
+ private:
+ const int line_base_;
+ const int line_range_;
+ const int opcode_base_;
+ uintptr_t current_address_;
+ size_t current_line_;
+
+ DISALLOW_COPY_AND_ASSIGN(LineTableGenerator);
+};
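PutDelta above implements the DWARF special-opcode formula; one byte advances both the address and the line and appends a row whenever the result fits in 255. A worked example with the parameters FillInCFIInformation passes later (LINE_BASE = -5, LINE_RANGE = 14, OPCODE_BASE = 13):

    #include <cassert>

    // special = (delta_line - line_base) + line_range * delta_addr + opcode_base
    int SpecialOpcode(int delta_line, int delta_addr,
                      int line_base, int line_range, int opcode_base) {
      return (delta_line - line_base) + line_range * delta_addr + opcode_base;
    }

    void SpecialOpcodeExample() {
      // delta_addr = 2, delta_line = 1: (1 + 5) + 14 * 2 + 13 = 47 <= 255,
      // so one opcode byte replaces DW_LNS_advance_pc, DW_LNS_advance_line
      // and DW_LNS_copy.
      assert(SpecialOpcode(1, 2, -5, 14, 13) == 47);
    }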
+
+// TODO: rewriting this using DexFile::DecodeDebugInfo would pull in unneeded machinery.
+static void GetLineInfoForJava(const uint8_t* dbgstream, const SrcMap& pc2dex,
+ SrcMap* result, uint32_t start_pc = 0) {
+ if (dbgstream == nullptr) {
+ return;
+ }
+
+ int adjopcode;
+ uint32_t dex_offset = 0;
+ uint32_t java_line = DecodeUnsignedLeb128(&dbgstream);
+
+ // skip parameters
+ for (uint32_t param_count = DecodeUnsignedLeb128(&dbgstream); param_count != 0; --param_count) {
+ DecodeUnsignedLeb128(&dbgstream);
+ }
+
+ for (bool is_end = false; is_end == false; ) {
+ uint8_t opcode = *dbgstream;
+ dbgstream++;
+ switch (opcode) {
+ case DexFile::DBG_END_SEQUENCE:
+ is_end = true;
+ break;
+
+ case DexFile::DBG_ADVANCE_PC:
+ dex_offset += DecodeUnsignedLeb128(&dbgstream);
+ break;
+
+ case DexFile::DBG_ADVANCE_LINE:
+ java_line += DecodeSignedLeb128(&dbgstream);
+ break;
+
+ case DexFile::DBG_START_LOCAL:
+ case DexFile::DBG_START_LOCAL_EXTENDED:
+ DecodeUnsignedLeb128(&dbgstream);
+ DecodeUnsignedLeb128(&dbgstream);
+ DecodeUnsignedLeb128(&dbgstream);
+
+ if (opcode == DexFile::DBG_START_LOCAL_EXTENDED) {
+ DecodeUnsignedLeb128(&dbgstream);
+ }
+ break;
+
+ case DexFile::DBG_END_LOCAL:
+ case DexFile::DBG_RESTART_LOCAL:
+ DecodeUnsignedLeb128(&dbgstream);
+ break;
+
+ case DexFile::DBG_SET_PROLOGUE_END:
+ case DexFile::DBG_SET_EPILOGUE_BEGIN:
+ case DexFile::DBG_SET_FILE:
+ break;
+
+ default:
+ adjopcode = opcode - DexFile::DBG_FIRST_SPECIAL;
+ dex_offset += adjopcode / DexFile::DBG_LINE_RANGE;
+ java_line += DexFile::DBG_LINE_BASE + (adjopcode % DexFile::DBG_LINE_RANGE);
+
+ for (SrcMap::const_iterator found = pc2dex.FindByTo(dex_offset);
+ found != pc2dex.end() && found->to_ == static_cast<int32_t>(dex_offset);
+ found++) {
+ result->push_back({found->from_ + start_pc, static_cast<int32_t>(java_line)});
+ }
+ break;
+ }
}
}
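The default branch above decodes dex debug-info special opcodes, each of which packs an address delta and a line delta into one byte. A worked example using the dex format's constants (DBG_FIRST_SPECIAL = 0x0a, DBG_LINE_BASE = -4, DBG_LINE_RANGE = 15):

    #include <cassert>

    void DexSpecialOpcodeExample() {
      const int kFirstSpecial = 0x0a, kLineBase = -4, kLineRange = 15;
      int adjopcode = 0x53 - kFirstSpecial;               // 83 - 10 = 73
      assert(adjopcode / kLineRange == 4);                // dex_offset += 4
      assert(kLineBase + (adjopcode % kLineRange) == 9);  // java_line += 9
    }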
void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
std::vector<uint8_t>* dbg_info,
std::vector<uint8_t>* dbg_abbrev,
- std::vector<uint8_t>* dbg_str) {
+ std::vector<uint8_t>* dbg_str,
+ std::vector<uint8_t>* dbg_line,
+ uint32_t text_section_offset) {
+ const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
+
+ uint32_t producer_str_offset = PushStr(dbg_str, "Android dex2oat");
+
// Create the debug_abbrev section with boilerplate information.
// We only care about low_pc and high_pc right now for the compilation
// unit and methods.
// Tag 1: Compilation unit: DW_TAG_compile_unit.
- dbg_abbrev->push_back(1);
- dbg_abbrev->push_back(DW_TAG_compile_unit);
+ PushByte(dbg_abbrev, 1);
+ PushByte(dbg_abbrev, DW_TAG_compile_unit);
// There are children (the methods).
- dbg_abbrev->push_back(DW_CHILDREN_yes);
+ PushByte(dbg_abbrev, DW_CHILDREN_yes);
+
+ // DW_AT_producer DW_FORM_data1.
+ // REVIEW: we could get rid of the dbg_str section if
+ // DW_FORM_string (immediate string) were used everywhere instead of
+ // DW_FORM_strp (ref to string from .debug_str section).
+ // DW_FORM_strp makes sense only if we reuse the strings.
+ PushByte(dbg_abbrev, DW_AT_producer);
+ PushByte(dbg_abbrev, DW_FORM_strp);
// DW_AT_language DW_FORM_data1 (the value will be DW_LANG_Java).
- dbg_abbrev->push_back(DW_AT_language);
- dbg_abbrev->push_back(DW_FORM_data1);
+ PushByte(dbg_abbrev, DW_AT_language);
+ PushByte(dbg_abbrev, DW_FORM_data1);
// DW_AT_low_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_low_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_low_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
// DW_AT_high_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_high_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_high_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
+
+ if (dbg_line != nullptr) {
+ // DW_AT_stmt_list DW_FORM_sec_offset.
+ PushByte(dbg_abbrev, DW_AT_stmt_list);
+ PushByte(dbg_abbrev, DW_FORM_sec_offset);
+ }
// End of DW_TAG_compile_unit.
PushHalf(dbg_abbrev, 0);
// Tag 2: Compilation unit: DW_TAG_subprogram.
- dbg_abbrev->push_back(2);
- dbg_abbrev->push_back(DW_TAG_subprogram);
+ PushByte(dbg_abbrev, 2);
+ PushByte(dbg_abbrev, DW_TAG_subprogram);
// There are no children.
- dbg_abbrev->push_back(DW_CHILDREN_no);
+ PushByte(dbg_abbrev, DW_CHILDREN_no);
// Name of the method.
- dbg_abbrev->push_back(DW_AT_name);
- dbg_abbrev->push_back(DW_FORM_strp);
+ PushByte(dbg_abbrev, DW_AT_name);
+ PushByte(dbg_abbrev, DW_FORM_strp);
// DW_AT_low_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_low_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_low_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
// DW_AT_high_pc DW_FORM_addr.
- dbg_abbrev->push_back(DW_AT_high_pc);
- dbg_abbrev->push_back(DW_FORM_addr);
+ PushByte(dbg_abbrev, DW_AT_high_pc);
+ PushByte(dbg_abbrev, DW_FORM_addr);
// End of DW_TAG_subprogram.
PushHalf(dbg_abbrev, 0);
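.debug_abbrev only declares the shape of each DIE (its tag plus attribute/form pairs); .debug_info then stores the abbrev code followed by the attribute values in exactly that order. A sketch of the per-method emission that the loops below perform against abbrev code 2 (hypothetical helper; little-endian like PushWord):

    #include <cstdint>
    #include <vector>

    void EmitSubprogramDie(std::vector<uint8_t>* info, uint32_t name_strp,
                           uint32_t low_pc, uint32_t high_pc) {
      info->push_back(2);  // Abbrev code 2: the DW_TAG_subprogram shape above.
      const uint32_t values[] = {name_strp, low_pc, high_pc};
      for (uint32_t v : values) {
        for (int shift = 0; shift < 32; shift += 8) {
          info->push_back((v >> shift) & 0xff);
        }
      }
    }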
// Start the debug_info section with the header information
// 'unit_length' will be filled in later.
+ int cunit_length = dbg_info->size();
PushWord(dbg_info, 0);
// 'version' - 3.
@@ -1075,55 +1343,153 @@
PushWord(dbg_info, 0);
// Address size: 4.
- dbg_info->push_back(4);
+ PushByte(dbg_info, 4);
// Start the description for the compilation unit.
// This uses tag 1.
- dbg_info->push_back(1);
+ PushByte(dbg_info, 1);
+
+ // The producer is Android dex2oat.
+ PushWord(dbg_info, producer_str_offset);
// The language is Java.
- dbg_info->push_back(DW_LANG_Java);
+ PushByte(dbg_info, DW_LANG_Java);
- // Leave space for low_pc and high_pc.
- int low_pc_offset = dbg_info->size();
+ // low_pc and high_pc.
+ uint32_t cunit_low_pc = 0 - 1;
+ uint32_t cunit_high_pc = 0;
+ int cunit_low_pc_pos = dbg_info->size();
PushWord(dbg_info, 0);
PushWord(dbg_info, 0);
- // Walk through the information in the method table, and enter into dbg_info.
- const std::vector<OatWriter::DebugInfo>& dbg = oat_writer->GetCFIMethodInfo();
- uint32_t low_pc = 0xFFFFFFFFU;
- uint32_t high_pc = 0;
+ if (dbg_line == nullptr) {
+ for (size_t i = 0; i < method_info.size(); ++i) {
+ const OatWriter::DebugInfo &dbg = method_info[i];
- for (uint32_t i = 0; i < dbg.size(); i++) {
- const OatWriter::DebugInfo& info = dbg[i];
- if (info.low_pc_ < low_pc) {
- low_pc = info.low_pc_;
+ cunit_low_pc = std::min(cunit_low_pc, dbg.low_pc_);
+ cunit_high_pc = std::max(cunit_high_pc, dbg.high_pc_);
+
+ // Start a new TAG: subroutine (2).
+ PushByte(dbg_info, 2);
+
+ // Enter name, low_pc, high_pc.
+ PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
+ PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
+ PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
}
- if (info.high_pc_ > high_pc) {
- high_pc = info.high_pc_;
+ } else {
+ // TODO: in gdb, "info functions <regexp>" reports the Java functions, but
+ // the source file is <unknown> because .debug_line is formed as one
+ // compilation unit. To fix this, it is possible to generate
+ // a separate compilation unit for every distinct Java source file.
+ // Each of these compilation units can have several non-adjacent
+ // method ranges.
+
+ // Line number table offset
+ PushWord(dbg_info, dbg_line->size());
+
+ size_t lnt_length = dbg_line->size();
+ PushWord(dbg_line, 0);
+
+ PushHalf(dbg_line, 4); // LNT Version DWARF v4 => 4
+
+ size_t lnt_hdr_length = dbg_line->size();
+ PushWord(dbg_line, 0); // TODO: 64-bit uses 8-byte here
+
+ PushByte(dbg_line, 1); // minimum_instruction_length (ubyte)
+ PushByte(dbg_line, 1); // maximum_operations_per_instruction (ubyte) = always 1
+ PushByte(dbg_line, 1); // default_is_stmt (ubyte)
+
+ const int8_t LINE_BASE = -5;
+ PushByte(dbg_line, LINE_BASE); // line_base (sbyte)
+
+ const uint8_t LINE_RANGE = 14;
+ PushByte(dbg_line, LINE_RANGE); // line_range (ubyte)
+
+ const uint8_t OPCODE_BASE = 13;
+ PushByte(dbg_line, OPCODE_BASE); // opcode_base (ubyte)
+
+ // Standard_opcode_lengths (array of ubyte).
+ PushByte(dbg_line, 0); PushByte(dbg_line, 1); PushByte(dbg_line, 1);
+ PushByte(dbg_line, 1); PushByte(dbg_line, 1); PushByte(dbg_line, 0);
+ PushByte(dbg_line, 0); PushByte(dbg_line, 0); PushByte(dbg_line, 1);
+ PushByte(dbg_line, 0); PushByte(dbg_line, 0); PushByte(dbg_line, 1);
+
+ PushByte(dbg_line, 0); // include_directories (sequence of path names) = EMPTY
+
+ // File_names (sequence of file entries).
+ std::unordered_map<const char*, size_t> files;
+ for (size_t i = 0; i < method_info.size(); ++i) {
+ const OatWriter::DebugInfo &dbg = method_info[i];
+ // TODO: add package directory to the file name
+ const char* file_name = dbg.src_file_name_ == nullptr ? "null" : dbg.src_file_name_;
+ auto found = files.find(file_name);
+ if (found == files.end()) {
+ size_t file_index = 1 + files.size();
+ files[file_name] = file_index;
+ PushStr(dbg_line, file_name);
+ PushByte(dbg_line, 0); // include directory index = LEB128(0) - no directory
+ PushByte(dbg_line, 0); // modification time = LEB128(0) - NA
+ PushByte(dbg_line, 0); // file length = LEB128(0) - NA
+ }
+ }
+ PushByte(dbg_line, 0); // End of file_names.
+
+ // Set lnt header length.
+ UpdateWord(dbg_line, lnt_hdr_length, dbg_line->size() - lnt_hdr_length - 4);
+
+ // Generate Line Number Program code, one long program for all methods.
+ LineTableGenerator line_table_generator(LINE_BASE, LINE_RANGE, OPCODE_BASE,
+ dbg_line, 0, 1);
+
+ SrcMap pc2java_map;
+ for (size_t i = 0; i < method_info.size(); ++i) {
+ const OatWriter::DebugInfo &dbg = method_info[i];
+ const char* file_name = (dbg.src_file_name_ == nullptr) ? "null" : dbg.src_file_name_;
+ size_t file_index = files[file_name];
+ DCHECK_NE(file_index, 0U) << file_name;
+
+ cunit_low_pc = std::min(cunit_low_pc, dbg.low_pc_);
+ cunit_high_pc = std::max(cunit_high_pc, dbg.high_pc_);
+
+ // Start a new TAG: subroutine (2).
+ PushByte(dbg_info, 2);
+
+ // Enter name, low_pc, high_pc.
+ PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
+ PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
+ PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
+
+ pc2java_map.clear();
+ GetLineInfoForJava(dbg.dbgstream_, dbg.compiled_method_->GetSrcMappingTable(),
+ &pc2java_map, dbg.low_pc_);
+ pc2java_map.DeltaFormat({dbg.low_pc_, 1}, dbg.high_pc_);
+
+ line_table_generator.SetFile(file_index);
+ line_table_generator.SetAddr(dbg.low_pc_ + text_section_offset);
+ line_table_generator.SetLine(1);
+ for (auto& src_map_elem : pc2java_map) {
+ line_table_generator.PutDelta(src_map_elem.from_, src_map_elem.to_);
+ }
}
- // Start a new TAG: subroutine (2).
- dbg_info->push_back(2);
+ // End Sequence should have the highest address set.
+ line_table_generator.SetAddr(cunit_high_pc + text_section_offset);
+ line_table_generator.EndSequence();
- // Enter the name into the string table (and NUL terminate).
- uint32_t str_offset = dbg_str->size();
- dbg_str->insert(dbg_str->end(), info.method_name_.begin(), info.method_name_.end());
- dbg_str->push_back('\0');
-
- // Enter name, low_pc, high_pc.
- PushWord(dbg_info, str_offset);
- PushWord(dbg_info, info.low_pc_);
- PushWord(dbg_info, info.high_pc_);
+ // Set lnt length.
+ UpdateWord(dbg_line, lnt_length, dbg_line->size() - lnt_length - 4);
}
// One byte terminator
- dbg_info->push_back(0);
+ PushByte(dbg_info, 0);
- // We have now walked all the methods. Fill in lengths and low/high PCs.
- UpdateWord(dbg_info, 0, dbg_info->size() - 4);
- UpdateWord(dbg_info, low_pc_offset, low_pc);
- UpdateWord(dbg_info, low_pc_offset + 4, high_pc);
+ // Fill in cunit's low_pc and high_pc.
+ UpdateWord(dbg_info, cunit_low_pc_pos, cunit_low_pc + text_section_offset);
+ UpdateWord(dbg_info, cunit_low_pc_pos + 4, cunit_high_pc + text_section_offset);
+
+ // We have now walked all the methods. Fill in lengths.
+ UpdateWord(dbg_info, cunit_length, dbg_info->size() - cunit_length - 4);
}
} // namespace art
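cunit_length, lnt_length and lnt_hdr_length all use the same reserve-then-patch idiom: push a zero placeholder, remember its position, emit the body, then patch the placeholder with the body's size. A self-contained sketch with hypothetical helper names:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    size_t BeginLengthPrefixed(std::vector<uint8_t>* buf) {
      size_t pos = buf->size();
      buf->insert(buf->end(), 4, 0);  // Placeholder for the 4-byte length.
      return pos;
    }

    void EndLengthPrefixed(std::vector<uint8_t>* buf, size_t pos) {
      uint32_t length = buf->size() - pos - 4;  // Exclude the length field.
      for (int i = 0; i < 4; ++i) {
        (*buf)[pos + i] = (length >> (8 * i)) & 0xff;  // Little-endian patch.
      }
    }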
diff --git a/compiler/elf_writer_quick.h b/compiler/elf_writer_quick.h
index 8cfe550..c7ef872 100644
--- a/compiler/elf_writer_quick.h
+++ b/compiler/elf_writer_quick.h
@@ -48,7 +48,7 @@
~ElfWriterQuick() {}
class ElfBuilder;
- void WriteDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer);
+ void WriteDebugSymbols(ElfBuilder* builder, OatWriter* oat_writer);
void ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug);
class ElfSectionBuilder {
@@ -237,6 +237,7 @@
}
~ElfBuilder() {}
+ bool Init();
bool Write();
// Adds the given raw section to the builder. This will copy it. The caller
@@ -253,8 +254,29 @@
bool fatal_error_ = false;
+ // Program header table layout.
+ static const uint32_t PHDR_OFFSET = sizeof(Elf32_Ehdr);
+ enum : uint8_t {
+ PH_PHDR = 0,
+ PH_LOAD_R__ = 1,
+ PH_LOAD_R_X = 2,
+ PH_LOAD_RW_ = 3,
+ PH_DYNAMIC = 4,
+ PH_NUM = 5,
+ };
+ static const uint32_t PHDR_SIZE = sizeof(Elf32_Phdr) * PH_NUM;
+ Elf32_Phdr program_headers_[PH_NUM];
+
Elf32_Ehdr elf_header_;
+ Elf32_Shdr null_hdr_;
+ std::string shstrtab_;
+ uint32_t section_index_;
+ std::string dynstr_;
+ uint32_t dynstr_soname_offset_;
+ std::vector<Elf32_Shdr*> section_ptrs_;
+ std::vector<Elf32_Word> hash_;
+
public:
ElfOatSectionBuilder text_builder_;
ElfOatSectionBuilder rodata_builder_;
@@ -316,7 +338,8 @@
* @param dbg_str Debug strings.
*/
void FillInCFIInformation(OatWriter* oat_writer, std::vector<uint8_t>* dbg_info,
- std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str);
+ std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str,
+ std::vector<uint8_t>* dbg_line, uint32_t text_section_offset);
DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
};
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index ba7e13f..9c9cdf2 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -232,7 +232,7 @@
size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
std::string error_msg;
image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE,
- true, &error_msg));
+ false, &error_msg));
if (UNLIKELY(image_.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
return false;
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 75d3030..a21004c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -16,6 +16,8 @@
#include <memory>
+#include <math.h>
+
#include "class_linker.h"
#include "common_compiler_test.h"
#include "dex_file.h"
@@ -46,6 +48,15 @@
class JniCompilerTest : public CommonCompilerTest {
protected:
+ void SetUp() OVERRIDE {
+ CommonCompilerTest::SetUp();
+ check_generic_jni_ = false;
+ }
+
+ void SetCheckGenericJni(bool generic) {
+ check_generic_jni_ = generic;
+ }
+
void CompileForTest(jobject class_loader, bool direct,
const char* method_name, const char* method_sig) {
ScopedObjectAccess soa(Thread::Current());
@@ -61,13 +72,17 @@
method = c->FindVirtualMethod(method_name, method_sig);
}
ASSERT_TRUE(method != nullptr) << method_name << " " << method_sig;
- if (method->GetEntryPointFromQuickCompiledCode() == nullptr ||
- method->GetEntryPointFromQuickCompiledCode() == class_linker_->GetQuickGenericJniTrampoline()) {
- CompileMethod(method);
- ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
- << method_name << " " << method_sig;
- ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() != nullptr)
- << method_name << " " << method_sig;
+ if (check_generic_jni_) {
+ method->SetEntryPointFromQuickCompiledCode(class_linker_->GetQuickGenericJniTrampoline());
+ } else {
+ if (method->GetEntryPointFromQuickCompiledCode() == nullptr ||
+ method->GetEntryPointFromQuickCompiledCode() == class_linker_->GetQuickGenericJniTrampoline()) {
+ CompileMethod(method);
+ ASSERT_TRUE(method->GetEntryPointFromQuickCompiledCode() != nullptr)
+ << method_name << " " << method_sig;
+ ASSERT_TRUE(method->GetEntryPointFromPortableCompiledCode() != nullptr)
+ << method_name << " " << method_sig;
+ }
}
}
@@ -115,16 +130,65 @@
static jobject jobj_;
static jobject class_loader_;
-
protected:
+ // We have to list the methods here so we can share them between default and generic JNI.
+ void CompileAndRunNoArgMethodImpl();
+ void CompileAndRunIntMethodThroughStubImpl();
+ void CompileAndRunStaticIntMethodThroughStubImpl();
+ void CompileAndRunIntMethodImpl();
+ void CompileAndRunIntIntMethodImpl();
+ void CompileAndRunLongLongMethodImpl();
+ void CompileAndRunDoubleDoubleMethodImpl();
+ void CompileAndRun_fooJJ_synchronizedImpl();
+ void CompileAndRunIntObjectObjectMethodImpl();
+ void CompileAndRunStaticIntIntMethodImpl();
+ void CompileAndRunStaticDoubleDoubleMethodImpl();
+ void RunStaticLogDoubleMethodImpl();
+ void RunStaticLogFloatMethodImpl();
+ void RunStaticReturnTrueImpl();
+ void RunStaticReturnFalseImpl();
+ void RunGenericStaticReturnIntImpl();
+ void CompileAndRunStaticIntObjectObjectMethodImpl();
+ void CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl();
+ void ExceptionHandlingImpl();
+ void NativeStackTraceElementImpl();
+ void ReturnGlobalRefImpl();
+ void LocalReferenceTableClearingTestImpl();
+ void JavaLangSystemArrayCopyImpl();
+ void CompareAndSwapIntImpl();
+ void GetTextImpl();
+ void GetSinkPropertiesNativeImpl();
+ void UpcallReturnTypeChecking_InstanceImpl();
+ void UpcallReturnTypeChecking_StaticImpl();
+ void UpcallArgumentTypeChecking_InstanceImpl();
+ void UpcallArgumentTypeChecking_StaticImpl();
+ void CompileAndRunFloatFloatMethodImpl();
+ void CheckParameterAlignImpl();
+ void MaxParamNumberImpl();
+ void WithoutImplementationImpl();
+ void StackArgsIntsFirstImpl();
+ void StackArgsFloatsFirstImpl();
+ void StackArgsMixedImpl();
+
JNIEnv* env_;
jmethodID jmethod_;
+ bool check_generic_jni_;
};
jclass JniCompilerTest::jklass_;
jobject JniCompilerTest::jobj_;
jobject JniCompilerTest::class_loader_;
+#define JNI_TEST(TestName) \
+ TEST_F(JniCompilerTest, TestName ## Default) { \
+ TestName ## Impl(); \
+ } \
+ \
+ TEST_F(JniCompilerTest, TestName ## Generic) { \
+ TEST_DISABLED_FOR_MIPS(); \
+ SetCheckGenericJni(true); \
+ TestName ## Impl(); \
+ }
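For reference, JNI_TEST(Foo) expands to two gtest cases sharing one FooImpl body; the Generic variant first routes the method through the generic JNI trampoline:

    // Expansion of JNI_TEST(Foo), spelled out against the fixture above.
    TEST_F(JniCompilerTest, FooDefault) {
      FooImpl();  // Exercises the normally compiled JNI stub.
    }

    TEST_F(JniCompilerTest, FooGeneric) {
      TEST_DISABLED_FOR_MIPS();  // Generic JNI is not exercised on MIPS.
      SetCheckGenericJni(true);  // Force the generic JNI trampoline entry point.
      FooImpl();
    }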
int gJava_MyClassNatives_foo_calls = 0;
void Java_MyClassNatives_foo(JNIEnv* env, jobject thisObj) {
@@ -139,7 +203,7 @@
EXPECT_EQ(1U, Thread::Current()->NumStackReferences());
}
-TEST_F(JniCompilerTest, CompileAndRunNoArgMethod) {
+void JniCompilerTest::CompileAndRunNoArgMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
@@ -148,9 +212,13 @@
EXPECT_EQ(1, gJava_MyClassNatives_foo_calls);
env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
EXPECT_EQ(2, gJava_MyClassNatives_foo_calls);
+
+ gJava_MyClassNatives_foo_calls = 0;
}
-TEST_F(JniCompilerTest, CompileAndRunIntMethodThroughStub) {
+JNI_TEST(CompileAndRunNoArgMethod)
+
+void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "bar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_bar
@@ -163,7 +231,9 @@
EXPECT_EQ(25, result);
}
-TEST_F(JniCompilerTest, CompileAndRunStaticIntMethodThroughStub) {
+JNI_TEST(CompileAndRunIntMethodThroughStub)
+
+void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "sbar", "(I)I", nullptr);
// calling through stub will link with &Java_MyClassNatives_sbar
@@ -176,6 +246,8 @@
EXPECT_EQ(43, result);
}
+JNI_TEST(CompileAndRunStaticIntMethodThroughStub)
+
int gJava_MyClassNatives_fooI_calls = 0;
jint Java_MyClassNatives_fooI(JNIEnv* env, jobject thisObj, jint x) {
// 1 = thisObj
@@ -189,7 +261,7 @@
return x;
}
-TEST_F(JniCompilerTest, CompileAndRunIntMethod) {
+void JniCompilerTest::CompileAndRunIntMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooI));
@@ -201,8 +273,12 @@
result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 0xCAFED00D);
EXPECT_EQ(static_cast<jint>(0xCAFED00D), result);
EXPECT_EQ(2, gJava_MyClassNatives_fooI_calls);
+
+ gJava_MyClassNatives_fooI_calls = 0;
}
+JNI_TEST(CompileAndRunIntMethod)
+
int gJava_MyClassNatives_fooII_calls = 0;
jint Java_MyClassNatives_fooII(JNIEnv* env, jobject thisObj, jint x, jint y) {
// 1 = thisObj
@@ -216,7 +292,7 @@
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunIntIntMethod) {
+void JniCompilerTest::CompileAndRunIntIntMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooII", "(II)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooII));
@@ -229,8 +305,12 @@
0xCAFED00D);
EXPECT_EQ(static_cast<jint>(0xCAFEBABE - 0xCAFED00D), result);
EXPECT_EQ(2, gJava_MyClassNatives_fooII_calls);
+
+ gJava_MyClassNatives_fooII_calls = 0;
}
+JNI_TEST(CompileAndRunIntIntMethod)
+
int gJava_MyClassNatives_fooJJ_calls = 0;
jlong Java_MyClassNatives_fooJJ(JNIEnv* env, jobject thisObj, jlong x, jlong y) {
// 1 = thisObj
@@ -244,7 +324,7 @@
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunLongLongMethod) {
+void JniCompilerTest::CompileAndRunLongLongMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooJJ", "(JJ)J",
reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ));
@@ -258,8 +338,12 @@
result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, b, a);
EXPECT_EQ(b - a, result);
EXPECT_EQ(2, gJava_MyClassNatives_fooJJ_calls);
+
+ gJava_MyClassNatives_fooJJ_calls = 0;
}
+JNI_TEST(CompileAndRunLongLongMethod)
+
int gJava_MyClassNatives_fooDD_calls = 0;
jdouble Java_MyClassNatives_fooDD(JNIEnv* env, jobject thisObj, jdouble x, jdouble y) {
// 1 = thisObj
@@ -273,7 +357,7 @@
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunDoubleDoubleMethod) {
+void JniCompilerTest::CompileAndRunDoubleDoubleMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooDD", "(DD)D",
reinterpret_cast<void*>(&Java_MyClassNatives_fooDD));
@@ -288,6 +372,8 @@
result = env_->CallNonvirtualDoubleMethod(jobj_, jklass_, jmethod_, a, b);
EXPECT_EQ(a - b, result);
EXPECT_EQ(2, gJava_MyClassNatives_fooDD_calls);
+
+ gJava_MyClassNatives_fooDD_calls = 0;
}
int gJava_MyClassNatives_fooJJ_synchronized_calls = 0;
@@ -303,7 +389,7 @@
return x | y;
}
-TEST_F(JniCompilerTest, CompileAndRun_fooJJ_synchronized) {
+void JniCompilerTest::CompileAndRun_fooJJ_synchronizedImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooJJ_synchronized", "(JJ)J",
reinterpret_cast<void*>(&Java_MyClassNatives_fooJJ_synchronized));
@@ -314,8 +400,12 @@
jlong result = env_->CallNonvirtualLongMethod(jobj_, jklass_, jmethod_, a, b);
EXPECT_EQ(a | b, result);
EXPECT_EQ(1, gJava_MyClassNatives_fooJJ_synchronized_calls);
+
+ gJava_MyClassNatives_fooJJ_synchronized_calls = 0;
}
+JNI_TEST(CompileAndRun_fooJJ_synchronized)
+
int gJava_MyClassNatives_fooIOO_calls = 0;
jobject Java_MyClassNatives_fooIOO(JNIEnv* env, jobject thisObj, jint x, jobject y,
jobject z) {
@@ -339,7 +429,7 @@
}
}
-TEST_F(JniCompilerTest, CompileAndRunIntObjectObjectMethod) {
+void JniCompilerTest::CompileAndRunIntObjectObjectMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
@@ -369,8 +459,12 @@
result = env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, 2, jklass_, nullptr);
EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooIOO_calls);
+
+ gJava_MyClassNatives_fooIOO_calls = 0;
}
+JNI_TEST(CompileAndRunIntObjectObjectMethod)
+
int gJava_MyClassNatives_fooSII_calls = 0;
jint Java_MyClassNatives_fooSII(JNIEnv* env, jclass klass, jint x, jint y) {
// 1 = klass
@@ -384,7 +478,7 @@
return x + y;
}
-TEST_F(JniCompilerTest, CompileAndRunStaticIntIntMethod) {
+void JniCompilerTest::CompileAndRunStaticIntIntMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSII", "(II)I",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSII));
@@ -393,8 +487,12 @@
jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 20, 30);
EXPECT_EQ(50, result);
EXPECT_EQ(1, gJava_MyClassNatives_fooSII_calls);
+
+ gJava_MyClassNatives_fooSII_calls = 0;
}
+JNI_TEST(CompileAndRunStaticIntIntMethod)
+
int gJava_MyClassNatives_fooSDD_calls = 0;
jdouble Java_MyClassNatives_fooSDD(JNIEnv* env, jclass klass, jdouble x, jdouble y) {
// 1 = klass
@@ -408,7 +506,7 @@
return x - y; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunStaticDoubleDoubleMethod) {
+void JniCompilerTest::CompileAndRunStaticDoubleDoubleMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSDD", "(DD)D",
reinterpret_cast<void*>(&Java_MyClassNatives_fooSDD));
@@ -422,8 +520,87 @@
result = env_->CallStaticDoubleMethod(jklass_, jmethod_, a, b);
EXPECT_EQ(a - b, result);
EXPECT_EQ(2, gJava_MyClassNatives_fooSDD_calls);
+
+ gJava_MyClassNatives_fooSDD_calls = 0;
}
+JNI_TEST(CompileAndRunStaticDoubleDoubleMethod)
+
+// The x86 generic JNI code had a bug where it assumed a floating
+// point return value would be in xmm0. We use log() to ensure that
+// the compiler returns the value on the x87 floating-point stack.
+
+jdouble Java_MyClassNatives_logD(JNIEnv* env, jclass klass, jdouble x) {
+ return log(x);
+}
+
+void JniCompilerTest::RunStaticLogDoubleMethodImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "logD", "(D)D", reinterpret_cast<void*>(&Java_MyClassNatives_logD));
+
+ jdouble result = env_->CallStaticDoubleMethod(jklass_, jmethod_, 2.0);
+ EXPECT_EQ(log(2.0), result);
+}
+
+JNI_TEST(RunStaticLogDoubleMethod)
+
+jfloat Java_MyClassNatives_logF(JNIEnv* env, jclass klass, jfloat x) {
+ return logf(x);
+}
+
+void JniCompilerTest::RunStaticLogFloatMethodImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "logF", "(F)F", reinterpret_cast<void*>(&Java_MyClassNatives_logF));
+
+ jfloat result = env_->CallStaticFloatMethod(jklass_, jmethod_, 2.0);
+ EXPECT_EQ(logf(2.0), result);
+}
+
+JNI_TEST(RunStaticLogFloatMethod)
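For context on the regression being tested here: on 32-bit x86 the C calling convention returns float/double in st(0) on the x87 stack, not in xmm0, so a trampoline that copied xmm0 into the managed return slot read a register that was never written. A hedged illustration of the bug pattern (native_log is a stand-in name, not ART code):

// On x86-32 cdecl, the result of native_log() comes back in st(0).
extern "C" double native_log(double x);

double BrokenReturnRead() {
  double v;
  native_log(2.0);                  // real result now sits in st(0)
  asm volatile("movsd %%xmm0, %0"   // the bug: xmm0 was never written
               : "=m"(v));
  return v;
}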
+
+jboolean Java_MyClassNatives_returnTrue(JNIEnv* env, jclass klass) {
+ return JNI_TRUE;
+}
+
+jboolean Java_MyClassNatives_returnFalse(JNIEnv* env, jclass klass) {
+ return JNI_FALSE;
+}
+
+jint Java_MyClassNatives_returnInt(JNIEnv* env, jclass klass) {
+ return 42;
+}
+
+void JniCompilerTest::RunStaticReturnTrueImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "returnTrue", "()Z", reinterpret_cast<void*>(&Java_MyClassNatives_returnTrue));
+
+ jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_);
+ EXPECT_TRUE(result);
+}
+
+JNI_TEST(RunStaticReturnTrue)
+
+void JniCompilerTest::RunStaticReturnFalseImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "returnFalse", "()Z",
+ reinterpret_cast<void*>(&Java_MyClassNatives_returnFalse));
+
+ jboolean result = env_->CallStaticBooleanMethod(jklass_, jmethod_);
+ EXPECT_FALSE(result);
+}
+
+JNI_TEST(RunStaticReturnFalse)
+
+void JniCompilerTest::RunGenericStaticReturnIntImpl() {
+ TEST_DISABLED_FOR_PORTABLE();
+ SetUpForTest(true, "returnInt", "()I", reinterpret_cast<void*>(&Java_MyClassNatives_returnInt));
+
+ jint result = env_->CallStaticIntMethod(jklass_, jmethod_);
+ EXPECT_EQ(42, result);
+}
+
+JNI_TEST(RunGenericStaticReturnInt)
+
int gJava_MyClassNatives_fooSIOO_calls = 0;
jobject Java_MyClassNatives_fooSIOO(JNIEnv* env, jclass klass, jint x, jobject y,
jobject z) {
@@ -448,7 +625,7 @@
}
-TEST_F(JniCompilerTest, CompileAndRunStaticIntObjectObjectMethod) {
+void JniCompilerTest::CompileAndRunStaticIntObjectObjectMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
@@ -478,8 +655,12 @@
result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooSIOO_calls);
+
+ gJava_MyClassNatives_fooSIOO_calls = 0;
}
+JNI_TEST(CompileAndRunStaticIntObjectObjectMethod)
+
int gJava_MyClassNatives_fooSSIOO_calls = 0;
jobject Java_MyClassNatives_fooSSIOO(JNIEnv* env, jclass klass, jint x, jobject y, jobject z) {
// 3 = klass + y + z
@@ -502,7 +683,7 @@
}
}
-TEST_F(JniCompilerTest, CompileAndRunStaticSynchronizedIntObjectObjectMethod) {
+void JniCompilerTest::CompileAndRunStaticSynchronizedIntObjectObjectMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "fooSSIOO",
"(ILjava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;",
@@ -532,14 +713,18 @@
result = env_->CallStaticObjectMethod(jklass_, jmethod_, 2, jobj_, nullptr);
EXPECT_TRUE(env_->IsSameObject(nullptr, result));
EXPECT_EQ(7, gJava_MyClassNatives_fooSSIOO_calls);
+
+ gJava_MyClassNatives_fooSSIOO_calls = 0;
}
+JNI_TEST(CompileAndRunStaticSynchronizedIntObjectObjectMethod)
+
void Java_MyClassNatives_throwException(JNIEnv* env, jobject) {
jclass c = env->FindClass("java/lang/RuntimeException");
env->ThrowNew(c, "hello");
}
-TEST_F(JniCompilerTest, ExceptionHandling) {
+void JniCompilerTest::ExceptionHandlingImpl() {
TEST_DISABLED_FOR_PORTABLE();
{
ASSERT_FALSE(runtime_->IsStarted());
@@ -580,8 +765,12 @@
SetUpForTest(false, "foo", "()V", reinterpret_cast<void*>(&Java_MyClassNatives_foo));
env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_);
EXPECT_EQ(2, gJava_MyClassNatives_foo_calls);
+
+ gJava_MyClassNatives_foo_calls = 0;
}
+JNI_TEST(ExceptionHandling)
+
jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) {
if (i <= 0) {
// We want to check raw Object* / Array* below
@@ -620,7 +809,7 @@
}
}
-TEST_F(JniCompilerTest, NativeStackTraceElement) {
+void JniCompilerTest::NativeStackTraceElementImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I",
reinterpret_cast<void*>(&Java_MyClassNatives_nativeUpCall));
@@ -628,11 +817,13 @@
EXPECT_EQ(10+9+8+7+6+5+4+3+2+1, result);
}
+JNI_TEST(NativeStackTraceElement)
+
jobject Java_MyClassNatives_fooO(JNIEnv* env, jobject, jobject x) {
return env->NewGlobalRef(x);
}
-TEST_F(JniCompilerTest, ReturnGlobalRef) {
+void JniCompilerTest::ReturnGlobalRefImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooO", "(Ljava/lang/Object;)Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_fooO));
@@ -641,6 +832,8 @@
EXPECT_TRUE(env_->IsSameObject(result, jobj_));
}
+JNI_TEST(ReturnGlobalRef)
+
jint local_ref_test(JNIEnv* env, jobject thisObj, jint x) {
// Add 10 local references
ScopedObjectAccess soa(env);
@@ -650,7 +843,7 @@
return x+1;
}
-TEST_F(JniCompilerTest, LocalReferenceTableClearingTest) {
+void JniCompilerTest::LocalReferenceTableClearingTestImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "fooI", "(I)I", reinterpret_cast<void*>(&local_ref_test));
// 1000 invocations of a method that adds 10 local references
@@ -660,6 +853,8 @@
}
}
+JNI_TEST(LocalReferenceTableClearingTest)
+
void my_arraycopy(JNIEnv* env, jclass klass, jobject src, jint src_pos, jobject dst, jint dst_pos, jint length) {
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, klass));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jklass_, dst));
@@ -669,13 +864,15 @@
EXPECT_EQ(9876, length);
}
-TEST_F(JniCompilerTest, JavaLangSystemArrayCopy) {
+void JniCompilerTest::JavaLangSystemArrayCopyImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "arraycopy", "(Ljava/lang/Object;ILjava/lang/Object;II)V",
reinterpret_cast<void*>(&my_arraycopy));
env_->CallStaticVoidMethod(jklass_, jmethod_, jobj_, 1234, jklass_, 5678, 9876);
}
+JNI_TEST(JavaLangSystemArrayCopy)
+
jboolean my_casi(JNIEnv* env, jobject unsafe, jobject obj, jlong offset, jint expected, jint newval) {
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, unsafe));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj));
@@ -685,7 +882,7 @@
return JNI_TRUE;
}
-TEST_F(JniCompilerTest, CompareAndSwapInt) {
+void JniCompilerTest::CompareAndSwapIntImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "compareAndSwapInt", "(Ljava/lang/Object;JII)Z",
reinterpret_cast<void*>(&my_casi));
@@ -694,6 +891,8 @@
EXPECT_EQ(result, JNI_TRUE);
}
+JNI_TEST(CompareAndSwapInt)
+
jint my_gettext(JNIEnv* env, jclass klass, jlong val1, jobject obj1, jlong val2, jobject obj2) {
EXPECT_TRUE(env->IsInstanceOf(JniCompilerTest::jobj_, klass));
EXPECT_TRUE(env->IsSameObject(JniCompilerTest::jobj_, obj1));
@@ -703,7 +902,7 @@
return 42;
}
-TEST_F(JniCompilerTest, GetText) {
+void JniCompilerTest::GetTextImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "getText", "(JLjava/lang/Object;JLjava/lang/Object;)I",
reinterpret_cast<void*>(&my_gettext));
@@ -712,6 +911,8 @@
EXPECT_EQ(result, 42);
}
+JNI_TEST(GetText)
+
int gJava_MyClassNatives_GetSinkProperties_calls = 0;
jarray Java_MyClassNatives_GetSinkProperties(JNIEnv* env, jobject thisObj, jstring s) {
// 1 = thisObj
@@ -729,7 +930,7 @@
return nullptr;
}
-TEST_F(JniCompilerTest, GetSinkPropertiesNative) {
+void JniCompilerTest::GetSinkPropertiesNativeImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "getSinkPropertiesNative", "(Ljava/lang/String;)[Ljava/lang/Object;",
reinterpret_cast<void*>(&Java_MyClassNatives_GetSinkProperties));
@@ -739,8 +940,12 @@
env_->CallNonvirtualObjectMethod(jobj_, jklass_, jmethod_, nullptr));
EXPECT_EQ(nullptr, result);
EXPECT_EQ(1, gJava_MyClassNatives_GetSinkProperties_calls);
+
+ gJava_MyClassNatives_GetSinkProperties_calls = 0;
}
+JNI_TEST(GetSinkPropertiesNative)
+
// This should return jclass, but we're imitating a bug pattern.
jobject Java_MyClassNatives_instanceMethodThatShouldReturnClass(JNIEnv* env, jobject) {
return env->NewStringUTF("not a class!");
@@ -751,7 +956,7 @@
return env->NewStringUTF("not a class!");
}
-TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Instance) {
+void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "instanceMethodThatShouldReturnClass", "()Ljava/lang/Class;",
reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldReturnClass));
@@ -769,7 +974,9 @@
check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticObjectMethodV");
}
-TEST_F(JniCompilerTest, UpcallReturnTypeChecking_Static) {
+JNI_TEST(UpcallReturnTypeChecking_Instance)
+
+void JniCompilerTest::UpcallReturnTypeChecking_StaticImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "staticMethodThatShouldReturnClass", "()Ljava/lang/Class;",
reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldReturnClass));
@@ -787,6 +994,8 @@
check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallObjectMethodV");
}
+JNI_TEST(UpcallReturnTypeChecking_Static)
+
// This should take jclass, but we're imitating a bug pattern.
void Java_MyClassNatives_instanceMethodThatShouldTakeClass(JNIEnv*, jobject, jclass) {
}
@@ -795,7 +1004,7 @@
void Java_MyClassNatives_staticMethodThatShouldTakeClass(JNIEnv*, jclass, jclass) {
}
-TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Instance) {
+void JniCompilerTest::UpcallArgumentTypeChecking_InstanceImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "instanceMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
reinterpret_cast<void*>(&Java_MyClassNatives_instanceMethodThatShouldTakeClass));
@@ -806,7 +1015,9 @@
check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.instanceMethodThatShouldTakeClass(int, java.lang.Class)");
}
-TEST_F(JniCompilerTest, UpcallArgumentTypeChecking_Static) {
+JNI_TEST(UpcallArgumentTypeChecking_Instance)
+
+void JniCompilerTest::UpcallArgumentTypeChecking_StaticImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "staticMethodThatShouldTakeClass", "(ILjava/lang/Class;)V",
reinterpret_cast<void*>(&Java_MyClassNatives_staticMethodThatShouldTakeClass));
@@ -817,6 +1028,8 @@
check_jni_abort_catcher.Check("bad arguments passed to void MyClassNatives.staticMethodThatShouldTakeClass(int, java.lang.Class)");
}
+JNI_TEST(UpcallArgumentTypeChecking_Static)
+
jfloat Java_MyClassNatives_checkFloats(JNIEnv* env, jobject thisObj, jfloat f1, jfloat f2) {
EXPECT_EQ(kNative, Thread::Current()->GetState());
EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
@@ -827,7 +1040,7 @@
return f1 - f2; // non-commutative operator
}
-TEST_F(JniCompilerTest, CompileAndRunFloatFloatMethod) {
+void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "checkFloats", "(FF)F",
reinterpret_cast<void*>(&Java_MyClassNatives_checkFloats));
@@ -841,6 +1054,8 @@
EXPECT_EQ(a - b, result);
}
+JNI_TEST(CompileAndRunFloatFloatMethod)
+
void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
// EXPECT_EQ(kNative, Thread::Current()->GetState());
// EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
@@ -852,7 +1067,7 @@
EXPECT_EQ(l1, INT64_C(0x12345678ABCDEF0));
}
-TEST_F(JniCompilerTest, CheckParameterAlign) {
+void JniCompilerTest::CheckParameterAlignImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "checkParameterAlign", "(IJ)V",
reinterpret_cast<void*>(&Java_MyClassNatives_checkParameterAlign));
@@ -860,6 +1075,8 @@
env_->CallNonvirtualVoidMethod(jobj_, jklass_, jmethod_, 1234, INT64_C(0x12345678ABCDEF0));
}
+JNI_TEST(CheckParameterAlign)
+
void Java_MyClassNatives_maxParamNumber(JNIEnv* env, jobject thisObj,
jobject o0, jobject o1, jobject o2, jobject o3, jobject o4, jobject o5, jobject o6, jobject o7,
jobject o8, jobject o9, jobject o10, jobject o11, jobject o12, jobject o13, jobject o14, jobject o15,
@@ -1265,7 +1482,7 @@
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;"
"Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;Ljava/lang/Object;)V";
-TEST_F(JniCompilerTest, MaxParamNumber) {
+void JniCompilerTest::MaxParamNumberImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "maxParamNumber", longSig,
reinterpret_cast<void*>(&Java_MyClassNatives_maxParamNumber));
@@ -1289,7 +1506,9 @@
env_->CallNonvirtualVoidMethodA(jobj_, jklass_, jmethod_, args);
}
-TEST_F(JniCompilerTest, WithoutImplementation) {
+JNI_TEST(MaxParamNumber)
+
+void JniCompilerTest::WithoutImplementationImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(false, "withoutImplementation", "()V", nullptr);
@@ -1299,6 +1518,8 @@
EXPECT_TRUE(env_->ExceptionCheck() == JNI_TRUE);
}
+JNI_TEST(WithoutImplementation)
+
void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1, jint i2, jint i3,
jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
@@ -1337,7 +1558,7 @@
EXPECT_EQ(i20, 20);
}
-TEST_F(JniCompilerTest, StackArgsIntsFirst) {
+void JniCompilerTest::StackArgsIntsFirstImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsIntsFirst", "(IIIIIIIIIIFFFFFFFFFF)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsIntsFirst));
@@ -1368,6 +1589,8 @@
f3, f4, f5, f6, f7, f8, f9, f10);
}
+JNI_TEST(StackArgsIntsFirst)
+
void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv* env, jclass klass, jfloat f1, jfloat f2,
jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7,
jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2,
@@ -1406,7 +1629,7 @@
EXPECT_EQ(i20, 20);
}
-TEST_F(JniCompilerTest, StackArgsFloatsFirst) {
+void JniCompilerTest::StackArgsFloatsFirstImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsFloatsFirst", "(FFFFFFFFFFIIIIIIIIII)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsFloatsFirst));
@@ -1437,6 +1660,8 @@
i4, i5, i6, i7, i8, i9, i10);
}
+JNI_TEST(StackArgsFloatsFirst)
+
void Java_MyClassNatives_stackArgsMixed(JNIEnv* env, jclass klass, jint i1, jfloat f1, jint i2,
jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5,
jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8,
@@ -1474,7 +1699,7 @@
EXPECT_EQ(i20, 20);
}
-TEST_F(JniCompilerTest, StackArgsMixed) {
+void JniCompilerTest::StackArgsMixedImpl() {
TEST_DISABLED_FOR_PORTABLE();
SetUpForTest(true, "stackArgsMixed", "(IFIFIFIFIFIFIFIFIFIF)V",
reinterpret_cast<void*>(&Java_MyClassNatives_stackArgsMixed));
@@ -1505,4 +1730,6 @@
f7, i8, f8, i9, f9, i10, f10);
}
+JNI_TEST(StackArgsMixed)
+
} // namespace art
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index 1ba5d32..41a91ed 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -18,6 +18,7 @@
#include <zlib.h>
+#include "base/allocator.h"
#include "base/bit_vector.h"
#include "base/stl_util.h"
#include "base/unix_file/fd_file.h"
@@ -411,10 +412,12 @@
const uint32_t quick_code_start = quick_code_offset -
writer_->oat_header_->GetExecutableOffset();
+ const DexFile::CodeItem *code_item = it.GetMethodCodeItem();
writer_->method_info_.push_back(DebugInfo(name,
- quick_code_start,
- quick_code_start + code_size,
- compiled_method));
+ dex_file_->GetSourceFile(dex_file_->GetClassDef(class_def_index_)),
+ quick_code_start, quick_code_start + code_size,
+ code_item == nullptr ? nullptr : dex_file_->GetDebugInfoStream(code_item),
+ compiled_method));
}
}
diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h
index ef5fd6b..11f8bff 100644
--- a/compiler/oat_writer.h
+++ b/compiler/oat_writer.h
@@ -98,14 +98,18 @@
~OatWriter();
struct DebugInfo {
- DebugInfo(const std::string& method_name, uint32_t low_pc, uint32_t high_pc,
+ DebugInfo(const std::string& method_name, const char* src_file_name,
+ uint32_t low_pc, uint32_t high_pc, const uint8_t* dbgstream,
CompiledMethod* compiled_method)
- : method_name_(method_name), low_pc_(low_pc), high_pc_(high_pc),
+ : method_name_(method_name), src_file_name_(src_file_name),
+ low_pc_(low_pc), high_pc_(high_pc), dbgstream_(dbgstream),
compiled_method_(compiled_method) {
}
std::string method_name_; // Note: this name is a pretty-printed name.
+ const char* src_file_name_;
uint32_t low_pc_;
uint32_t high_pc_;
+ const uint8_t* dbgstream_;
CompiledMethod* compiled_method_;
};
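A hedged sketch of how the widened constructor is fed from the oat_writer.cc hunk above; the helper and its parameter list are illustrative, not ART API:

// Builds a DebugInfo entry with everything the ELF/DWARF writer needs:
// pretty-printed name, source file, native pc range, and the raw dex
// debug-info stream (line table) when the method has a code item.
OatWriter::DebugInfo MakeDebugInfo(const std::string& name,
                                   const DexFile& dex_file,
                                   const DexFile::ClassDef& class_def,
                                   const DexFile::CodeItem* code_item,
                                   uint32_t low_pc, uint32_t high_pc,
                                   CompiledMethod* method) {
  const uint8_t* dbgstream =
      (code_item == nullptr) ? nullptr : dex_file.GetDebugInfoStream(code_item);
  return OatWriter::DebugInfo(name, dex_file.GetSourceFile(class_def),
                              low_pc, high_pc, dbgstream, method);
}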
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index bd8c27e..7269fff 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -19,6 +19,7 @@
#include "code_generator_arm.h"
#include "code_generator_x86.h"
#include "code_generator_x86_64.h"
+#include "compiled_method.h"
#include "dex/verified_method.h"
#include "driver/dex_compilation_unit.h"
#include "gc_map_builder.h"
@@ -297,7 +298,7 @@
}
}
-void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
+void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_map) const {
uint32_t pc2dex_data_size = 0u;
uint32_t pc2dex_entries = pc_infos_.Size();
uint32_t pc2dex_offset = 0u;
@@ -305,6 +306,10 @@
uint32_t dex2pc_data_size = 0u;
uint32_t dex2pc_entries = 0u;
+ if (src_map != nullptr) {
+ src_map->reserve(pc2dex_entries);
+ }
+
// We currently only have pc2dex entries.
for (size_t i = 0; i < pc2dex_entries; i++) {
struct PcInfo pc_info = pc_infos_.Get(i);
@@ -312,6 +317,9 @@
pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
pc2dex_offset = pc_info.native_pc;
pc2dex_dalvik_offset = pc_info.dex_pc;
+ if (src_map != nullptr) {
+ src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
+ }
}
uint32_t total_entries = pc2dex_entries + dex2pc_entries;
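The SrcMap filled in above is just a native-pc to dex-pc table collected while the LEB128 mapping table is being encoded. A minimal sketch of the assumed shape of the types (the real definitions live in compiled_method.h; the exact field types are a guess):

#include <cstdint>
#include <vector>

struct SrcMapElem {
  uint32_t from_;  // native pc (code offset)
  int32_t to_;     // dex pc
};

class SrcMap : public std::vector<SrcMapElem> {};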
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index b31c3a3..12337c9 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -32,6 +32,7 @@
class CodeGenerator;
class DexCompilationUnit;
+class SrcMap;
class CodeAllocator {
public:
@@ -126,7 +127,7 @@
void GenerateSlowPaths();
- void BuildMappingTable(std::vector<uint8_t>* vector) const;
+ void BuildMappingTable(std::vector<uint8_t>* vector, SrcMap* src_map) const;
void BuildVMapTable(std::vector<uint8_t>* vector) const;
void BuildNativeGCMap(
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
@@ -142,6 +143,7 @@
protected:
CodeGenerator(HGraph* graph, size_t number_of_registers)
: frame_size_(kUninitializedFrameSize),
+ core_spill_mask_(0),
graph_(graph),
block_labels_(graph->GetArena(), 0),
pc_infos_(graph->GetArena(), 32),
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 8a5077b..fce6ab0 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -161,7 +161,10 @@
}
std::vector<uint8_t> mapping_table;
- codegen->BuildMappingTable(&mapping_table);
+ SrcMap src_mapping_table;
+ codegen->BuildMappingTable(&mapping_table,
+ GetCompilerDriver()->GetCompilerOptions().GetIncludeDebugSymbols() ?
+ &src_mapping_table : nullptr);
std::vector<uint8_t> vmap_table;
codegen->BuildVMapTable(&vmap_table);
std::vector<uint8_t> gc_map;
@@ -173,6 +176,7 @@
codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
0, /* FPR spill mask, unused */
+ &src_mapping_table,
mapping_table,
vmap_table,
gc_map,
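Worth noting: the source map is only collected when it will actually be consumed. A compact restatement of the gating above ('compiler_options' stands in for GetCompilerDriver()->GetCompilerOptions()):

SrcMap src_mapping_table;
SrcMap* src_map = compiler_options.GetIncludeDebugSymbols()
                      ? &src_mapping_table  // pc<->dex pairs feed the DWARF writer
                      : nullptr;            // skip the extra work entirely
codegen->BuildMappingTable(&mapping_table, src_map);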
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index bd3a7d9..da13b1e 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -16,6 +16,7 @@
#include "register_allocator.h"
+#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "ssa_liveness_analysis.h"
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index fbdc0b9..5de1ab9 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -16,6 +16,7 @@
#include "ssa_liveness_analysis.h"
+#include "base/bit_vector-inl.h"
#include "code_generator.h"
#include "nodes.h"
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index 39f7d18..de35f3d 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,11 +16,12 @@
#include "arena_allocator.h"
#include "arena_bit_vector.h"
+#include "base/allocator.h"
namespace art {
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator {
public:
explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
~ArenaBitVectorAllocator() {}
@@ -37,7 +38,7 @@
static void operator delete(void* p) {} // Nop.
private:
- ArenaAlloc* arena_;
+ ArenaAlloc* const arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
};
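FINAL is assumed to be ART's portability macro for the C++11 'final' keyword; sealing the class lets the compiler devirtualize Alloc()/Free() calls made through a pointer or reference of this exact type, and 'ArenaAlloc* const' documents that the arena is fixed for the allocator's lifetime. Illustrative use ('arena' and 'kWords' are made up):

ArenaBitVectorAllocator<ArenaAllocator> allocator(arena);
void* storage = allocator.Alloc(kWords * sizeof(uint32_t));  // devirtualizable: the type is final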
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index 485ed76..c92658f 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -51,23 +51,23 @@
* A BitVector implementation that uses Arena allocation.
*/
class ArenaBitVector : public BitVector {
- public:
- ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
- OatBitMapKind kind = kBitMapMisc);
- ArenaBitVector(ScopedArenaAllocator* arena, uint32_t start_bits, bool expandable,
- OatBitMapKind kind = kBitMapMisc);
- ~ArenaBitVector() {}
+ public:
+ ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
+ OatBitMapKind kind = kBitMapMisc);
+ ArenaBitVector(ScopedArenaAllocator* arena, uint32_t start_bits, bool expandable,
+ OatBitMapKind kind = kBitMapMisc);
+ ~ArenaBitVector() {}
static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
+ return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
}
static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
+ return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
}
static void operator delete(void* p) {} // Nop.
- private:
- const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
+ private:
+ const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
};
diff --git a/compiler/utils/dwarf_cfi.cc b/compiler/utils/dwarf_cfi.cc
index b3d1a47..83e5f5a 100644
--- a/compiler/utils/dwarf_cfi.cc
+++ b/compiler/utils/dwarf_cfi.cc
@@ -65,44 +65,86 @@
buf->push_back(0x0b);
}
-void WriteFDEHeader(std::vector<uint8_t>* buf) {
+void WriteFDEHeader(std::vector<uint8_t>* buf, bool is_64bit) {
// 'length' (filled in by other functions).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0xffffffff); // 0xffffffff escape marks the 64-bit DWARF format
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// 'CIE_pointer' (filled in by linker).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// 'initial_location' (filled in by linker).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// 'address_range' (filled in by other functions).
- PushWord(buf, 0);
+ if (is_64bit) {
+ PushWord(buf, 0);
+ PushWord(buf, 0);
+ } else {
+ PushWord(buf, 0);
+ }
// Augmentation length: 0
buf->push_back(0);
}
-void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data) {
- const int kOffsetOfAddressRange = 12;
- CHECK(buf->size() >= kOffsetOfAddressRange + sizeof(uint32_t));
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint64_t data, bool is_64bit) {
+ const size_t kOffsetOfAddressRange = is_64bit ? 28 : 12;
+ CHECK(buf->size() >= kOffsetOfAddressRange + (is_64bit ? 8 : 4));
uint8_t *p = buf->data() + kOffsetOfAddressRange;
- p[0] = data;
- p[1] = data >> 8;
- p[2] = data >> 16;
- p[3] = data >> 24;
+ if (is_64bit) {
+ p[0] = data;
+ p[1] = data >> 8;
+ p[2] = data >> 16;
+ p[3] = data >> 24;
+ p[4] = data >> 32;
+ p[5] = data >> 40;
+ p[6] = data >> 48;
+ p[7] = data >> 56;
+ } else {
+ p[0] = data;
+ p[1] = data >> 8;
+ p[2] = data >> 16;
+ p[3] = data >> 24;
+ }
}
-void WriteCFILength(std::vector<uint8_t>* buf) {
- uint32_t length = buf->size() - 4;
+void WriteCFILength(std::vector<uint8_t>* buf, bool is_64bit) {
+ uint64_t length = is_64bit ? buf->size() - 12 : buf->size() - 4;
DCHECK_EQ((length & 0x3), 0U);
- DCHECK_GT(length, 4U);
- uint8_t *p = buf->data();
- p[0] = length;
- p[1] = length >> 8;
- p[2] = length >> 16;
- p[3] = length >> 24;
+ uint8_t *p = is_64bit ? buf->data() + 4 : buf->data();
+ if (is_64bit) {
+ p[0] = length;
+ p[1] = length >> 8;
+ p[2] = length >> 16;
+ p[3] = length >> 24;
+ p[4] = length >> 32;
+ p[5] = length >> 40;
+ p[6] = length >> 48;
+ p[7] = length >> 56;
+ } else {
+ p[0] = length;
+ p[1] = length >> 8;
+ p[2] = length >> 16;
+ p[3] = length >> 24;
+ }
}
void PadCFI(std::vector<uint8_t>* buf) {
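The constants used by WriteFDEAddressRange and WriteCFILength follow directly from the DWARF 64-bit format's "initial length" escape, which WriteFDEHeader emits above. Layout recap:

//   32-bit FDE prefix                    64-bit FDE prefix
//   length            4 bytes @ 0        0xffffffff escape   4 bytes @ 0
//                                        length              8 bytes @ 4
//   CIE_pointer       4 bytes @ 4        CIE_pointer         8 bytes @ 12
//   initial_location  4 bytes @ 8        initial_location    8 bytes @ 20
//   address_range     4 bytes @ 12       address_range       8 bytes @ 28
//
// Hence kOffsetOfAddressRange is 12 vs 28, and WriteCFILength subtracts 4 vs
// 12 (the escape word plus the 8-byte length) and writes at offset 0 vs 4.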
diff --git a/compiler/utils/dwarf_cfi.h b/compiler/utils/dwarf_cfi.h
index e5acc0e..0c8b151 100644
--- a/compiler/utils/dwarf_cfi.h
+++ b/compiler/utils/dwarf_cfi.h
@@ -66,20 +66,24 @@
/**
* @brief Write FDE header into an FDE buffer
* @param buf FDE buffer.
+ * @param is_64bit Whether the FDE is for a 64-bit application.
*/
-void WriteFDEHeader(std::vector<uint8_t>* buf);
+void WriteFDEHeader(std::vector<uint8_t>* buf, bool is_64bit);
/**
* @brief Set 'address_range' field of an FDE buffer
* @param buf FDE buffer.
+ * @param data Address-range value to write.
+ * @param is_64bit Whether the FDE is for a 64-bit application.
*/
-void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint32_t data);
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint64_t data, bool is_64bit);
/**
* @brief Set 'length' field of an FDE buffer
* @param buf FDE buffer.
+ * @param is_64bit Whether the FDE is for a 64-bit application.
*/
-void WriteCFILength(std::vector<uint8_t>* buf);
+void WriteCFILength(std::vector<uint8_t>* buf, bool is_64bit);
/**
* @brief Pad an FDE buffer with 0 until its size is a multiple of 4
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 48edb15..2c9bc28 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1409,13 +1409,13 @@
}
void X86Assembler::InitializeFrameDescriptionEntry() {
- WriteFDEHeader(&cfi_info_);
+ WriteFDEHeader(&cfi_info_, false /* is_64bit */);
}
void X86Assembler::FinalizeFrameDescriptionEntry() {
- WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size(), false /* is_64bit */);
PadCFI(&cfi_info_);
- WriteCFILength(&cfi_info_);
+ WriteCFILength(&cfi_info_, false /* is_64bit */);
}
constexpr size_t kFramePointerSize = 4;
diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc
index 62b72c2..1e2884a 100644
--- a/compiler/utils/x86_64/assembler_x86_64.cc
+++ b/compiler/utils/x86_64/assembler_x86_64.cc
@@ -1716,13 +1716,13 @@
}
void X86_64Assembler::InitializeFrameDescriptionEntry() {
- WriteFDEHeader(&cfi_info_);
+ WriteFDEHeader(&cfi_info_, true /* is_64bit */);
}
void X86_64Assembler::FinalizeFrameDescriptionEntry() {
- WriteFDEAddressRange(&cfi_info_, buffer_.Size());
+ WriteFDEAddressRange(&cfi_info_, buffer_.Size(), true /* is_64bit */);
PadCFI(&cfi_info_);
- WriteCFILength(&cfi_info_);
+ WriteCFILength(&cfi_info_, true /* is_64bit */);
}
constexpr size_t kFramePointerSize = 8;
diff --git a/disassembler/disassembler_x86.cc b/disassembler/disassembler_x86.cc
index 0ca8962..0bf758e 100644
--- a/disassembler/disassembler_x86.cc
+++ b/disassembler/disassembler_x86.cc
@@ -702,12 +702,24 @@
load = true;
immediate_bytes = 1;
break;
+ case 0xA5:
+ opcode << "shld";
+ has_modrm = true;
+ load = true;
+ cx = true;
+ break;
case 0xAC:
opcode << "shrd";
has_modrm = true;
load = true;
immediate_bytes = 1;
break;
+ case 0xAD:
+ opcode << "shrd";
+ has_modrm = true;
+ load = true;
+ cx = true;
+ break;
case 0xAE:
if (prefix[0] == 0xF3) {
prefix[0] = 0; // clear prefix now it's served its purpose as part of the opcode
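For reference, these are the count-in-CL forms of the x86 double-precision shifts; the immediate forms directly above them were already handled:

//   0F A4 /r ib : shld r/m, r, imm8   -> immediate_bytes = 1
//   0F A5 /r    : shld r/m, r, CL     -> cx = true
//   0F AC /r ib : shrd r/m, r, imm8   -> immediate_bytes = 1
//   0F AD /r    : shrd r/m, r, CL     -> cx = true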
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index eed20da..bbdf3a3 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -569,6 +569,11 @@
}
}
+ t.NewTiming("Fixup Debug Sections");
+ if (!oat_file_->FixupDebugSections(delta_)) {
+ return false;
+ }
+
return true;
}
@@ -818,22 +823,12 @@
if (log_options) {
LOG(INFO) << "patchoat: option[" << i << "]=" << argv[i];
}
- // TODO: GetInstructionSetFromString shouldn't LOG(FATAL).
if (option.starts_with("--instruction-set=")) {
isa_set = true;
const char* isa_str = option.substr(strlen("--instruction-set=")).data();
- if (!strcmp("arm", isa_str)) {
- isa = kArm;
- } else if (!strcmp("arm64", isa_str)) {
- isa = kArm64;
- } else if (!strcmp("x86", isa_str)) {
- isa = kX86;
- } else if (!strcmp("x86_64", isa_str)) {
- isa = kX86_64;
- } else if (!strcmp("mips", isa_str)) {
- isa = kMips;
- } else {
- Usage("Unknown instruction set %s", isa_str);
+ isa = GetInstructionSetFromString(isa_str);
+ if (isa == kNone) {
+ Usage("Unknown or invalid instruction set %s", isa_str);
}
} else if (option.starts_with("--input-oat-location=")) {
if (have_input_oat) {
diff --git a/runtime/Android.mk b/runtime/Android.mk
index f55d3fb..1f2f86e 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -106,7 +106,7 @@
mirror/string.cc \
mirror/throwable.cc \
monitor.cc \
- native_bridge.cc \
+ native_bridge_art_interface.cc \
native/dalvik_system_DexFile.cc \
native/dalvik_system_VMDebug.cc \
native/dalvik_system_VMRuntime.cc \
@@ -417,7 +417,7 @@
LOCAL_C_INCLUDES += $$(ART_C_INCLUDES)
LOCAL_C_INCLUDES += art/sigchainlib
- LOCAL_SHARED_LIBRARIES += liblog libnativehelper
+ LOCAL_SHARED_LIBRARIES += liblog libnativehelper libnativebridge
include external/libcxx/libcxx.mk
LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
ifeq ($$(art_target_or_host),target)
diff --git a/runtime/arch/arm/arm_sdiv.S b/runtime/arch/arm/arm_sdiv.S
index 925e428..babdbf5 100644
--- a/runtime/arch/arm/arm_sdiv.S
+++ b/runtime/arch/arm/arm_sdiv.S
@@ -9,7 +9,7 @@
#include "asm_support_arm.S"
.section .text
-ENTRY CheckForARMSDIVInstruction
+ENTRY_NO_HIDE CheckForARMSDIVInstruction
mov r1,#1
// depending on the architecture, the assembler will not allow an
// sdiv instruction, so we will have to output the bytes directly.
diff --git a/runtime/arch/arm/asm_support_arm.S b/runtime/arch/arm/asm_support_arm.S
index e1b0ce7..a3e3b21 100644
--- a/runtime/arch/arm/asm_support_arm.S
+++ b/runtime/arch/arm/asm_support_arm.S
@@ -33,6 +33,7 @@
.macro ENTRY name
.thumb_func
.type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
.global \name
/* Cache alignment for function entry */
.balign 16
@@ -41,9 +42,35 @@
.fnstart
.endm
+.macro ENTRY_NO_HIDE name
+ .thumb_func
+ .type \name, #function
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+ .fnstart
+.endm
+
+
.macro ARM_ENTRY name
.arm
.type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+ /* Ensure we get a sane starting CFA. */
+ .cfi_def_cfa sp,0
+ .fnstart
+.endm
+
+.macro ARM_ENTRY_NO_HIDE name
+ .arm
+ .type \name, #function
.global \name
/* Cache alignment for function entry */
.balign 16
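The intent of the _NO_HIDE variants: .hidden keeps a symbol global for linking within libart but drops it from the dynamic export set, so intra-library calls bind directly instead of going through the PLT. Entry points that must stay resolvable from outside the library (the generic JNI trampoline and the to-interpreter bridges patched below) therefore use the NO_HIDE macros. Illustration with hypothetical symbols:

//   ENTRY art_quick_foo          -> .hidden: 'bl art_quick_foo' binds directly,
//                                   no PLT stub, no interposition.
//   ENTRY_NO_HIDE art_quick_bar  -> stays in the dynamic symbol table, so
//                                   callers outside libart can still reach it.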
diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S
index 98d17dc..3491c18 100644
--- a/runtime/arch/arm/portable_entrypoints_arm.S
+++ b/runtime/arch/arm/portable_entrypoints_arm.S
@@ -138,7 +138,7 @@
END art_portable_resolution_trampoline
.extern artPortableToInterpreterBridge
-ENTRY art_portable_to_interpreter_bridge
+ENTRY_NO_HIDE art_portable_to_interpreter_bridge
@ Fake callee save ref and args frame set up, note portable doesn't use callee save frames.
@ TODO: just save the registers that are needed in artPortableToInterpreterBridge.
push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index dd1f04a..1b30c9c 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -308,9 +308,12 @@
#ifdef ARM_R4_SUSPEND_FLAG
mov r4, #SUSPEND_CHECK_INTERVAL @ reset r4 to suspend check interval
#endif
- add r5, r2, #16 @ create space for method pointer in frame
- and r5, #0xFFFFFFF0 @ align frame size to 16 bytes
- sub sp, r5 @ reserve stack space for argument array
+ add r5, r2, #4 @ create space for method pointer in frame
+
+ sub r5, sp, r5 @ reserve & align *stack* to 16 bytes: native calling
+ and r5, #0xFFFFFFF0 @ convention only aligns to 8B, so we have to ensure ART
+ mov sp, r5 @ 16B alignment ourselves.
+
add r0, sp, #4 @ pass stack pointer + method ptr as dest for memcpy
bl memcpy @ memcpy (dest, src, bytes)
ldr r0, [r11] @ restore method*
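Worked example of the new alignment logic, with assumed incoming values:

//   sp = 0x7fff0038, r2 = 24 (argument bytes)
//   r5 = r2 + 4      = 28          (arguments plus the method* slot)
//   r5 = sp - r5     = 0x7fff001c
//   r5 &= 0xFFFFFFF0 = 0x7fff0010  (16-byte aligned, >= 28 bytes reserved)
//   sp = r5
// The old sequence aligned the frame *size* ((r2 + 16) & ~15) and subtracted
// it, which leaves sp 16-byte aligned only if sp was aligned to begin with;
// AAPCS guarantees just 8-byte alignment at call boundaries.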
@@ -988,7 +991,7 @@
/*
* Called to do a generic JNI down-call
*/
-ENTRY art_quick_generic_jni_trampoline
+ENTRY_NO_HIDE art_quick_generic_jni_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
str r0, [sp, #0] // Store native ArtMethod* to bottom of stack.
@@ -1034,14 +1037,13 @@
// result sign extension is handled in C code
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
- // r0 r1,r2 r3,stack <= C calling convention
+ // r0 r2,r3 stack <= C calling convention
// r11 r0,r1 r0,r1 <= where they are
- sub sp, sp, #12 // Stack alignment.
+ sub sp, sp, #8 // Stack alignment.
- push {r1}
- mov r3, r0
- mov r2, r1
- mov r1, r0
+ push {r0-r1}
+ mov r3, r1
+ mov r2, r0
mov r0, r11
blx artQuickGenericJniEndTrampoline
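Why the shuffle above lands where it does: under AAPCS a 64-bit argument must start in an even-numbered register, so with Thread* in r0 the 64-bit 'result' occupies r2/r3 and 'result_f' goes entirely on the stack. Annotated restatement:

//   artQuickGenericJniEndTrampoline(Thread*, result, result_f)
//     r0        <- Thread*   (kept in r11 across the native call)
//     r2:r3     <- result    (native result, currently in r0:r1)
//     [sp,#0-7] <- result_f  (the same r0:r1 pair, pushed)
//   The 'sub sp, #8' plus the 8-byte push keeps sp 8-byte aligned, as AAPCS
//   requires at the call.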
@@ -1058,7 +1060,18 @@
cbnz r2, .Lexception_in_native
// Tear down the callee-save frame.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ add sp, #12 @ rewind sp
+ // Do not pop r0 and r1, they contain the return value.
+ pop {r2-r3, r5-r8, r10-r11, lr} @ 9 words of callee saves
+ .cfi_restore r2
+ .cfi_restore r3
+ .cfi_restore r5
+ .cfi_restore r6
+ .cfi_restore r7
+ .cfi_restore r8
+ .cfi_restore r10
+ .cfi_restore r11
+ .cfi_adjust_cfa_offset -48
bx lr // ret
@@ -1073,7 +1086,7 @@
END art_quick_generic_jni_trampoline
.extern artQuickToInterpreterBridge
-ENTRY art_quick_to_interpreter_bridge
+ENTRY_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
mov r1, r9 @ pass Thread::Current
mov r2, sp @ pass SP
diff --git a/runtime/arch/arm64/asm_support_arm64.S b/runtime/arch/arm64/asm_support_arm64.S
index be167fa..fb49460 100644
--- a/runtime/arch/arm64/asm_support_arm64.S
+++ b/runtime/arch/arm64/asm_support_arm64.S
@@ -44,6 +44,16 @@
.macro ENTRY name
.type \name, #function
+ .hidden \name // Hide this as a global symbol, so we do not incur plt calls.
+ .global \name
+ /* Cache alignment for function entry */
+ .balign 16
+\name:
+ .cfi_startproc
+.endm
+
+.macro ENTRY_NO_HIDE name
+ .type \name, #function
.global \name
/* Cache alignment for function entry */
.balign 16
@@ -62,4 +72,10 @@
END \name
.endm
+.macro UNIMPLEMENTED_NO_HIDE name
+ ENTRY_NO_HIDE \name
+ brk 0
+ END \name
+.endm
+
#endif // ART_RUNTIME_ARCH_ARM64_ASM_SUPPORT_ARM64_S_
diff --git a/runtime/arch/arm64/portable_entrypoints_arm64.S b/runtime/arch/arm64/portable_entrypoints_arm64.S
index e136885..41711b5 100644
--- a/runtime/arch/arm64/portable_entrypoints_arm64.S
+++ b/runtime/arch/arm64/portable_entrypoints_arm64.S
@@ -25,4 +25,4 @@
UNIMPLEMENTED art_portable_resolution_trampoline
-UNIMPLEMENTED art_portable_to_interpreter_bridge
+UNIMPLEMENTED_NO_HIDE art_portable_to_interpreter_bridge
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ab9035a..2a19e27 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1435,7 +1435,7 @@
/*
* Called to do a generic JNI down-call
*/
-ENTRY art_quick_generic_jni_trampoline
+ENTRY_NO_HIDE art_quick_generic_jni_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
str x0, [sp, #0] // Store native ArtMethod* to bottom of stack.
@@ -1531,7 +1531,7 @@
* x0 = method being called/to bridge to.
* x1..x7, d0..d7 = arguments to that method.
*/
-ENTRY art_quick_to_interpreter_bridge
+ENTRY_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
// x0 will contain mirror::ArtMethod* method.
diff --git a/runtime/arch/memcmp16.cc b/runtime/arch/memcmp16.cc
index 7928085..5a3e73e 100644
--- a/runtime/arch/memcmp16.cc
+++ b/runtime/arch/memcmp16.cc
@@ -28,4 +28,16 @@
return 0;
}
+namespace art {
+
+namespace testing {
+
+int32_t MemCmp16Testing(const uint16_t* s0, const uint16_t* s1, size_t count) {
+ return MemCmp16(s0, s1, count);
+}
+
+}  // namespace testing
+
+} // namespace art
+
#pragma GCC diagnostic warning "-Wunused-function"
diff --git a/runtime/arch/memcmp16.h b/runtime/arch/memcmp16.h
index 14dc1e3..4b9fb8e 100644
--- a/runtime/arch/memcmp16.h
+++ b/runtime/arch/memcmp16.h
@@ -50,4 +50,17 @@
extern "C" int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count);
#endif
+namespace art {
+
+namespace testing {
+
+// A version that is exposed and relatively "close to the metal," so that memcmp16_test can do
+// some reasonable testing. Without this, as __memcmp16 is hidden, the test cannot access the
+// implementation.
+int32_t MemCmp16Testing(const uint16_t* s0, const uint16_t* s1, size_t count);
+
+}  // namespace testing
+
+} // namespace art
+
#endif // ART_RUNTIME_ARCH_MEMCMP16_H_
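Usage sketch of the shim (mirroring the memcmp16_test.cc hunk below): the test calls the exported wrapper, since __memcmp16 itself is not reachable once the library is built with tightened symbol visibility.

uint16_t a[] = {1, 2, 3};
uint16_t b[] = {1, 2, 4};
int32_t r = art::testing::MemCmp16Testing(a, b, 3);  // negative: a < b at index 2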
diff --git a/runtime/arch/memcmp16_test.cc b/runtime/arch/memcmp16_test.cc
index 5747c67..5ba06f8 100644
--- a/runtime/arch/memcmp16_test.cc
+++ b/runtime/arch/memcmp16_test.cc
@@ -139,7 +139,7 @@
size_t mod_min = c1_mod < c2_mod ? c1_mod : c2_mod;
int32_t expected = memcmp16_compare(s1_pot_unaligned, s2_pot_unaligned, mod_min);
- int32_t computed = MemCmp16(s1_pot_unaligned, s2_pot_unaligned, mod_min);
+ int32_t computed = art::testing::MemCmp16Testing(s1_pot_unaligned, s2_pot_unaligned, mod_min);
ASSERT_EQ(expected, computed) << "Run " << round << ", c1=" << count1 << " c2=" << count2;
diff --git a/runtime/arch/mips/asm_support_mips.h b/runtime/arch/mips/asm_support_mips.h
index 4db5ea6..6add93b 100644
--- a/runtime/arch/mips/asm_support_mips.h
+++ b/runtime/arch/mips/asm_support_mips.h
@@ -22,9 +22,9 @@
// Offset of field Thread::tls32_.state_and_flags verified in InitCpu
#define THREAD_FLAGS_OFFSET 0
// Offset of field Thread::tlsPtr_.card_table verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
// Offset of field Thread::tlsPtr_.exception verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 116
+#define THREAD_EXCEPTION_OFFSET 124
#define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64
#define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 25f9a5a..864e3f7 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -17,6 +17,7 @@
#include <cstdio>
#include "common_runtime_test.h"
+#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -543,15 +544,21 @@
#endif
}
+ static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
+ int32_t offset;
+#ifdef __LP64__
+ offset = GetThreadOffset<8>(entrypoint).Int32Value();
+#else
+ offset = GetThreadOffset<4>(entrypoint).Int32Value();
+#endif
+ return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
+ }
+
protected:
size_t fp_result;
};
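The helper above replaces the per-stub extern declarations scattered through this file; besides cutting boilerplate, it keeps working after the assembly stubs become .hidden, because the address is read from the thread's entrypoint table rather than resolved as a dynamic symbol. Before/after shape of a call site:

// Before: direct symbol reference (breaks once art_quick_memcpy is hidden):
//   extern "C" void art_quick_memcpy(void);
//   Invoke3(..., reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);
// After: load the pointer the runtime itself dispatches through:
//   Invoke3(..., StubTest::GetEntrypoint(self, kQuickMemcpy), self);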
-#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_memcpy(void);
-#endif
-
TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
@@ -564,7 +571,7 @@
}
Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
- 10 * sizeof(uint32_t), reinterpret_cast<uintptr_t>(&art_quick_memcpy), self);
+ 10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);
EXPECT_EQ(orig[0], trg[0]);
@@ -589,15 +596,14 @@
#endif
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_lock_object(void);
-#endif
-
TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
+
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -609,8 +615,7 @@
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
LockWord lock_after = obj->GetLockWord(false);
LockWord::LockState new_state = lock_after.GetState();
@@ -618,8 +623,7 @@
EXPECT_EQ(lock_after.ThinLockCount(), 0U); // Thin lock starts count at zero
for (size_t i = 1; i < kThinLockLoops; ++i) {
- Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
// Check we're at lock count i
@@ -635,8 +639,7 @@
obj2->IdentityHashCode();
- Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);
LockWord lock_after2 = obj2->GetLockWord(false);
LockWord::LockState new_state2 = lock_after2.GetState();
@@ -665,17 +668,15 @@
};
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_lock_object(void);
-extern "C" void art_quick_unlock_object(void);
-#endif
-
// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
static constexpr size_t kThinLockLoops = 100;
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
+ const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -687,8 +688,7 @@
LockWord::LockState old_state = lock.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
// This should be an illegal monitor state.
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -697,15 +697,13 @@
LockWord::LockState new_state = lock_after.GetState();
EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);
- test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
LockWord lock_after2 = obj->GetLockWord(false);
LockWord::LockState new_state2 = lock_after2.GetState();
EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
- test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
LockWord lock_after3 = obj->GetLockWord(false);
LockWord::LockState new_state3 = lock_after3.GetState();
@@ -759,12 +757,12 @@
}
if (lock) {
- test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_lock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
+ self);
counts[index]++;
} else {
test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ art_quick_unlock_object, self);
counts[index]--;
}
@@ -795,8 +793,8 @@
size_t index = kNumberOfLocks - 1 - i;
size_t count = counts[index];
while (count > 0) {
- test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_unlock_object), self);
+ test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
+ self);
count--;
}
@@ -825,6 +823,9 @@
TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
+
// Find some classes.
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -838,24 +839,24 @@
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_FALSE(self->IsExceptionPending());
// TODO: Make the following work. But that would require correct managed frames.
Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_check_cast), self);
+ art_quick_check_cast, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -868,16 +869,16 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_aput_obj_with_null_and_bound_check(void);
-// Do not check non-checked ones, we'd need handlers and stuff...
-#endif
-
TEST_F(StubTest, APutObj) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
Thread* self = Thread::Current();
+
+ // Do not check non-checked ones, we'd need handlers and stuff...
+ const uintptr_t art_quick_aput_obj_with_null_and_bound_check =
+ StubTest::GetEntrypoint(self, kQuickAputObjectWithNullAndBoundCheck);
+
// Create an object
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -907,25 +908,25 @@
EXPECT_FALSE(self->IsExceptionPending());
Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(0));
Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(1));
Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(2));
Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(str_obj.Get(), array->Get(3));
@@ -933,25 +934,25 @@
// 1.2) Assign null to array[0..3]
Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(0));
Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(1));
Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(2));
Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(nullptr),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_FALSE(self->IsExceptionPending());
EXPECT_EQ(nullptr, array->Get(3));
@@ -972,7 +973,7 @@
Invoke3(reinterpret_cast<size_t>(array.Get()), static_cast<size_t>(-1),
reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -980,7 +981,7 @@
// 2.3) Index > 0
Invoke3(reinterpret_cast<size_t>(array.Get()), 10U, reinterpret_cast<size_t>(str_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -988,7 +989,7 @@
// 3) Failure cases (obj into str[])
Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(obj_obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
+ art_quick_aput_obj_with_null_and_bound_check, self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -1024,7 +1025,7 @@
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
reinterpret_cast<size_t>(c->GetVirtualMethod(0)), // arbitrary
0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObject),
+ StubTest::GetEntrypoint(self, kQuickAllocObject),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1038,7 +1039,7 @@
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1052,7 +1053,7 @@
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
+ StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1108,7 +1109,7 @@
self->ClearException();
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocObjectInitialized),
+ StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
self);
EXPECT_TRUE(self->IsExceptionPending());
self->ClearException();
@@ -1154,7 +1155,7 @@
size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)), // arbitrary
10U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArray),
+ StubTest::GetEntrypoint(self, kQuickAllocArray),
self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1169,7 +1170,7 @@
// We can use nullptr in the second argument as we do not need a method here (not used in
// resolved/initialized cases)
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 10U,
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
self);
EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
@@ -1188,7 +1189,7 @@
{
size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr),
GB, // that should fail...
- reinterpret_cast<uintptr_t>(GetTlsPtr(self)->quick_entrypoints.pAllocArrayResolved),
+ StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
self);
EXPECT_TRUE(self->IsExceptionPending());
@@ -1205,10 +1206,6 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_string_compareto(void);
-#endif
-
TEST_F(StubTest, StringCompareTo) {
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
@@ -1216,6 +1213,9 @@
// TODO: Check the "Unresolved" allocation stubs
Thread* self = Thread::Current();
+
+ const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);
+
ScopedObjectAccess soa(self);
// garbage is created during ClassLinker::Init
@@ -1274,7 +1274,7 @@
// Test string_compareto x y
size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
reinterpret_cast<size_t>(s[y].Get()), 0U,
- reinterpret_cast<uintptr_t>(&art_quick_string_compareto), self);
+ art_quick_string_compareto, self);
EXPECT_FALSE(self->IsExceptionPending());
@@ -1306,11 +1306,6 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set32_static(void);
-extern "C" void art_quick_get32_static(void);
-#endif
-
static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1322,13 +1317,13 @@
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
static_cast<size_t>(values[i]),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_set32_static),
+ StubTest::GetEntrypoint(self, kQuickSet32Static),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_get32_static),
+ StubTest::GetEntrypoint(self, kQuickGet32Static),
self,
referrer);
@@ -1342,11 +1337,6 @@
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set32_instance(void);
-extern "C" void art_quick_get32_instance(void);
-#endif
-
static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1358,7 +1348,7 @@
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
- reinterpret_cast<uintptr_t>(&art_quick_set32_instance),
+ StubTest::GetEntrypoint(self, kQuickSet32Instance),
self,
referrer);
@@ -1371,7 +1361,7 @@
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_get32_instance),
+ StubTest::GetEntrypoint(self, kQuickGet32Instance),
self,
referrer);
EXPECT_EQ(res, static_cast<int32_t>(res2));
@@ -1385,8 +1375,6 @@
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set_obj_static(void);
-extern "C" void art_quick_get_obj_static(void);
static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
@@ -1394,13 +1382,13 @@
test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
reinterpret_cast<size_t>(val),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_set_obj_static),
+ StubTest::GetEntrypoint(self, kQuickSetObjStatic),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_get_obj_static),
+ StubTest::GetEntrypoint(self, kQuickGetObjStatic),
self,
referrer);
@@ -1428,9 +1416,6 @@
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_set_obj_instance(void);
-extern "C" void art_quick_get_obj_instance(void);
-
static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object* trg,
mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
StubTest* test)
@@ -1438,14 +1423,14 @@
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
reinterpret_cast<size_t>(val),
- reinterpret_cast<uintptr_t>(&art_quick_set_obj_instance),
+ StubTest::GetEntrypoint(self, kQuickSetObjInstance),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(trg),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_get_obj_instance),
+ StubTest::GetEntrypoint(self, kQuickGetObjInstance),
self,
referrer);
@@ -1476,11 +1461,6 @@
// TODO: Complete these tests for 32b architectures.
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
-extern "C" void art_quick_set64_static(void);
-extern "C" void art_quick_get64_static(void);
-#endif
-
static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1491,13 +1471,13 @@
for (size_t i = 0; i < num_values; ++i) {
test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
values[i],
- reinterpret_cast<uintptr_t>(&art_quick_set64_static),
+ StubTest::GetEntrypoint(self, kQuickSet64Static),
self,
referrer);
size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
0U, 0U,
- reinterpret_cast<uintptr_t>(&art_quick_get64_static),
+ StubTest::GetEntrypoint(self, kQuickGet64Static),
self,
referrer);
@@ -1511,11 +1491,6 @@
}
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
-extern "C" void art_quick_set64_instance(void);
-extern "C" void art_quick_get64_instance(void);
-#endif
-
static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1527,7 +1502,7 @@
test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
static_cast<size_t>(values[i]),
- reinterpret_cast<uintptr_t>(&art_quick_set64_instance),
+ StubTest::GetEntrypoint(self, kQuickSet64Instance),
self,
referrer);
@@ -1540,7 +1515,7 @@
size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
reinterpret_cast<size_t>(obj->Get()),
0U,
- reinterpret_cast<uintptr_t>(&art_quick_get64_instance),
+ StubTest::GetEntrypoint(self, kQuickGet64Instance),
self,
referrer);
EXPECT_EQ(res, static_cast<int64_t>(res2));
@@ -1683,9 +1658,6 @@
TestFields(self, this, Primitive::Type::kPrimLong);
}
-#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
-extern "C" void art_quick_imt_conflict_trampoline(void);
-#endif
TEST_F(StubTest, IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
@@ -1716,7 +1688,7 @@
// Patch up ArrayList.contains.
if (contains_amethod.Get()->GetEntryPointFromQuickCompiledCode() == nullptr) {
contains_amethod.Get()->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
- GetTlsPtr(self)->quick_entrypoints.pQuickToInterpreterBridge));
+ StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
}
// List
@@ -1765,7 +1737,7 @@
size_t result =
Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
reinterpret_cast<size_t>(obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_imt_conflict_trampoline),
+ StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
self, contains_amethod.Get(),
static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
@@ -1782,7 +1754,7 @@
result = Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
reinterpret_cast<size_t>(obj.Get()),
- reinterpret_cast<uintptr_t>(&art_quick_imt_conflict_trampoline),
+ StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
self, contains_amethod.Get(),
static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
@@ -1795,10 +1767,6 @@
#endif
}
-#if defined(__arm__) || defined(__aarch64__)
-extern "C" void art_quick_indexof(void);
-#endif
-
TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__)
TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
@@ -1848,7 +1816,7 @@
// Test indexof x y
size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
- reinterpret_cast<uintptr_t>(&art_quick_indexof), self);
+ StubTest::GetEntrypoint(self, kQuickIndexOf), self);
EXPECT_FALSE(self->IsExceptionPending());
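Note on the stub_test.cc hunks above: taking the address of an extern "C" assembly symbol (or loading it raw via GetTlsPtr(self)->quick_entrypoints.p...) stops working once those entrypoints are hidden, so the tests now resolve stubs through StubTest::GetEntrypoint(self, kQuick...). A minimal self-contained sketch of that kind of lookup follows; the struct, enum values, and slot layout here are stand-ins for illustration, not ART's exact definitions.

    #include <cstdint>
    #include <cstdio>

    // Stand-in for the per-thread table of Quick entrypoints
    // (tlsPtr_.quick_entrypoints in ART).
    struct QuickEntryPoints {
      void (*pAllocObject)();
      void (*pAllocObjectResolved)();
      void (*pStringCompareTo)();
    };

    // Stand-in for the kQuick... enum; each value names one slot of the table.
    enum QuickEntrypointEnum {
      kQuickAllocObject = 0,
      kQuickAllocObjectResolved = 1,
      kQuickStringCompareTo = 2,
    };

    // Resolve an entrypoint by slot index instead of linking against the (now
    // hidden) assembly symbol directly. Treating the struct as an array of
    // pointer-sized slots is an illustrative shortcut.
    uintptr_t GetEntrypoint(const QuickEntryPoints* table, QuickEntrypointEnum which) {
      const uintptr_t* slots = reinterpret_cast<const uintptr_t*>(table);
      return slots[static_cast<size_t>(which)];
    }

    void DummyStub() {}

    int main() {
      QuickEntryPoints table = {&DummyStub, &DummyStub, &DummyStub};
      std::printf("%p\n",
                  reinterpret_cast<void*>(GetEntrypoint(&table, kQuickStringCompareTo)));
      return 0;
    }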
diff --git a/runtime/arch/x86/asm_support_x86.S b/runtime/arch/x86/asm_support_x86.S
index 96c2c05..a578023 100644
--- a/runtime/arch/x86/asm_support_x86.S
+++ b/runtime/arch/x86/asm_support_x86.S
@@ -112,6 +112,7 @@
#define PLT_SYMBOL(name) _ ## name
#endif
+// Directive to hide a function symbol.
#if defined(__APPLE__)
#define ASM_HIDDEN .private_extern
#else
@@ -125,6 +126,17 @@
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(\c_name, 0)
+ ASM_HIDDEN VAR(c_name, 0)
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
+ CFI_STARTPROC
+ // Ensure we get a sane starting CFA.
+ CFI_DEF_CFA(esp, 4)
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION_NO_HIDE, c_name)
+ FUNCTION_TYPE(\c_name, 0)
.globl VAR(c_name, 0)
ALIGN_FUNCTION_ENTRY
VAR(c_name, 0):
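The new DEFINE_FUNCTION marks every entrypoint ASM_HIDDEN, while DEFINE_FUNCTION_NO_HIDE is kept for the few symbols that must stay resolvable by name from outside the runtime (the interpreter and generic-JNI bridges below). A rough C++ analogue of the two macros, assuming the usual ELF visibility semantics:

    #include <cstdio>

    // Like DEFINE_FUNCTION + ASM_HIDDEN: the symbol is not exported from the
    // shared object, so intra-DSO calls and jumps bind directly, with no PLT
    // indirection and no possibility of interposition.
    __attribute__((visibility("hidden")))
    void internal_entrypoint() { std::puts("hidden entrypoint"); }

    // Like DEFINE_FUNCTION_NO_HIDE: default visibility, exported and still
    // resolvable (and preemptible) across shared-object boundaries.
    __attribute__((visibility("default")))
    void exported_bridge() { internal_entrypoint(); }

    int main() { exported_bridge(); return 0; }

On Mach-O the same effect is spelled .private_extern, which is why ASM_HIDDEN switches on __APPLE__.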
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index c143c5d..65a48f6 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -66,7 +66,7 @@
#if defined(__APPLE__) && defined(__x86_64__)
// mac symbols have a prefix of _ on x86_64
extern "C" void _art_quick_throw_null_pointer_exception();
-extern "C" void _art_quick_throw_stack_overflow_from_signal();
+extern "C" void _art_quick_throw_stack_overflow();
extern "C" void _art_quick_test_suspend();
#define EXT_SYM(sym) _ ## sym
#else
@@ -395,7 +395,7 @@
// the previous frame.
// Now arrange for the signal handler to return to art_quick_throw_stack_overflow.
- uc->CTX_EIP = reinterpret_cast<uintptr_t>(art_quick_throw_stack_overflow);
+ uc->CTX_EIP = reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_stack_overflow));
return true;
}
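The fault handler references the assembly entrypoints from C++, and on Apple x86_64 those names carry a leading underscore, so the EXT_SYM wrapper shown above pastes it on; the fix replaces the stale _art_quick_throw_stack_overflow_from_signal declaration and routes the unqualified use through EXT_SYM so both spellings resolve. A condensed sketch of the pattern (not linkable on its own, since the entrypoints live in assembly; the non-Apple branch is an assumption, presumably the identity macro):

    #include <cstdint>

    #if defined(__APPLE__) && defined(__x86_64__)
    extern "C" void _art_quick_throw_stack_overflow();  // Mach-O: underscore-prefixed
    #define EXT_SYM(sym) _ ## sym
    #else
    extern "C" void art_quick_throw_stack_overflow();   // ELF: plain name
    #define EXT_SYM(sym) sym
    #endif

    // Either way, EXT_SYM(art_quick_throw_stack_overflow) names the right symbol.
    uintptr_t Target() {
      return reinterpret_cast<uintptr_t>(EXT_SYM(art_quick_throw_stack_overflow));
    }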
diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S
index 5f270f8..9365795 100644
--- a/runtime/arch/x86/portable_entrypoints_x86.S
+++ b/runtime/arch/x86/portable_entrypoints_x86.S
@@ -115,7 +115,7 @@
ret
END_FUNCTION art_portable_resolution_trampoline
-DEFINE_FUNCTION art_portable_to_interpreter_bridge
+DEFINE_FUNCTION_NO_HIDE art_portable_to_interpreter_bridge
PUSH ebp // Set up frame.
movl %esp, %ebp
CFI_DEF_CFA_REGISTER(%ebp)
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 75ec49d..2f3e317 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -106,7 +106,7 @@
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
int3 // unreached
END_MACRO
@@ -121,7 +121,7 @@
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
@@ -137,7 +137,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
@@ -153,7 +153,7 @@
PUSH ecx // pass arg2
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers ebx (harmless here)
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
int3 // unreached
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
@@ -161,7 +161,6 @@
/*
* Called by managed code to create and deliver a NullPointerException.
*/
- ASM_HIDDEN art_quick_throw_null_pointer_exception
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode
/*
@@ -189,7 +188,6 @@
* Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
* index, arg2 holds limit.
*/
- ASM_HIDDEN art_quick_throw_array_bounds
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode
/*
@@ -231,7 +229,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
movl %edx, %edi // save code pointer in EDI
addl MACRO_LITERAL(36), %esp // Pop arguments skip eax
CFI_ADJUST_CFA_OFFSET(-36)
@@ -253,7 +251,6 @@
END_FUNCTION RAW_VAR(c_name, 0)
END_MACRO
- ASM_HIDDEN art_quick_invoke_interface_trampoline
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
@@ -328,7 +325,7 @@
PUSH edx // pass SP
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -347,7 +344,7 @@
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass arg1
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -366,7 +363,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, Thread*, SP)
addl MACRO_LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -388,7 +385,7 @@
PUSH ecx // pass arg2
PUSH eax // pass arg1
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, arg3, Thread*, SP)
addl MACRO_LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -569,7 +566,7 @@
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call PLT_SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -603,7 +600,7 @@
pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current()
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass object
- call PLT_SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -615,7 +612,7 @@
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
@@ -626,7 +623,7 @@
PUSH eax // alignment padding
PUSH ecx // pass arg2 - obj->klass
PUSH eax // pass arg1 - checked class
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testl %eax, %eax
jz 1f // jump forward if not assignable
addl LITERAL(12), %esp // pop arguments
@@ -645,7 +642,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH ecx // pass arg2
PUSH eax // pass arg1
- call PLT_SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
+ call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_check_cast
@@ -660,7 +657,6 @@
jmp SYMBOL(art_quick_throw_null_pointer_exception)
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
- ASM_HIDDEN art_quick_aput_obj_with_bound_check
DEFINE_FUNCTION art_quick_aput_obj_with_bound_check
movl ARRAY_LENGTH_OFFSET(%eax), %ebx
cmpl %ebx, %ecx
@@ -670,7 +666,6 @@
jmp SYMBOL(art_quick_throw_array_bounds)
END_FUNCTION art_quick_aput_obj_with_bound_check
- ASM_HIDDEN art_quick_aput_obj
DEFINE_FUNCTION art_quick_aput_obj
test %edx, %edx // store of null
jz .Ldo_aput_null
@@ -697,7 +692,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH ebx // pass arg1 - component type of the array
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
+ call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
testl %eax, %eax
@@ -722,7 +717,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH edx // pass arg2 - value
PUSH eax // pass arg1 - array
- call PLT_SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
+ call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_aput_obj
@@ -744,7 +739,7 @@
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(art_d2l) // (jdouble a)
+ call SYMBOL(art_d2l) // (jdouble a)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
@@ -755,7 +750,7 @@
CFI_ADJUST_CFA_OFFSET(8)
SETUP_GOT_NOSAVE // clobbers EBX
PUSH eax // pass arg1 a
- call PLT_SYMBOL(art_f2l) // (jfloat a)
+ call SYMBOL(art_f2l) // (jfloat a)
addl LITERAL(12), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-12)
ret
@@ -769,7 +764,7 @@
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artLdiv) // (jlong a, jlong b)
+ call SYMBOL(artLdiv) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
@@ -783,7 +778,7 @@
PUSH ecx // pass arg2 a.hi
PUSH eax // pass arg1 a.lo
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artLmod) // (jlong a, jlong b)
+ call SYMBOL(artLmod) // (jlong a, jlong b)
addl LITERAL(28), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-28)
ret
@@ -851,7 +846,7 @@
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSet32InstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -871,7 +866,7 @@
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP)
+ call SYMBOL(artSet64InstanceFromCode) // (field_idx, Object*, new_val, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -892,7 +887,7 @@
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -912,7 +907,7 @@
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ call SYMBOL(artGet32InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -932,7 +927,7 @@
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ call SYMBOL(artGet64InstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -952,7 +947,7 @@
PUSH ecx // pass object
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+ call SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -972,7 +967,7 @@
PUSH ecx // pass new_val
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSet32StaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -993,7 +988,7 @@
PUSH ebx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
+ call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-32)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1013,7 +1008,7 @@
PUSH ecx // pass new_val
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
+ call SYMBOL(artSetObjStaticFromCode) // (field_idx, new_val, referrer, Thread*, SP)
addl LITERAL(32), %esp // pop arguments
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
@@ -1029,7 +1024,7 @@
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP)
+ call SYMBOL(artGet32StaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1046,7 +1041,7 @@
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP)
+ call SYMBOL(artGet64StaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1063,7 +1058,7 @@
PUSH ecx // pass referrer
PUSH eax // pass field_idx
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP)
+ call SYMBOL(artGetObjStaticFromCode) // (field_idx, referrer, Thread*, SP)
addl LITERAL(16), %esp // pop arguments
CFI_ADJUST_CFA_OFFSET(-16)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
@@ -1078,7 +1073,7 @@
PUSH ecx // pass receiver
PUSH eax // pass proxy method
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
@@ -1110,7 +1105,7 @@
PUSH ecx // pass receiver
PUSH eax // pass method
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
+ call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
movl %eax, %edi // remember code pointer in EDI
addl LITERAL(16), %esp // pop arguments
test %eax, %eax // if code pointer is NULL goto deliver pending exception
@@ -1128,7 +1123,7 @@
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_resolution_trampoline
-DEFINE_FUNCTION art_quick_generic_jni_trampoline
+DEFINE_FUNCTION_NO_HIDE art_quick_generic_jni_trampoline
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
// This also stores the native ArtMethod reference at the bottom of the stack.
@@ -1145,7 +1140,7 @@
pushl %ebp // Pass SP (to ArtMethod).
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
SETUP_GOT_NOSAVE // Clobbers ebx.
- call PLT_SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
+ call SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
// The C call will have registered the complete save-frame on success.
// The result of the call is:
@@ -1168,14 +1163,14 @@
// prepare for artQuickGenericJniEndTrampoline call
// (Thread*, result, result_f)
// (esp) 4(esp) 12(esp) <= C calling convention
- // fs:... eax:edx xmm0 <= where they are
+ // fs:... eax:edx fp0 <= where they are
subl LITERAL(20), %esp // Padding & pass float result.
- movsd %xmm0, (%esp)
+ fstpl (%esp)
pushl %edx // Pass int result.
pushl %eax
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
- call PLT_SYMBOL(artQuickGenericJniEndTrampoline)
+ call SYMBOL(artQuickGenericJniEndTrampoline)
// Tear down the alloca.
movl %ebp, %esp
@@ -1196,7 +1191,7 @@
POP ebp // Restore callee saves
POP esi
POP edi
- // store into fpr, for when it's a fpr return...
+ // Quick expects the return value to be in xmm0.
movd %eax, %xmm0
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
@@ -1209,7 +1204,7 @@
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
-DEFINE_FUNCTION art_quick_to_interpreter_bridge
+DEFINE_FUNCTION_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // save frame
mov %esp, %edx // remember SP
PUSH eax // alignment padding
@@ -1218,7 +1213,7 @@
CFI_ADJUST_CFA_OFFSET(4)
PUSH eax // pass method
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
+ call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
movd %eax, %xmm0 // place return value also into floating point return value
movd %edx, %xmm1
punpckldq %xmm1, %xmm0
@@ -1245,7 +1240,7 @@
PUSH ecx // Pass receiver.
PUSH eax // Pass Method*.
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
addl LITERAL(28), %esp // Pop arguments up to saved Method*.
movl 28(%esp), %edi // Restore edi.
movl %eax, 28(%esp) // Place code* over edi, just under return pc.
@@ -1280,7 +1275,7 @@
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current.
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
+ call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_result, fpr_result)
mov %eax, %ecx // Move returned link register.
addl LITERAL(32), %esp // Pop arguments.
CFI_ADJUST_CFA_OFFSET(-32)
@@ -1310,7 +1305,7 @@
pushl %fs:THREAD_SELF_OFFSET // Pass Thread::Current().
CFI_ADJUST_CFA_OFFSET(4)
SETUP_GOT_NOSAVE // clobbers EBX
- call PLT_SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
int3 // Unreachable.
END_FUNCTION art_quick_deoptimize
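Besides dropping the PLT indirection (hidden symbols bind locally, so a direct call SYMBOL(...) suffices), this file fixes the generic-JNI return path: on 32-bit x86 the native calling convention returns float/double in the x87 register st(0) ("fp0"), not in xmm0, so the trampoline now spills it with fstpl before calling artQuickGenericJniEndTrampoline, and only afterwards assembles the final result into xmm0, where Quick-compiled callers expect it. A tiny illustration of the store-and-pop (GCC/Clang inline asm, x86 only; purely illustrative):

    #include <cstdio>

    int main() {
      double in = 2.5, out = 0.0;
      // Load a double onto the x87 stack (standing in for a native call's
      // return value arriving in st(0)), then store-and-pop it to memory,
      // which is what the trampoline's fstpl (%esp) does.
      __asm__ volatile("fldl %1\n\t"
                       "fstpl %0"
                       : "=m"(out)
                       : "m"(in));
      std::printf("%f\n", out);  // prints 2.500000
      return 0;
    }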
diff --git a/runtime/arch/x86_64/asm_support_x86_64.S b/runtime/arch/x86_64/asm_support_x86_64.S
index 682ba43..4ae61a2 100644
--- a/runtime/arch/x86_64/asm_support_x86_64.S
+++ b/runtime/arch/x86_64/asm_support_x86_64.S
@@ -107,6 +107,13 @@
#define PLT_SYMBOL(name) _ ## name
#endif
+// Directive to hide a function symbol.
+#if defined(__APPLE__)
+ #define ASM_HIDDEN .private_extern
+#else
+ #define ASM_HIDDEN .hidden
+#endif
+
/* Cache alignment for function entry */
MACRO0(ALIGN_FUNCTION_ENTRY)
.balign 16
@@ -116,13 +123,20 @@
// for mac builds.
MACRO1(DEFINE_FUNCTION, c_name)
FUNCTION_TYPE(\c_name, 0)
+ ASM_HIDDEN VAR(c_name, 0)
.globl VAR(c_name, 0)
ALIGN_FUNCTION_ENTRY
VAR(c_name, 0):
-#if !defined(__APPLE__)
- // Have a local entrypoint that's not globl
-VAR(c_name, 0)_local:
-#endif
+ CFI_STARTPROC
+ // Ensure we get a sane starting CFA.
+ CFI_DEF_CFA(rsp, 8)
+END_MACRO
+
+MACRO1(DEFINE_FUNCTION_NO_HIDE, c_name)
+ FUNCTION_TYPE(\c_name, 0)
+ .globl VAR(c_name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(c_name, 0):
CFI_STARTPROC
// Ensure we get a sane starting CFA.
CFI_DEF_CFA(rsp, 8)
@@ -147,6 +161,19 @@
MACRO1(UNIMPLEMENTED,name)
FUNCTION_TYPE(\name, 0)
+ ASM_HIDDEN VAR(name, 0)
+ .globl VAR(name, 0)
+ ALIGN_FUNCTION_ENTRY
+VAR(name, 0):
+ CFI_STARTPROC
+ int3
+ int3
+ CFI_ENDPROC
+ SIZE(\name, 0)
+END_MACRO
+
+MACRO1(UNIMPLEMENTED_NO_HIDE,name)
+ FUNCTION_TYPE(\name, 0)
.globl VAR(name, 0)
ALIGN_FUNCTION_ENTRY
VAR(name, 0):
diff --git a/runtime/arch/x86_64/portable_entrypoints_x86_64.S b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
index 2e9d19a..7b84d17 100644
--- a/runtime/arch/x86_64/portable_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/portable_entrypoints_x86_64.S
@@ -25,4 +25,4 @@
UNIMPLEMENTED art_portable_resolution_trampoline
-UNIMPLEMENTED art_portable_to_interpreter_bridge
+UNIMPLEMENTED_NO_HIDE art_portable_to_interpreter_bridge
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 48bc240..f95bd22 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -232,7 +232,7 @@
// (Thread*, SP) setup
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rsp, %rsi
- call PLT_SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
+ call SYMBOL(artDeliverPendingExceptionFromCode) // artDeliverPendingExceptionFromCode(Thread*, SP)
UNREACHABLE
END_MACRO
@@ -242,7 +242,7 @@
// Outgoing argument set up
movq %rsp, %rsi // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -253,7 +253,7 @@
// Outgoing argument set up
movq %rsp, %rdx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, Thread*, SP)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -264,7 +264,7 @@
// Outgoing argument set up
movq %rsp, %rcx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
UNREACHABLE
END_FUNCTION VAR(c_name, 0)
END_MACRO
@@ -329,7 +329,7 @@
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread
movq %rsp, %r8 // pass SP
- call PLT_VAR(cxx_name, 1) // cxx_name(arg1, arg2, caller method*, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg1, arg2, caller method*, Thread*, SP)
// save the code pointer
movq %rax, %rdi
movq %rdx, %rax
@@ -643,7 +643,7 @@
// Outgoing argument set up
movq %rsp, %rsi // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -655,7 +655,7 @@
// Outgoing argument set up
movq %rsp, %rdx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -667,7 +667,7 @@
// Outgoing argument set up
movq %rsp, %rcx // pass SP
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -679,7 +679,7 @@
// Outgoing argument set up
movq %rsp, %r8 // pass SP
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -692,7 +692,7 @@
// arg0 is in rdi
movq %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
movq %rsp, %rcx // pass SP
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, referrer, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2)
END_FUNCTION VAR(c_name, 0)
@@ -705,7 +705,7 @@
// arg0 and arg1 are in rdi/rsi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
movq %rsp, %r8 // pass SP
- call PLT_VAR(cxx_name, 1) // (arg0, arg1, referrer, Thread*, SP)
+ call VAR(cxx_name, 1) // (arg0, arg1, referrer, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2)
END_FUNCTION VAR(c_name, 0)
@@ -718,7 +718,7 @@
// arg0, arg1, and arg2 are in rdi/rsi/rdx
movq %gs:THREAD_SELF_OFFSET, %r8 // pass Thread::Current()
movq %rsp, %r9 // pass SP
- call PLT_VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
+ call VAR(cxx_name, 1) // cxx_name(arg0, arg1, arg2, referrer, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
CALL_MACRO(return_macro, 2) // return or deliver exception
END_FUNCTION VAR(c_name, 0)
@@ -887,7 +887,7 @@
SETUP_REF_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
movq %rsp, %rdx // pass SP
- call PLT_SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_lock_object
@@ -913,7 +913,7 @@
SETUP_REF_ONLY_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current()
movq %rsp, %rdx // pass SP
- call PLT_SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
+ call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO
END_FUNCTION art_quick_unlock_object
@@ -922,7 +922,7 @@
PUSH rdi // Save args for exc
PUSH rsi
SETUP_FP_CALLEE_SAVE_FRAME
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
+ call SYMBOL(artIsAssignableFromCode) // (Class* klass, Class* ref_klass)
testq %rax, %rax
jz 1f // jump forward if not assignable
RESTORE_FP_CALLEE_SAVE_FRAME
@@ -937,7 +937,7 @@
SETUP_SAVE_ALL_CALLEE_SAVE_FRAME // save all registers as basis for long jump context
mov %rsp, %rcx // pass SP
mov %gs:THREAD_SELF_OFFSET, %rdx // pass Thread::Current()
- call PLT_SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
+ call SYMBOL(artThrowClassCastException) // (Class* a, Class* b, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_check_cast
@@ -958,8 +958,8 @@
#else
testl %edi, %edi
// testq %rdi, %rdi
- jnz art_quick_aput_obj_with_bound_check_local
- jmp art_quick_throw_null_pointer_exception_local
+ jnz art_quick_aput_obj_with_bound_check
+ jmp art_quick_throw_null_pointer_exception
#endif // __APPLE__
END_FUNCTION art_quick_aput_obj_with_null_and_bound_check
@@ -972,12 +972,12 @@
movl ARRAY_LENGTH_OFFSET(%edi), %ecx
// movl ARRAY_LENGTH_OFFSET(%rdi), %ecx // This zero-extends, so value(%rcx)=value(%ecx)
cmpl %ecx, %esi
- jb art_quick_aput_obj_local
+ jb art_quick_aput_obj
mov %esi, %edi
// mov %rsi, %rdi
mov %ecx, %esi
// mov %rcx, %rsi
- jmp art_quick_throw_array_bounds_local
+ jmp art_quick_throw_array_bounds
#endif // __APPLE__
END_FUNCTION art_quick_aput_obj_with_bound_check
@@ -1018,7 +1018,7 @@
movl CLASS_OFFSET(%edx), %esi // Pass arg2 = value's class.
movq %rcx, %rdi // Pass arg1 = array's component type.
- call PLT_SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
+ call SYMBOL(artIsAssignableFromCode) // (Class* a, Class* b)
// Exception?
testq %rax, %rax
@@ -1057,7 +1057,7 @@
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass arg 3 = Thread::Current().
// Pass arg 1 = array.
- call PLT_SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
+ call SYMBOL(artThrowArrayStoreException) // (array, value, Thread*, SP)
int3 // unreached
END_FUNCTION art_quick_aput_obj
@@ -1099,7 +1099,7 @@
// field_idx is in rdi
movq %gs:THREAD_SELF_OFFSET, %rcx // pass Thread::Current()
movq %rsp, %r8 // pass SP
- call PLT_SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
+ call SYMBOL(artSet64StaticFromCode) // (field_idx, referrer, new_val, Thread*, SP)
RESTORE_REF_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address
RETURN_IF_EAX_ZERO // return or deliver exception
END_FUNCTION art_quick_set64_static
@@ -1139,7 +1139,7 @@
movq %rdi, 0(%rsp)
movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
movq %rsp, %rcx // Pass SP.
- call PLT_SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
movq %rax, %xmm0 // Copy return value in case of float returns.
addq LITERAL(168 + 4*8), %rsp // Pop arguments.
CFI_ADJUST_CFA_OFFSET(-168 - 4*8)
@@ -1158,7 +1158,7 @@
movl 8(%rsp), %edi // load caller Method*
movl METHOD_DEX_CACHE_METHODS_OFFSET(%rdi), %edi // load dex_cache_resolved_methods
movl OBJECT_ARRAY_DATA_OFFSET(%rdi, %rax, 4), %edi // load the target method
- jmp art_quick_invoke_interface_trampoline_local
+ jmp art_quick_invoke_interface_trampoline
#endif // __APPLE__
END_FUNCTION art_quick_imt_conflict_trampoline
@@ -1166,7 +1166,7 @@
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
movq %gs:THREAD_SELF_OFFSET, %rdx
movq %rsp, %rcx
- call PLT_SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
+ call SYMBOL(artQuickResolutionTrampoline) // (called, receiver, Thread*, SP)
movq %rax, %r10 // Remember returned code pointer in R10.
movq (%rsp), %rdi // Load called method into RDI.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
@@ -1254,7 +1254,7 @@
/*
* Called to do a generic JNI down-call
*/
-DEFINE_FUNCTION art_quick_generic_jni_trampoline
+DEFINE_FUNCTION_NO_HIDE art_quick_generic_jni_trampoline
// Save callee and GPR args, mixed together to agree with core spills bitmap.
PUSH r15 // Callee save.
PUSH r14 // Callee save.
@@ -1310,7 +1310,7 @@
// gs:... rbp <= where they are
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rbp, %rsi
- call PLT_SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
+ call SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
// The C call will have registered the complete save-frame on success.
// The result of the call is:
@@ -1354,7 +1354,7 @@
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rax, %rsi
movq %xmm0, %rdx
- call PLT_SYMBOL(artQuickGenericJniEndTrampoline)
+ call SYMBOL(artQuickGenericJniEndTrampoline)
// Tear down the alloca.
movq %rbp, %rsp
@@ -1441,11 +1441,11 @@
* RDI = method being called / to bridge to.
* RSI, RDX, RCX, R8, R9 are arguments to that method.
*/
-DEFINE_FUNCTION art_quick_to_interpreter_bridge
+DEFINE_FUNCTION_NO_HIDE art_quick_to_interpreter_bridge
SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME // Set up frame and save arguments.
movq %gs:THREAD_SELF_OFFSET, %rsi // RSI := Thread::Current()
movq %rsp, %rdx // RDX := sp
- call PLT_SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
+ call SYMBOL(artQuickToInterpreterBridge) // (method, Thread*, SP)
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME // TODO: no need to restore arguments in this case.
movq %rax, %xmm0 // Place return value also into floating point return value.
RETURN_OR_DELIVER_PENDING_EXCEPTION // return or deliver exception
@@ -1467,12 +1467,12 @@
movq %rsp, %rcx // Pass SP.
movq FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp), %r8 // Pass return PC.
- call PLT_SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
+ call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
// %rax = result of call.
movq %r12, %rdi // Reload method pointer.
- leaq art_quick_instrumentation_exit_local(%rip), %r12 // Set up return through instrumentation
+ leaq art_quick_instrumentation_exit(%rip), %r12 // Set up return through instrumentation
movq %r12, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE-8(%rsp) // exit.
RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
@@ -1501,7 +1501,7 @@
movq %rax, %rdx // Pass integer result.
movq %xmm0, %rcx // Pass floating-point result.
- call PLT_SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_res, fpr_res)
+ call SYMBOL(artInstrumentationMethodExitFromCode) // (Thread*, SP, gpr_res, fpr_res)
movq %rax, %rdi // Store return PC
movq %rdx, %rsi // Store second return PC in hidden arg.
@@ -1526,7 +1526,7 @@
// Stack should be aligned now.
movq %rsp, %rsi // Pass SP.
movq %gs:THREAD_SELF_OFFSET, %rdi // Pass Thread.
- call PLT_SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
+ call SYMBOL(artDeoptimize) // artDeoptimize(Thread*, SP)
int3 // Unreachable.
END_FUNCTION art_quick_deoptimize
@@ -1577,7 +1577,7 @@
DEFINE_FUNCTION art_quick_assignable_from_code
SETUP_FP_CALLEE_SAVE_FRAME
- call PLT_SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*)
+ call SYMBOL(artIsAssignableFromCode) // (const mirror::Class*, const mirror::Class*)
RESTORE_FP_CALLEE_SAVE_FRAME
ret
END_FUNCTION art_quick_assignable_from_code
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 4f7753d..3469eca 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -23,7 +23,7 @@
namespace art {
-class MallocAllocator : public Allocator {
+class MallocAllocator FINAL : public Allocator {
public:
explicit MallocAllocator() {}
~MallocAllocator() {}
@@ -42,7 +42,7 @@
MallocAllocator g_malloc_allocator;
-class NoopAllocator : public Allocator {
+class NoopAllocator FINAL : public Allocator {
public:
explicit NoopAllocator() {}
~NoopAllocator() {}
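Marking MallocAllocator and NoopAllocator FINAL lets the compiler devirtualize calls made through pointers of the concrete type and reject accidental subclassing. A minimal illustration in plain C++11 (ART's FINAL macro is assumed to expand to the final keyword on current toolchains):

    #include <cstddef>
    #include <cstdlib>

    class Allocator {  // stand-in for art::Allocator
     public:
      virtual ~Allocator() {}
      virtual void* Alloc(std::size_t size) = 0;
      virtual void Free(void* p) = 0;
    };

    class MallocAllocator final : public Allocator {
     public:
      void* Alloc(std::size_t size) override { return std::malloc(size); }
      void Free(void* p) override { std::free(p); }
    };

    // Because MallocAllocator is final, the compiler may bypass the vtable here
    // and call Alloc directly.
    void* Use(MallocAllocator* a, std::size_t n) { return a->Alloc(n); }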
diff --git a/runtime/base/bit_vector-inl.h b/runtime/base/bit_vector-inl.h
new file mode 100644
index 0000000..dc13dd5
--- /dev/null
+++ b/runtime/base/bit_vector-inl.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
+#define ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
+
+#include "bit_vector.h"
+#include "logging.h"
+#include "utils.h"
+
+namespace art {
+
+inline bool BitVector::IndexIterator::operator==(const IndexIterator& other) const {
+ DCHECK(bit_storage_ == other.bit_storage_);
+ DCHECK_EQ(storage_size_, other.storage_size_);
+ return bit_index_ == other.bit_index_;
+}
+
+inline int BitVector::IndexIterator::operator*() const {
+ DCHECK_LT(bit_index_, BitSize());
+ return bit_index_;
+}
+
+inline BitVector::IndexIterator& BitVector::IndexIterator::operator++() {
+ DCHECK_LT(bit_index_, BitSize());
+ bit_index_ = FindIndex(bit_index_ + 1u);
+ return *this;
+}
+
+inline BitVector::IndexIterator BitVector::IndexIterator::operator++(int) {
+ IndexIterator result(*this);
+ ++*this;
+ return result;
+}
+
+inline uint32_t BitVector::IndexIterator::FindIndex(uint32_t start_index) const {
+ DCHECK_LE(start_index, BitSize());
+ uint32_t word_index = start_index / kWordBits;
+ if (UNLIKELY(word_index == storage_size_)) {
+ return start_index;
+ }
+ uint32_t word = bit_storage_[word_index];
+ // Mask out any bits in the first word we've already considered.
+ word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
+ while (word == 0u) {
+ ++word_index;
+ if (UNLIKELY(word_index == storage_size_)) {
+ return BitSize();
+ }
+ word = bit_storage_[word_index];
+ }
+ return word_index * 32u + CTZ(word);
+}
+
+inline void BitVector::ClearAllBits() {
+ memset(storage_, 0, storage_size_ * kWordBytes);
+}
+
+inline bool BitVector::Equal(const BitVector* src) const {
+ return (storage_size_ == src->GetStorageSize()) &&
+ (expandable_ == src->IsExpandable()) &&
+ (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+}
+
+} // namespace art
+
+#endif // ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
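The heart of the new inline file is FindIndex: mask off the bits below start_index in the current word, then walk whole words until one is non-zero and jump straight to its lowest set bit with a count-trailing-zeros. A self-contained equivalent, using __builtin_ctz directly where ART's utils.h CTZ helper is assumed to be a thin wrapper:

    #include <cstdint>
    #include <cstdio>

    // Return the index of the next set bit at or after start_index, or the total
    // bit size if none remains. Mirrors BitVector::IndexIterator::FindIndex.
    static uint32_t FindNextSetBit(const uint32_t* words, uint32_t num_words,
                                   uint32_t start_index) {
      uint32_t word_index = start_index / 32u;
      if (word_index == num_words) {
        return start_index;
      }
      // Mask out bits below start_index inside the first word.
      uint32_t word = words[word_index] & (~0u << (start_index & 0x1f));
      while (word == 0u) {
        if (++word_index == num_words) {
          return num_words * 32u;
        }
        word = words[word_index];
      }
      return word_index * 32u + static_cast<uint32_t>(__builtin_ctz(word));
    }

    int main() {
      uint32_t bits[2] = {0x0u, 0x9u};  // bits 32 and 35 set
      for (uint32_t i = FindNextSetBit(bits, 2, 0); i < 64u;
           i = FindNextSetBit(bits, 2, i + 1u)) {
        std::printf("%u ", i);  // prints: 32 35
      }
      return 0;
    }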
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 1b9022e..3d2f0de 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -16,20 +16,14 @@
#include "bit_vector.h"
+#include "allocator.h"
+#include "bit_vector-inl.h"
+
namespace art {
-// TODO: profile to make sure this is still a win relative to just using shifted masks.
-static uint32_t check_masks[32] = {
- 0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
- 0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
- 0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
- 0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
- 0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
- 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
- 0x40000000, 0x80000000 };
-
-static inline uint32_t BitsToWords(uint32_t bits) {
- return (bits + 31) >> 5;
+// The number of words necessary to encode the given number of bits.
+static constexpr uint32_t BitsToWords(uint32_t bits) {
+ return RoundUp(bits, 32) / 32;
}
// TODO: replace excessive argument defaulting when we are at gcc 4.7
@@ -40,10 +34,10 @@
Allocator* allocator,
uint32_t storage_size,
uint32_t* storage)
- : allocator_(allocator),
- expandable_(expandable),
+ : storage_(storage),
storage_size_(storage_size),
- storage_(storage) {
+ allocator_(allocator),
+ expandable_(expandable) {
COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes);
COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits);
if (storage_ == nullptr) {
@@ -56,59 +50,7 @@
allocator_->Free(storage_);
}
-/*
- * Determine whether or not the specified bit is set.
- */
-bool BitVector::IsBitSet(uint32_t num) const {
- // If the index is over the size:
- if (num >= storage_size_ * kWordBits) {
- // Whether it is expandable or not, this bit does not exist: thus it is not set.
- return false;
- }
-
- return IsBitSet(storage_, num);
-}
-
-// Mark all bits bit as "clear".
-void BitVector::ClearAllBits() {
- memset(storage_, 0, storage_size_ * kWordBytes);
-}
-
-// Mark the specified bit as "set".
-/*
- * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're
- * not using it badly or change resize mechanism.
- */
-void BitVector::SetBit(uint32_t num) {
- if (num >= storage_size_ * kWordBits) {
- DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
-
- /* Round up to word boundaries for "num+1" bits */
- uint32_t new_size = BitsToWords(num + 1);
- DCHECK_GT(new_size, storage_size_);
- uint32_t *new_storage =
- static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
- memcpy(new_storage, storage_, storage_size_ * kWordBytes);
- // Zero out the new storage words.
- memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
- // TOTO: collect stats on space wasted because of resize.
- storage_ = new_storage;
- storage_size_ = new_size;
- }
-
- storage_[num >> 5] |= check_masks[num & 0x1f];
-}
-
-// Mark the specified bit as "unset".
-void BitVector::ClearBit(uint32_t num) {
- // If the index is over the size, we don't have to do anything, it is cleared.
- if (num < storage_size_ * kWordBits) {
- // Otherwise, go ahead and clear it.
- storage_[num >> 5] &= ~check_masks[num & 0x1f];
- }
-}
-
-bool BitVector::SameBitsSet(const BitVector *src) {
+bool BitVector::SameBitsSet(const BitVector *src) const {
int our_highest = GetHighestBitSet();
int src_highest = src->GetHighestBitSet();
@@ -134,7 +76,6 @@
return (memcmp(storage_, src->GetRawStorage(), our_highest_index * kWordBytes) == 0);
}
-// Intersect with another bit vector.
void BitVector::Intersect(const BitVector* src) {
uint32_t src_storage_size = src->storage_size_;
@@ -155,9 +96,6 @@
}
}
-/*
- * Union with another bit vector.
- */
bool BitVector::Union(const BitVector* src) {
// Get the highest bit to determine how much we need to expand.
int highest_bit = src->GetHighestBitSet();
@@ -175,8 +113,7 @@
if (storage_size_ < src_size) {
changed = true;
- // Set it to reallocate.
- SetBit(highest_bit);
+ EnsureSize(highest_bit);
// Paranoid: storage size should be big enough to hold this bit now.
DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
@@ -242,21 +179,20 @@
}
void BitVector::Subtract(const BitVector *src) {
- uint32_t src_size = src->storage_size_;
+ uint32_t src_size = src->storage_size_;
- // We only need to operate on bytes up to the smaller of the sizes of the two operands.
- unsigned int min_size = (storage_size_ > src_size) ? src_size : storage_size_;
+ // We only need to operate on bytes up to the smaller of the sizes of the two operands.
+ unsigned int min_size = (storage_size_ > src_size) ? src_size : storage_size_;
- // Difference until max, we know both accept it:
- // There is no need to do more:
- // If we are bigger than src, the upper bits are unchanged.
- // If we are smaller than src, the non-existant upper bits are 0 and thus can't get subtracted.
- for (uint32_t idx = 0; idx < min_size; idx++) {
- storage_[idx] &= (~(src->GetRawStorageWord(idx)));
- }
+ // Difference until max, we know both accept it:
+ // There is no need to do more:
+ // If we are bigger than src, the upper bits are unchanged.
+ // If we are smaller than src, the non-existent upper bits are 0 and thus can't get subtracted.
+ for (uint32_t idx = 0; idx < min_size; idx++) {
+ storage_[idx] &= (~(src->GetRawStorageWord(idx)));
+ }
}
-// Count the number of bits that are set.
uint32_t BitVector::NumSetBits() const {
uint32_t count = 0;
for (uint32_t word = 0; word < storage_size_; word++) {
@@ -265,17 +201,11 @@
return count;
}
-// Count the number of bits that are set in range [0, end).
uint32_t BitVector::NumSetBits(uint32_t end) const {
DCHECK_LE(end, storage_size_ * kWordBits);
return NumSetBits(storage_, end);
}
-/*
- * Mark specified number of bits as "set". Cannot set all bits like ClearAll
- * since there might be unused bits - setting those to one will confuse the
- * iterator.
- */
void BitVector::SetInitialBits(uint32_t num_bits) {
// If num_bits is 0, clear everything.
if (num_bits == 0) {
@@ -288,7 +218,7 @@
uint32_t idx;
// We can set every storage element with -1.
- for (idx = 0; idx < (num_bits >> 5); idx++) {
+ for (idx = 0; idx < WordIndex(num_bits); idx++) {
storage_[idx] = -1;
}
@@ -312,20 +242,8 @@
uint32_t value = storage_[idx];
if (value != 0) {
- // Shift right for the counting.
- value /= 2;
-
- int cnt = 0;
-
- // Count the bits.
- while (value > 0) {
- value /= 2;
- cnt++;
- }
-
- // Return cnt + how many storage units still remain * the number of bits per unit.
- int res = cnt + (idx * kWordBits);
- return res;
+ // Return highest bit set in value plus bits from previous storage indexes.
+ return 31 - CLZ(value) + (idx * kWordBits);
}
}
@@ -333,23 +251,6 @@
return -1;
}
-bool BitVector::EnsureSizeAndClear(unsigned int num) {
- // Check if the bitvector is expandable.
- if (IsExpandable() == false) {
- return false;
- }
-
- if (num > 0) {
- // Now try to expand by setting the last bit.
- SetBit(num - 1);
- }
-
- // We must clear all bits as per our specification.
- ClearAllBits();
-
- return true;
-}
-
void BitVector::Copy(const BitVector *src) {
// Get highest bit set, we only need to copy till then.
int highest_bit = src->GetHighestBitSet();
@@ -375,13 +276,8 @@
}
}
-bool BitVector::IsBitSet(const uint32_t* storage, uint32_t num) {
- uint32_t val = storage[num >> 5] & check_masks[num & 0x1f];
- return (val != 0);
-}
-
uint32_t BitVector::NumSetBits(const uint32_t* storage, uint32_t end) {
- uint32_t word_end = end >> 5;
+ uint32_t word_end = WordIndex(end);
uint32_t partial_word_bits = end & 0x1f;
uint32_t count = 0u;
@@ -400,45 +296,6 @@
os << buffer.str() << std::endl;
}
-
-void BitVector::DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const {
- // Now print it to the file.
- fprintf(file, " {%s}", buffer.str().c_str());
-
- // If it isn't the last entry, add a |.
- if (last_entry == false) {
- fprintf(file, "|");
- }
-
- // Add the \n.
- fprintf(file, "\\\n");
-}
-
-void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) const {
- std::ostringstream buffer;
- DumpHelper(prefix, buffer);
- DumpDotHelper(last_entry, file, buffer);
-}
-
-void BitVector::DumpIndicesDot(FILE* file, const char* prefix, bool last_entry) const {
- std::ostringstream buffer;
- DumpIndicesHelper(prefix, buffer);
- DumpDotHelper(last_entry, file, buffer);
-}
-
-void BitVector::DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const {
- // Initialize it.
- if (prefix != nullptr) {
- buffer << prefix;
- }
-
- for (size_t i = 0; i < storage_size_ * kWordBits; i++) {
- if (IsBitSet(i)) {
- buffer << i << " ";
- }
- }
-}
-
void BitVector::DumpHelper(const char* prefix, std::ostringstream& buffer) const {
// Initialize it.
if (prefix != nullptr) {
@@ -452,4 +309,22 @@
buffer << ')';
}
+void BitVector::EnsureSize(uint32_t idx) {
+ if (idx >= storage_size_ * kWordBits) {
+ DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << idx;
+
+ /* Round up to word boundaries for "idx+1" bits */
+ uint32_t new_size = BitsToWords(idx + 1);
+ DCHECK_GT(new_size, storage_size_);
+ uint32_t *new_storage =
+ static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
+ memcpy(new_storage, storage_, storage_size_ * kWordBytes);
+ // Zero out the new storage words.
+ memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
+ // TODO: collect stats on space wasted because of resize.
+ storage_ = new_storage;
+ storage_size_ = new_size;
+ }
+}
+
} // namespace art
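GetHighestBitSet's old shift-and-count loop collapses to a single count-leading-zeros: within a 32-bit word the highest set bit is 31 - CLZ(word), plus 32 for every full word below it. A quick check (CLZ is assumed to wrap __builtin_clz, which is undefined for zero; the code above only reaches it when value != 0):

    #include <cstdio>

    int main() {
      unsigned value = 0x00400010u;          // bits 4 and 22 set
      int highest_in_word = 31 - __builtin_clz(value);
      unsigned word_index = 3u;              // pretend this word was storage_[3]
      std::printf("%d\n", highest_in_word);  // prints 22
      std::printf("%u\n", highest_in_word + word_index * 32u);  // 22 + 96 = 118
      return 0;
    }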
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index fb1646f..1e28a27 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -18,235 +18,237 @@
#define ART_RUNTIME_BASE_BIT_VECTOR_H_
#include <stdint.h>
-#include <stddef.h>
-
-#include "allocator.h"
-#include "base/logging.h"
-#include "utils.h"
+#include <iterator>
namespace art {
+class Allocator;
+
/*
* Expanding bitmap, used for tracking resources. Bits are numbered starting
* from zero. All operations on a BitVector are unsynchronized.
*/
class BitVector {
- public:
- class IndexContainer;
+ public:
+ class IndexContainer;
- /**
- * @brief Convenient iterator across the indexes of the BitVector's set bits.
- *
- * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
- * to the highest index of the BitVector's set bits. Instances can be retrieved
- * only through BitVector::Indexes() which returns an IndexContainer wrapper
- * object with begin() and end() suitable for range-based loops:
- * for (uint32_t idx : bit_vector.Indexes()) {
- * // Use idx.
- * }
- */
- class IndexIterator
- : std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
- public:
- bool operator==(const IndexIterator& other) const {
- DCHECK(bit_storage_ == other.bit_storage_);
- DCHECK_EQ(storage_size_, other.storage_size_);
- return bit_index_ == other.bit_index_;
- }
+ /**
+ * @brief Convenient iterator across the indexes of the BitVector's set bits.
+ *
+ * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
+ * to the highest index of the BitVector's set bits. Instances can be retrieved
+ * only through BitVector::Indexes() which returns an IndexContainer wrapper
+ * object with begin() and end() suitable for range-based loops:
+ * for (uint32_t idx : bit_vector.Indexes()) {
+ * // Use idx.
+ * }
+ */
+ class IndexIterator :
+ std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
+ public:
+ bool operator==(const IndexIterator& other) const;
- bool operator!=(const IndexIterator& other) const {
- return !(*this == other);
- }
-
- int operator*() const {
- DCHECK_LT(bit_index_, BitSize());
- return bit_index_;
- }
-
- IndexIterator& operator++() {
- DCHECK_LT(bit_index_, BitSize());
- bit_index_ = FindIndex(bit_index_ + 1u);
- return *this;
- }
-
- IndexIterator operator++(int) {
- IndexIterator result(*this);
- ++*this;
- return result;
- }
-
- // Helper function to check for end without comparing with bit_vector.Indexes().end().
- bool Done() const {
- return bit_index_ == BitSize();
- }
-
- private:
- struct begin_tag { };
- struct end_tag { };
-
- IndexIterator(const BitVector* bit_vector, begin_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(FindIndex(0u)) { }
-
- IndexIterator(const BitVector* bit_vector, end_tag)
- : bit_storage_(bit_vector->GetRawStorage()),
- storage_size_(bit_vector->storage_size_),
- bit_index_(BitSize()) { }
-
- uint32_t BitSize() const {
- return storage_size_ * kWordBits;
- }
-
- uint32_t FindIndex(uint32_t start_index) const {
- DCHECK_LE(start_index, BitSize());
- uint32_t word_index = start_index / kWordBits;
- if (UNLIKELY(word_index == storage_size_)) {
- return start_index;
- }
- uint32_t word = bit_storage_[word_index];
- // Mask out any bits in the first word we've already considered.
- word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
- while (word == 0u) {
- ++word_index;
- if (UNLIKELY(word_index == storage_size_)) {
- return BitSize();
- }
- word = bit_storage_[word_index];
- }
- return word_index * 32u + CTZ(word);
- }
-
- const uint32_t* const bit_storage_;
- const uint32_t storage_size_; // Size of vector in words.
- uint32_t bit_index_; // Current index (size in bits).
-
- friend class BitVector::IndexContainer;
- };
-
- /**
- * @brief BitVector wrapper class for iteration across indexes of set bits.
- */
- class IndexContainer {
- public:
- explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
-
- IndexIterator begin() const {
- return IndexIterator(bit_vector_, IndexIterator::begin_tag());
- }
-
- IndexIterator end() const {
- return IndexIterator(bit_vector_, IndexIterator::end_tag());
- }
-
- private:
- const BitVector* const bit_vector_;
- };
-
- BitVector(uint32_t start_bits,
- bool expandable,
- Allocator* allocator,
- uint32_t storage_size = 0,
- uint32_t* storage = nullptr);
-
- virtual ~BitVector();
-
- void SetBit(uint32_t num);
- void ClearBit(uint32_t num);
- bool IsBitSet(uint32_t num) const;
- void ClearAllBits();
- void SetInitialBits(uint32_t num_bits);
-
- void Copy(const BitVector* src);
- void Intersect(const BitVector* src2);
- bool Union(const BitVector* src);
-
- // Set bits of union_with that are not in not_in.
- bool UnionIfNotIn(const BitVector* union_with, const BitVector* not_in);
-
- void Subtract(const BitVector* src);
- // Are we equal to another bit vector? Note: expandability attributes must also match.
- bool Equal(const BitVector* src) {
- return (storage_size_ == src->GetStorageSize()) &&
- (expandable_ == src->IsExpandable()) &&
- (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+ bool operator!=(const IndexIterator& other) const {
+ return !(*this == other);
}
- /**
- * @brief Are all the bits set the same?
- * @details expandability and size can differ as long as the same bits are set.
- */
- bool SameBitsSet(const BitVector *src);
+ int operator*() const;
- uint32_t NumSetBits() const;
+ IndexIterator& operator++();
- // Number of bits set in range [0, end).
- uint32_t NumSetBits(uint32_t end) const;
+ IndexIterator operator++(int);
- IndexContainer Indexes() const {
- return IndexContainer(this);
+ // Helper function to check for end without comparing with bit_vector.Indexes().end().
+ bool Done() const {
+ return bit_index_ == BitSize();
}
- uint32_t GetStorageSize() const { return storage_size_; }
- bool IsExpandable() const { return expandable_; }
- uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
- uint32_t* GetRawStorage() { return storage_; }
- const uint32_t* GetRawStorage() const { return storage_; }
- size_t GetSizeOf() const { return storage_size_ * kWordBytes; }
+ private:
+ struct begin_tag { };
+ struct end_tag { };
- /**
- * @return the highest bit set, -1 if none are set
+ IndexIterator(const BitVector* bit_vector, begin_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(FindIndex(0u)) { }
+
+ IndexIterator(const BitVector* bit_vector, end_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(BitSize()) { }
+
+ uint32_t BitSize() const {
+ return storage_size_ * kWordBits;
+ }
+
+ uint32_t FindIndex(uint32_t start_index) const;
+ const uint32_t* const bit_storage_;
+ const uint32_t storage_size_; // Size of vector in words.
+ uint32_t bit_index_; // Current index (size in bits).
+
+ friend class BitVector::IndexContainer;
+ };
+
+ /**
+ * @brief BitVector wrapper class for iteration across indexes of set bits.
+ */
+ class IndexContainer {
+ public:
+ explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
+
+ IndexIterator begin() const {
+ return IndexIterator(bit_vector_, IndexIterator::begin_tag());
+ }
+
+ IndexIterator end() const {
+ return IndexIterator(bit_vector_, IndexIterator::end_tag());
+ }
+
+ private:
+ const BitVector* const bit_vector_;
+ };
+
+ BitVector(uint32_t start_bits,
+ bool expandable,
+ Allocator* allocator,
+ uint32_t storage_size = 0,
+ uint32_t* storage = nullptr);
+
+ virtual ~BitVector();
+
+ // Mark the specified bit as "set".
+ void SetBit(uint32_t idx) {
+ /*
+ * TUNING: this could have pathologically bad growth/expand behavior. Make sure we're
+ * not using it badly or change the resize mechanism.
*/
- int GetHighestBitSet() const;
+ if (idx >= storage_size_ * kWordBits) {
+ EnsureSize(idx);
+ }
+ storage_[WordIndex(idx)] |= BitMask(idx);
+ }
- // Is bit set in storage. (No range check.)
- static bool IsBitSet(const uint32_t* storage, uint32_t num);
- // Number of bits set in range [0, end) in storage. (No range check.)
- static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
+ // Mark the specified bit as "unset".
+ void ClearBit(uint32_t idx) {
+ // If the index is over the size, we don't have to do anything; it is already clear.
+ if (idx < storage_size_ * kWordBits) {
+ // Otherwise, go ahead and clear it.
+ storage_[WordIndex(idx)] &= ~BitMask(idx);
+ }
+ }
- bool EnsureSizeAndClear(unsigned int num);
+ // Determine whether or not the specified bit is set.
+ bool IsBitSet(uint32_t idx) const {
+ // If the index is over the size, whether it is expandable or not, this bit does not exist:
+ // thus it is not set.
+ return (idx < (storage_size_ * kWordBits)) && IsBitSet(storage_, idx);
+ }
- void Dump(std::ostream& os, const char* prefix) const;
+ // Mark all bits as "clear".
+ void ClearAllBits();
- /**
- * @brief last_entry is this the last entry for the dot dumping
- * @details if not, a "|" is appended to the dump.
- */
- void DumpDot(FILE* file, const char* prefix, bool last_entry = false) const;
+ // Mark specified number of bits as "set". Cannot set all bits like ClearAll since there might
+ // be unused bits - setting those to one will confuse the iterator.
+ void SetInitialBits(uint32_t num_bits);
- /**
- * @brief last_entry is this the last entry for the dot dumping
- * @details if not, a "|" is appended to the dump.
- */
- void DumpIndicesDot(FILE* file, const char* prefix, bool last_entry = false) const;
+ void Copy(const BitVector* src);
- protected:
- /**
- * @brief Dump the bitvector into buffer in a 00101..01 format.
- * @param buffer the ostringstream used to dump the bitvector into.
- */
- void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
+ // Intersect with another bit vector.
+ void Intersect(const BitVector* src2);
- /**
- * @brief Dump the bitvector in a 1 2 5 8 format, where the numbers are the bit set.
- * @param buffer the ostringstream used to dump the bitvector into.
- */
- void DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const;
+ // Union with another bit vector.
+ bool Union(const BitVector* src);
- /**
- * @brief Wrapper to perform the bitvector dumping with the .dot format.
- * @param buffer the ostringstream used to dump the bitvector into.
- */
- void DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const;
+ // Set bits of union_with that are not in not_in.
+ bool UnionIfNotIn(const BitVector* union_with, const BitVector* not_in);
- private:
- static constexpr uint32_t kWordBytes = sizeof(uint32_t);
- static constexpr uint32_t kWordBits = kWordBytes * 8;
+ void Subtract(const BitVector* src);
- Allocator* const allocator_;
- const bool expandable_; // expand bitmap if we run out?
- uint32_t storage_size_; // current size, in 32-bit words.
- uint32_t* storage_;
+ // Are we equal to another bit vector? Note: expandability attributes must also match.
+ bool Equal(const BitVector* src) const;
+
+ /**
+ * @brief Are all the bits set the same?
+ * @details expandability and size can differ as long as the same bits are set.
+ */
+ bool SameBitsSet(const BitVector *src) const;
+
+ // Count the number of bits that are set.
+ uint32_t NumSetBits() const;
+
+ // Count the number of bits that are set in range [0, end).
+ uint32_t NumSetBits(uint32_t end) const;
+
+ IndexContainer Indexes() const {
+ return IndexContainer(this);
+ }
+
+ uint32_t GetStorageSize() const {
+ return storage_size_;
+ }
+
+ bool IsExpandable() const {
+ return expandable_;
+ }
+
+ uint32_t GetRawStorageWord(size_t idx) const {
+ return storage_[idx];
+ }
+
+ uint32_t* GetRawStorage() {
+ return storage_;
+ }
+
+ const uint32_t* GetRawStorage() const {
+ return storage_;
+ }
+
+ size_t GetSizeOf() const {
+ return storage_size_ * kWordBytes;
+ }
+
+ /**
+ * @return the highest bit set, -1 if none are set
+ */
+ int GetHighestBitSet() const;
+
+ // Is bit set in storage. (No range check.)
+ static bool IsBitSet(const uint32_t* storage, uint32_t idx) {
+ return (storage[WordIndex(idx)] & BitMask(idx)) != 0;
+ }
+
+ // Number of bits set in range [0, end) in storage. (No range check.)
+ static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
+
+ void Dump(std::ostream& os, const char* prefix) const;
+
+ private:
+ /**
+ * @brief Dump the bitvector into buffer in a 00101..01 format.
+ * @param buffer the ostringstream used to dump the bitvector into.
+ */
+ void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
+
+ // Ensure there is space for a bit at idx.
+ void EnsureSize(uint32_t idx);
+
+ // The index of the word within storage.
+ static constexpr uint32_t WordIndex(uint32_t idx) {
+ return idx >> 5;
+ }
+
+ // A bit mask to extract the bit for the given index.
+ static constexpr uint32_t BitMask(uint32_t idx) {
+ return 1U << (idx & 0x1f);
+ }
+
+ static constexpr uint32_t kWordBytes = sizeof(uint32_t);
+ static constexpr uint32_t kWordBits = kWordBytes * 8;
+
+ uint32_t* storage_; // The storage for the bit vector.
+ uint32_t storage_size_; // Current size, in 32-bit words.
+ Allocator* const allocator_; // Allocator if expandable.
+ const bool expandable_; // Should the bitmap expand if too small?
};
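The private WordIndex()/BitMask() helpers above implement the usual bit-addressing split: idx >> 5 selects the 32-bit word and the low five bits select the bit within it. A self-contained sketch of the same arithmetic (standalone, not ART code):

    #include <cassert>
    #include <cstdint>

    static constexpr uint32_t WordIndex(uint32_t idx) { return idx >> 5; }          // idx / 32
    static constexpr uint32_t BitMask(uint32_t idx) { return 1u << (idx & 0x1f); }  // idx % 32

    int main() {
      // Bit 37 lives in word 1 at position 5, so SetBit(37) ORs word 1 with 1 << 5.
      assert(WordIndex(37) == 1u);
      assert(BitMask(37) == (1u << 5));
      return 0;
    }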
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index 1403f50..df5d79d 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -16,7 +16,8 @@
#include <memory>
-#include "bit_vector.h"
+#include "allocator.h"
+#include "bit_vector-inl.h"
#include "gtest/gtest.h"
namespace art {
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 99277a0..b0f8e22 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -228,7 +228,10 @@
}
if (invoke != kStatic) {
mirror::Object* o = soa.Decode<mirror::Object*>(jobj);
- if (!o->InstanceOf(m->GetDeclaringClass())) {
+ if (o == nullptr) {
+ AbortF("can't call %s on null object", PrettyMethod(m).c_str());
+ return false;
+ } else if (!o->InstanceOf(m->GetDeclaringClass())) {
AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
return false;
}
@@ -292,7 +295,10 @@
return false;
}
mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
- if (!o->InstanceOf(m->GetDeclaringClass())) {
+ if (o == nullptr) {
+ AbortF("can't call %s on null object", PrettyMethod(m).c_str());
+ return false;
+ } else if (!o->InstanceOf(m->GetDeclaringClass())) {
AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
return false;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index db42146..8fc1d07 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -851,7 +851,8 @@
// We opened the oat file, so we must register it.
RegisterOatFile(oat_file);
}
- return true;
+ // If the file isn't executable, patchoat failed but we did manage to get the dex files.
+ return oat_file->IsExecutable();
} else {
if (needs_registering) {
// We opened it, delete it.
@@ -1136,11 +1137,18 @@
error_msgs->push_back(StringPrintf("Failed to open oat file from dex location '%s'",
dex_location));
return nullptr;
- } else if (!VerifyOatWithDexFile(oat_file.get(), dex_location, &error_msg)) {
+ } else if (oat_file->IsExecutable() &&
+ !VerifyOatWithDexFile(oat_file.get(), dex_location, &error_msg)) {
error_msgs->push_back(StringPrintf("Failed to verify oat file '%s' found for dex location "
"'%s': %s", oat_file->GetLocation().c_str(), dex_location,
error_msg.c_str()));
return nullptr;
+ } else if (!oat_file->IsExecutable() &&
+ !VerifyOatImageChecksum(oat_file.get(), isa)) {
+ error_msgs->push_back(StringPrintf("Failed to verify non-executable oat file '%s' found for "
+ "dex location '%s'. Image checksum incorrect.",
+ oat_file->GetLocation().c_str(), dex_location));
+ return nullptr;
} else {
return oat_file.release();
}
@@ -1310,11 +1318,35 @@
return ret;
}
+const OatFile* ClassLinker::GetInterpretedOnlyOat(const std::string& oat_path,
+ InstructionSet isa,
+ std::string* error_msg) {
+ // We open it non-executable.
+ std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, nullptr, false, error_msg));
+ if (output.get() == nullptr) {
+ return nullptr;
+ }
+ if (VerifyOatImageChecksum(output.get(), isa)) {
+ return output.release();
+ } else {
+ *error_msg = StringPrintf("Could not use oat file '%s', image checksum failed to verify.",
+ oat_path.c_str());
+ return nullptr;
+ }
+}
+
const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
const std::string& output_oat,
const std::string& image_location,
InstructionSet isa,
std::string* error_msg) {
+ if (!Runtime::Current()->IsDex2OatEnabled()) {
+ // We don't have dex2oat so we can assume we don't have patchoat either. We should just use the
+ // input_oat but make sure we only do interpretation on its dex files.
+ LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted due to dex2oat being "
+ << "disabled. Attempting to use oat file for interpretation";
+ return GetInterpretedOnlyOat(input_oat, isa, error_msg);
+ }
Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
@@ -1352,6 +1384,12 @@
"but was unable to open output file '%s': %s",
input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
}
+ } else if (!Runtime::Current()->IsCompiler()) {
+ // patchoat failed, which probably means we don't have enough room to place the output oat file.
+ // Instead of failing, just run the interpreter from the dex files in the input oat.
+ LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
+ << "for interpretation. patchoat failure was: " << *error_msg;
+ return GetInterpretedOnlyOat(input_oat, isa, error_msg);
} else {
*error_msg = StringPrintf("Patching of oat file '%s to '%s' "
"failed: %s", input_oat.c_str(), output_oat.c_str(),
@@ -2022,21 +2060,20 @@
return mirror::Class::ComputeClassSize(false, 0, num_32, num_64, num_ref);
}
-bool ClassLinker::FindOatClass(const DexFile& dex_file,
- uint16_t class_def_idx,
- OatFile::OatClass* oat_class) {
- DCHECK(oat_class != nullptr);
+OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx,
+ bool* found) {
DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
if (oat_file == nullptr) {
- return false;
+ *found = false;
+ return OatFile::OatClass::Invalid();
}
uint dex_location_checksum = dex_file.GetLocationChecksum();
const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
&dex_location_checksum);
CHECK(oat_dex_file != NULL) << dex_file.GetLocation();
- *oat_class = oat_dex_file->GetOatClass(class_def_idx);
- return true;
+ *found = true;
+ return oat_dex_file->GetOatClass(class_def_idx);
}
static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
@@ -2073,8 +2110,7 @@
return 0;
}
-bool ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method) {
- DCHECK(oat_method != nullptr);
+const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, bool* found) {
// Although we overwrite the trampoline of non-static methods, we may get here via the resolution
// method for direct methods (or virtual methods made direct).
mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -2101,15 +2137,14 @@
GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
method->GetDeclaringClass()->GetDexClassDefIndex(),
method->GetDexMethodIndex()));
- OatFile::OatClass oat_class;
- if (!FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
- declaring_class->GetDexClassDefIndex(),
- &oat_class)) {
- return false;
+ OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
+ declaring_class->GetDexClassDefIndex(),
+ found);
+ if (!*found) {
+ return OatFile::OatMethod::Invalid();
}
-
- *oat_method = oat_class.GetOatMethod(oat_method_index);
- return true;
+ *found = true;
+ return oat_class.GetOatMethod(oat_method_index);
}
// Special case to get oat code without overwriting a trampoline.
@@ -2118,9 +2153,10 @@
if (method->IsProxyMethod()) {
return GetQuickProxyInvokeHandler();
}
- OatFile::OatMethod oat_method;
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
const void* result = nullptr;
- if (FindOatMethodFor(method, &oat_method)) {
+ if (found) {
result = oat_method.GetQuickCode();
}
@@ -2146,10 +2182,11 @@
if (method->IsProxyMethod()) {
return GetPortableProxyInvokeHandler();
}
- OatFile::OatMethod oat_method;
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
const void* result = nullptr;
const void* quick_code = nullptr;
- if (FindOatMethodFor(method, &oat_method)) {
+ if (found) {
result = oat_method.GetPortableCode();
quick_code = oat_method.GetQuickCode();
}
@@ -2168,10 +2205,29 @@
return result;
}
+const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
+ if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+ return nullptr;
+ }
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ return found ? oat_method.GetQuickCode() : nullptr;
+}
+
+const void* ClassLinker::GetOatMethodPortableCodeFor(mirror::ArtMethod* method) {
+ if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+ return nullptr;
+ }
+ bool found;
+ OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+ return found ? oat_method.GetPortableCode() : nullptr;
+}
+
const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- OatFile::OatClass oat_class;
- if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+ bool found;
+ OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
+ if (!found) {
return nullptr;
}
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
@@ -2180,8 +2236,9 @@
const void* ClassLinker::GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
uint32_t method_idx) {
- OatFile::OatClass oat_class;
- if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+ bool found;
+ OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
+ if (!found) {
return nullptr;
}
uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
@@ -2234,8 +2291,9 @@
while (it.HasNextInstanceField()) {
it.Next();
}
- OatFile::OatClass oat_class;
- bool has_oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class);
+ bool has_oat_class;
+ OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
+ &has_oat_class);
// Link the code of methods skipped by LinkCode.
for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) {
mirror::ArtMethod* method = klass->GetDirectMethod(method_index);
@@ -2386,12 +2444,16 @@
return; // no fields or methods - for example a marker interface
}
- OatFile::OatClass oat_class;
- if (Runtime::Current()->IsStarted()
- && !Runtime::Current()->UseCompileTimeClassPath()
- && FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class)) {
- LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
- } else {
+
+ bool has_oat_class = false;
+ if (Runtime::Current()->IsStarted() && !Runtime::Current()->UseCompileTimeClassPath()) {
+ OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
+ &has_oat_class);
+ if (has_oat_class) {
+ LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
+ }
+ }
+ if (!has_oat_class) {
LoadClassMembers(dex_file, class_data, klass, class_loader, nullptr);
}
}
@@ -3561,19 +3623,13 @@
proxy_class->GetDirectMethods();
CHECK_EQ(proxy_direct_methods->GetLength(), 16);
mirror::ArtMethod* proxy_constructor = proxy_direct_methods->Get(2);
+ // Clone the existing constructor of Proxy (our constructor would just invoke it, so steal its
+ // code_ too).
mirror::ArtMethod* constructor = down_cast<mirror::ArtMethod*>(proxy_constructor->Clone(self));
if (constructor == nullptr) {
CHECK(self->IsExceptionPending()); // OOME.
return nullptr;
}
- // Make the proxy constructor's code always point to the uninstrumented code. This avoids
- // getting a method enter event for the proxy constructor as the proxy constructor doesn't
- // have an activation.
- bool have_portable_code;
- constructor->SetEntryPointFromQuickCompiledCode(GetQuickOatCodeFor(proxy_constructor));
- constructor->SetEntryPointFromPortableCompiledCode(GetPortableOatCodeFor(proxy_constructor,
- &have_portable_code));
-
// Make this constructor public and fix the class to be our Proxy version
constructor->SetAccessFlags((constructor->GetAccessFlags() & ~kAccProtected) | kAccPublic);
constructor->SetDeclaringClass(klass.Get());
@@ -4330,7 +4386,7 @@
return true;
}
}
- StackHandleScope<4> hs(self);
+ StackHandleScope<5> hs(self);
Handle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
if (UNLIKELY(iftable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
@@ -4413,7 +4469,13 @@
}
MethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
MethodHelper vtable_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
- std::vector<mirror::ArtMethod*> miranda_list;
+ size_t max_miranda_methods = 0; // The max size of miranda_list.
+ for (size_t i = 0; i < ifcount; ++i) {
+ max_miranda_methods += iftable->GetInterface(i)->NumVirtualMethods();
+ }
+ Handle<mirror::ObjectArray<mirror::ArtMethod>>
+ miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods)));
+ size_t miranda_list_size = 0; // The current size of miranda_list.
for (size_t i = 0; i < ifcount; ++i) {
size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
if (num_methods > 0) {
@@ -4428,8 +4490,7 @@
Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
hs.NewHandle(klass->GetVTableDuringLinking()));
for (size_t j = 0; j < num_methods; ++j) {
- mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
- interface_mh.ChangeMethod(interface_method);
+ interface_mh.ChangeMethod(iftable->GetInterface(i)->GetVirtualMethod(j));
int32_t k;
// For each method listed in the interface's method list, find the
// matching method in our class's method list. We want to favor the
@@ -4440,22 +4501,21 @@
// those don't end up in the virtual method table, so it shouldn't
// matter which direction we go. We walk it backward anyway.)
for (k = vtable->GetLength() - 1; k >= 0; --k) {
- mirror::ArtMethod* vtable_method = vtable->Get(k);
- vtable_mh.ChangeMethod(vtable_method);
+ vtable_mh.ChangeMethod(vtable->Get(k));
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
- if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
+ if (!vtable_mh.Get()->IsAbstract() && !vtable_mh.Get()->IsPublic()) {
ThrowIllegalAccessError(
klass.Get(),
"Method '%s' implementing interface method '%s' is not public",
- PrettyMethod(vtable_method).c_str(),
- PrettyMethod(interface_method).c_str());
+ PrettyMethod(vtable_mh.Get()).c_str(),
+ PrettyMethod(interface_mh.Get()).c_str());
return false;
}
- method_array->Set<false>(j, vtable_method);
+ method_array->Set<false>(j, vtable_mh.Get());
// Place method in imt if entry is empty, place conflict otherwise.
- uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+ uint32_t imt_index = interface_mh.Get()->GetDexMethodIndex() % mirror::Class::kImtSize;
if (imtable->Get(imt_index) == NULL) {
- imtable->Set<false>(imt_index, vtable_method);
+ imtable->Set<false>(imt_index, vtable_mh.Get());
imtable_changed = true;
} else {
imtable->Set<false>(imt_index, runtime->GetImtConflictMethod());
@@ -4466,7 +4526,9 @@
if (k < 0) {
StackHandleScope<1> hs(self);
auto miranda_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
- for (mirror::ArtMethod* mir_method : miranda_list) {
+ for (size_t l = 0; l < miranda_list_size; ++l) {
+ mirror::ArtMethod* mir_method = miranda_list->Get(l);
+ DCHECK(mir_method != nullptr);
vtable_mh.ChangeMethod(mir_method);
if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
miranda_method.Assign(mir_method);
@@ -4475,13 +4537,13 @@
}
if (miranda_method.Get() == NULL) {
// Point the interface table at a phantom slot.
- miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
+ miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_mh.Get()->Clone(self)));
if (UNLIKELY(miranda_method.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- // TODO: If a methods move then the miranda_list may hold stale references.
- miranda_list.push_back(miranda_method.Get());
+ DCHECK_LT(miranda_list_size, max_miranda_methods);
+ miranda_list->Set<false>(miranda_list_size++, miranda_method.Get());
}
method_array->Set<false>(j, miranda_method.Get());
}
@@ -4498,9 +4560,9 @@
}
klass->SetImTable(imtable.Get());
}
- if (!miranda_list.empty()) {
+ if (miranda_list_size > 0) {
int old_method_count = klass->NumVirtualMethods();
- int new_method_count = old_method_count + miranda_list.size();
+ int new_method_count = old_method_count + miranda_list_size;
mirror::ObjectArray<mirror::ArtMethod>* virtuals;
if (old_method_count == 0) {
virtuals = AllocArtMethodArray(self, new_method_count);
@@ -4518,14 +4580,14 @@
hs.NewHandle(klass->GetVTableDuringLinking()));
CHECK(vtable.Get() != NULL);
int old_vtable_count = vtable->GetLength();
- int new_vtable_count = old_vtable_count + miranda_list.size();
+ int new_vtable_count = old_vtable_count + miranda_list_size;
vtable.Assign(vtable->CopyOf(self, new_vtable_count));
if (UNLIKELY(vtable.Get() == NULL)) {
CHECK(self->IsExceptionPending()); // OOME.
return false;
}
- for (size_t i = 0; i < miranda_list.size(); ++i) {
- mirror::ArtMethod* method = miranda_list[i];
+ for (size_t i = 0; i < miranda_list_size; ++i) {
+ mirror::ArtMethod* method = miranda_list->Get(i);
// Leave the declaring class alone as type indices are relative to it
method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda);
method->SetMethodIndex(0xFFFF & (old_vtable_count + i));
@@ -4853,7 +4915,7 @@
Handle<mirror::ClassLoader> class_loader,
Handle<mirror::ArtMethod> referrer,
InvokeType type) {
- DCHECK(dex_cache.Get() != NULL);
+ DCHECK(dex_cache.Get() != nullptr);
// Check for hit in the dex cache.
mirror::ArtMethod* resolved = dex_cache->GetResolvedMethod(method_idx);
if (resolved != nullptr && !resolved->IsRuntimeMethod()) {
@@ -4862,9 +4924,9 @@
// Fail, get the declaring class.
const DexFile::MethodId& method_id = dex_file.GetMethodId(method_idx);
mirror::Class* klass = ResolveType(dex_file, method_id.class_idx_, dex_cache, class_loader);
- if (klass == NULL) {
+ if (klass == nullptr) {
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
// Scan using method_idx, this saves string compares but will only hit for matching dex
// caches/files.
@@ -4875,7 +4937,7 @@
break;
case kInterface:
resolved = klass->FindInterfaceMethod(dex_cache.Get(), method_idx);
- DCHECK(resolved == NULL || resolved->GetDeclaringClass()->IsInterface());
+ DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
@@ -4884,7 +4946,7 @@
default:
LOG(FATAL) << "Unreachable - invocation type: " << type;
}
- if (resolved == NULL) {
+ if (resolved == nullptr) {
// Search by name, which works across dex files.
const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
const Signature signature = dex_file.GetMethodSignature(method_id);
@@ -4895,7 +4957,7 @@
break;
case kInterface:
resolved = klass->FindInterfaceMethod(name, signature);
- DCHECK(resolved == NULL || resolved->GetDeclaringClass()->IsInterface());
+ DCHECK(resolved == nullptr || resolved->GetDeclaringClass()->IsInterface());
break;
case kSuper: // Fall-through.
case kVirtual:
@@ -4903,94 +4965,97 @@
break;
}
}
- if (resolved != NULL) {
- // We found a method, check for incompatible class changes.
- if (resolved->CheckIncompatibleClassChange(type)) {
- resolved = NULL;
- }
- }
- if (resolved != NULL) {
+ // If we found a method, check for incompatible class changes.
+ if (LIKELY(resolved != nullptr && !resolved->CheckIncompatibleClassChange(type))) {
// Be a good citizen and update the dex cache to speed subsequent calls.
dex_cache->SetResolvedMethod(method_idx, resolved);
return resolved;
} else {
- // We failed to find the method which means either an access error, an incompatible class
- // change, or no such method. First try to find the method among direct and virtual methods.
- const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
- const Signature signature = dex_file.GetMethodSignature(method_id);
- switch (type) {
- case kDirect:
- case kStatic:
- resolved = klass->FindVirtualMethod(name, signature);
- break;
- case kInterface:
- case kVirtual:
- case kSuper:
- resolved = klass->FindDirectMethod(name, signature);
- break;
- }
-
- // If we found something, check that it can be accessed by the referrer.
- if (resolved != NULL && referrer.Get() != NULL) {
- mirror::Class* methods_class = resolved->GetDeclaringClass();
- mirror::Class* referring_class = referrer->GetDeclaringClass();
- if (!referring_class->CanAccess(methods_class)) {
- ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
- resolved, type);
- return NULL;
- } else if (!referring_class->CanAccessMember(methods_class,
- resolved->GetAccessFlags())) {
- ThrowIllegalAccessErrorMethod(referring_class, resolved);
- return NULL;
- }
- }
-
- // Otherwise, throw an IncompatibleClassChangeError if we found something, and check interface
- // methods and throw if we find the method there. If we find nothing, throw a NoSuchMethodError.
- switch (type) {
- case kDirect:
- case kStatic:
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
- } else {
- resolved = klass->FindInterfaceMethod(name, signature);
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
- } else {
- ThrowNoSuchMethodError(type, klass, name, signature);
- }
- }
- break;
- case kInterface:
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
- } else {
+ // If we had a method, it's an incompatible-class-change error.
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, resolved->GetInvokeType(), resolved, referrer.Get());
+ } else {
+ // We failed to find the method which means either an access error, an incompatible class
+ // change, or no such method. First try to find the method among direct and virtual methods.
+ const char* name = dex_file.StringDataByIdx(method_id.name_idx_);
+ const Signature signature = dex_file.GetMethodSignature(method_id);
+ switch (type) {
+ case kDirect:
+ case kStatic:
resolved = klass->FindVirtualMethod(name, signature);
- if (resolved != NULL) {
+ // Note: kDirect and kStatic are also mutually exclusive, but in that case we would
+ // have had a resolved method before, which triggers the "true" branch above.
+ break;
+ case kInterface:
+ case kVirtual:
+ case kSuper:
+ resolved = klass->FindDirectMethod(name, signature);
+ break;
+ }
+
+ // If we found something, check that it can be accessed by the referrer.
+ if (resolved != nullptr && referrer.Get() != nullptr) {
+ mirror::Class* methods_class = resolved->GetDeclaringClass();
+ mirror::Class* referring_class = referrer->GetDeclaringClass();
+ if (!referring_class->CanAccess(methods_class)) {
+ ThrowIllegalAccessErrorClassForMethodDispatch(referring_class, methods_class,
+ resolved, type);
+ return nullptr;
+ } else if (!referring_class->CanAccessMember(methods_class,
+ resolved->GetAccessFlags())) {
+ ThrowIllegalAccessErrorMethod(referring_class, resolved);
+ return nullptr;
+ }
+ }
+
+ // Otherwise, throw an IncompatibleClassChangeError if we found something, and check interface
+ // methods and throw if we find the method there. If we find nothing, throw a
+ // NoSuchMethodError.
+ switch (type) {
+ case kDirect:
+ case kStatic:
+ if (resolved != nullptr) {
ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
} else {
- ThrowNoSuchMethodError(type, klass, name, signature);
+ resolved = klass->FindInterfaceMethod(name, signature);
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ } else {
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ }
}
- }
- break;
- case kSuper:
- ThrowNoSuchMethodError(type, klass, name, signature);
- break;
- case kVirtual:
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
- } else {
- resolved = klass->FindInterfaceMethod(name, signature);
- if (resolved != NULL) {
- ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ break;
+ case kInterface:
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
} else {
- ThrowNoSuchMethodError(type, klass, name, signature);
+ resolved = klass->FindVirtualMethod(name, signature);
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kVirtual, resolved, referrer.Get());
+ } else {
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ }
}
- }
- break;
+ break;
+ case kSuper:
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ break;
+ case kVirtual:
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kDirect, resolved, referrer.Get());
+ } else {
+ resolved = klass->FindInterfaceMethod(name, signature);
+ if (resolved != nullptr) {
+ ThrowIncompatibleClassChangeError(type, kInterface, resolved, referrer.Get());
+ } else {
+ ThrowNoSuchMethodError(type, klass, name, signature);
+ }
+ }
+ break;
+ }
}
DCHECK(Thread::Current()->IsExceptionPending());
- return NULL;
+ return nullptr;
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6fc0f0e..5694149 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -344,6 +344,14 @@
const void* GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Get compiled code for a method; return null if no code
+ // exists. This is unlike Get..OatCodeFor which will return a bridge
+ // or interpreter entrypoint.
+ const void* GetOatMethodQuickCodeFor(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ const void* GetOatMethodPortableCodeFor(mirror::ArtMethod* method)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
pid_t GetClassesLockOwner(); // For SignalCatcher.
pid_t GetDexLockOwner(); // For SignalCatcher.
@@ -392,7 +400,7 @@
}
private:
- bool FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method)
+ const OatFile::OatMethod FindOatMethodFor(mirror::ArtMethod* method, bool* found)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
OatFile& GetImageOatFile(gc::space::ImageSpace* space)
@@ -461,9 +469,9 @@
void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Finds the associated oat class for a dex_file and descriptor. Returns whether the class
- // was found, and sets the data in oat_class.
- bool FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, OatFile::OatClass* oat_class)
+ // Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on
+ // error and sets found to false.
+ OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
@@ -565,6 +573,10 @@
std::vector<std::string>* error_msg)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
+ const OatFile* GetInterpretedOnlyOat(const std::string& oat_path,
+ InstructionSet isa,
+ std::string* error_msg);
+
const OatFile* PatchAndRetrieveOat(const std::string& input, const std::string& output,
const std::string& image_location, InstructionSet isa,
std::string* error_msg)
@@ -744,6 +756,7 @@
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
friend class NoDex2OatTest; // for FindOpenedOatFileForDexFile
+ friend class NoPatchoatTest; // for FindOpenedOatFileForDexFile
FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors);
FRIEND_TEST(mirror::DexCacheTest, Open);
FRIEND_TEST(ExceptionTest, FindExceptionHandler);
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index ab4a2bb..eed6f71 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -285,19 +285,6 @@
return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str());
}
-std::string CommonRuntimeTest::GetLibCoreOatFileName() {
- return GetOatFileName("core");
-}
-
-std::string CommonRuntimeTest::GetOatFileName(const std::string& oat_prefix) {
- if (IsHost()) {
- const char* host_dir = getenv("ANDROID_HOST_OUT");
- CHECK(host_dir != nullptr);
- return StringPrintf("%s/framework/%s.art", host_dir, oat_prefix.c_str());
- }
- return StringPrintf("%s/framework/%s.art", GetAndroidRoot(), oat_prefix.c_str());
-}
-
std::string CommonRuntimeTest::GetTestAndroidRoot() {
if (IsHost()) {
const char* host_dir = getenv("ANDROID_HOST_OUT");
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 12c1241..1ca6eb3 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -97,12 +97,6 @@
// Gets the path of the specified dex file for host or target.
std::string GetDexFileName(const std::string& jar_prefix);
- // Gets the path of the libcore oat file.
- std::string GetLibCoreOatFileName();
-
- // Gets the path of the specified oat file for host or target.
- std::string GetOatFileName(const std::string& oat_prefix);
-
std::string GetTestAndroidRoot();
std::vector<const DexFile*> OpenTestDexFiles(const char* name)
@@ -161,6 +155,12 @@
return; \
}
+#define TEST_DISABLED_FOR_MIPS() \
+ if (kRuntimeISA == kMips || kRuntimeISA == kMips64) { \
+ printf("WARNING: TEST DISABLED FOR MIPS\n"); \
+ return; \
+ }
+
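The new macro follows the existing TEST_DISABLED_* pattern: placed at the top of a gtest body, it prints a warning and returns early when the runtime ISA is MIPS. A hypothetical usage sketch (test and fixture names here are illustrative):

    TEST_F(SomeRuntimeTest, NeedsNonMipsCodegen) {
      TEST_DISABLED_FOR_MIPS();  // Early-returns on kMips/kMips64.
      // Assertions that rely on unimplemented MIPS support run on every other ISA.
    }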
} // namespace art
namespace std {
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 1cddb8b..6d2f21e 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -664,6 +664,11 @@
}
void Dbg::StopJdwp() {
+ // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
+ // destruction of gJdwpState).
+ if (gJdwpState != nullptr && gJdwpState->IsActive()) {
+ gJdwpState->PostVMDeath();
+ }
// Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
Disposed();
delete gJdwpState;
diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc
index 6179b5e..566ce03 100644
--- a/runtime/elf_file.cc
+++ b/runtime/elf_file.cc
@@ -1020,7 +1020,7 @@
return nullptr;
}
-struct PACKED(1) FDE {
+struct PACKED(1) FDE32 {
uint32_t raw_length_;
uint32_t GetLength() {
return raw_length_ + sizeof(raw_length_);
@@ -1031,25 +1031,186 @@
uint8_t instructions[0];
};
-static FDE* NextFDE(FDE* frame) {
+static FDE32* NextFDE(FDE32* frame) {
byte* fde_bytes = reinterpret_cast<byte*>(frame);
fde_bytes += frame->GetLength();
- return reinterpret_cast<FDE*>(fde_bytes);
+ return reinterpret_cast<FDE32*>(fde_bytes);
}
-static bool IsFDE(FDE* frame) {
+static bool IsFDE(FDE32* frame) {
return frame->CIE_pointer != 0;
}
-// TODO This only works for 32-bit Elf Files.
-static bool FixupEHFrame(uintptr_t text_start, byte* eh_frame, size_t eh_frame_size) {
- FDE* last_frame = reinterpret_cast<FDE*>(eh_frame + eh_frame_size);
- FDE* frame = NextFDE(reinterpret_cast<FDE*>(eh_frame));
- for (; frame < last_frame; frame = NextFDE(frame)) {
- if (!IsFDE(frame)) {
+struct PACKED(1) FDE64 {
+ uint32_t raw_length_;
+ uint64_t extended_length_;
+ uint64_t GetLength() {
+ return extended_length_ + sizeof(raw_length_) + sizeof(extended_length_);
+ }
+ uint64_t CIE_pointer;
+ uint64_t initial_location;
+ uint64_t address_range;
+ uint8_t instructions[0];
+};
+
+static FDE64* NextFDE(FDE64* frame) {
+ byte* fde_bytes = reinterpret_cast<byte*>(frame);
+ fde_bytes += frame->GetLength();
+ return reinterpret_cast<FDE64*>(fde_bytes);
+}
+
+static bool IsFDE(FDE64* frame) {
+ return frame->CIE_pointer != 0;
+}
+
+static bool FixupEHFrame(off_t base_address_delta,
+ byte* eh_frame, size_t eh_frame_size) {
+ if (*(reinterpret_cast<uint32_t*>(eh_frame)) == 0xffffffff) {
+ FDE64* last_frame = reinterpret_cast<FDE64*>(eh_frame + eh_frame_size);
+ FDE64* frame = NextFDE(reinterpret_cast<FDE64*>(eh_frame));
+ for (; frame < last_frame; frame = NextFDE(frame)) {
+ if (!IsFDE(frame)) {
+ return false;
+ }
+ frame->initial_location += base_address_delta;
+ }
+ return true;
+ } else {
+ FDE32* last_frame = reinterpret_cast<FDE32*>(eh_frame + eh_frame_size);
+ FDE32* frame = NextFDE(reinterpret_cast<FDE32*>(eh_frame));
+ for (; frame < last_frame; frame = NextFDE(frame)) {
+ if (!IsFDE(frame)) {
+ return false;
+ }
+ frame->initial_location += base_address_delta;
+ }
+ return true;
+ }
+}
+
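The 0xffffffff probe above is the standard DWARF initial-length escape: a 32-bit length field holding 0xffffffff marks a 64-bit DWARF record whose real length follows in the next eight bytes. A minimal reader sketch under that assumption (not ART code):

    #include <cstdint>
    #include <cstring>

    // Returns the record length and reports whether the record is 64-bit DWARF.
    static uint64_t ReadInitialLength(const uint8_t* p, bool* is_dwarf64) {
      uint32_t len32;
      memcpy(&len32, p, sizeof(len32));
      *is_dwarf64 = (len32 == 0xffffffffu);
      if (!*is_dwarf64) {
        return len32;
      }
      uint64_t len64;
      memcpy(&len64, p + sizeof(len32), sizeof(len64));
      return len64;
    }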
+static uint8_t* NextLeb128(uint8_t* current) {
+ DecodeUnsignedLeb128(const_cast<const uint8_t**>(&current));
+ return current;
+}
+
+struct PACKED(1) DebugLineHeader {
+ uint32_t unit_length_; // TODO 32-bit specific size
+ uint16_t version_;
+ uint32_t header_length_; // TODO 32-bit specific size
+ uint8_t minimum_instruction_lenght_;
+ uint8_t maximum_operations_per_instruction_;
+ uint8_t default_is_stmt_;
+ int8_t line_base_;
+ uint8_t line_range_;
+ uint8_t opcode_base_;
+ uint8_t remaining_[0];
+
+ bool IsStandardOpcode(const uint8_t* op) const {
+ return *op != 0 && *op < opcode_base_;
+ }
+
+ bool IsExtendedOpcode(const uint8_t* op) const {
+ return *op == 0;
+ }
+
+ const uint8_t* GetStandardOpcodeLengths() const {
+ return remaining_;
+ }
+
+ uint8_t* GetNextOpcode(uint8_t* op) const {
+ if (IsExtendedOpcode(op)) {
+ uint8_t* length_field = op + 1;
+ uint32_t length = DecodeUnsignedLeb128(const_cast<const uint8_t**>(&length_field));
+ return length_field + length;
+ } else if (!IsStandardOpcode(op)) {
+ return op + 1;
+ } else if (*op == DW_LNS_fixed_advance_pc) {
+ return op + 1 + sizeof(uint16_t);
+ } else {
+ uint8_t num_args = GetStandardOpcodeLengths()[*op - 1];
+ op += 1;
+ for (int i = 0; i < num_args; i++) {
+ op = NextLeb128(op);
+ }
+ return op;
+ }
+ }
+
+ uint8_t* GetDebugLineData() const {
+ const uint8_t* hdr_start =
+ reinterpret_cast<const uint8_t*>(&header_length_) + sizeof(header_length_);
+ return const_cast<uint8_t*>(hdr_start + header_length_);
+ }
+};
+
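GetNextOpcode() above walks the three opcode classes of the DWARF line-number program: a 0 byte starts an extended opcode (ULEB128 length, then payload), values below opcode_base_ are standard opcodes taking ULEB128 operands (except DW_LNS_fixed_advance_pc, whose operand is a fixed two-byte half), and values at or above opcode_base_ are special opcodes with no operands. A classifier sketch under those assumptions:

    #include <cstdint>

    enum OpcodeClass { kExtended, kStandard, kSpecial };

    // opcode_base comes from the .debug_line header, as in DebugLineHeader above.
    static OpcodeClass Classify(uint8_t op, uint8_t opcode_base) {
      if (op == 0) {
        return kExtended;  // 0, ULEB128 length, then the extended opcode byte.
      }
      if (op < opcode_base) {
        return kStandard;  // Operand count comes from the standard opcode lengths table.
      }
      return kSpecial;     // Encodes line/address advances directly; no operands.
    }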
+class DebugLineInstructionIterator {
+ public:
+ static DebugLineInstructionIterator* Create(DebugLineHeader* header, size_t section_size) {
+ std::unique_ptr<DebugLineInstructionIterator> line_iter(
+ new DebugLineInstructionIterator(header, section_size));
+ if (line_iter.get() == nullptr) {
+ return nullptr;
+ } else {
+ return line_iter.release();
+ }
+ }
+
+ ~DebugLineInstructionIterator() {}
+
+ bool Next() {
+ if (current_instruction_ == nullptr) {
return false;
}
- frame->initial_location += text_start;
+ current_instruction_ = header_->GetNextOpcode(current_instruction_);
+ if (current_instruction_ >= last_instruction_) {
+ current_instruction_ = nullptr;
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ uint8_t* GetInstruction() {
+ return current_instruction_;
+ }
+
+ bool IsExtendedOpcode() {
+ return header_->IsExtendedOpcode(current_instruction_);
+ }
+
+ uint8_t GetOpcode() {
+ if (!IsExtendedOpcode()) {
+ return *current_instruction_;
+ } else {
+ uint8_t* len_ptr = current_instruction_ + 1;
+ return *NextLeb128(len_ptr);
+ }
+ }
+
+ uint8_t* GetArguments() {
+ if (!IsExtendedOpcode()) {
+ return current_instruction_ + 1;
+ } else {
+ uint8_t* len_ptr = current_instruction_ + 1;
+ return NextLeb128(len_ptr) + 1;
+ }
+ }
+
+ private:
+ DebugLineInstructionIterator(DebugLineHeader* header, size_t size)
+ : header_(header), last_instruction_(reinterpret_cast<uint8_t*>(header) + size),
+ current_instruction_(header->GetDebugLineData()) {}
+
+ DebugLineHeader* header_;
+ uint8_t* last_instruction_;
+ uint8_t* current_instruction_;
+};
+
+static bool FixupDebugLine(off_t base_offset_delta, DebugLineInstructionIterator* iter) {
+ while (iter->Next()) {
+ if (iter->IsExtendedOpcode() && iter->GetOpcode() == DW_LNE_set_address) {
+ *reinterpret_cast<uint32_t*>(iter->GetArguments()) += base_offset_delta;
+ }
}
return true;
}
@@ -1189,20 +1350,29 @@
public:
~DebugAbbrev() {}
static DebugAbbrev* Create(const byte* dbg_abbrev, size_t dbg_abbrev_size) {
- std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev);
- const byte* last = dbg_abbrev + dbg_abbrev_size;
- while (dbg_abbrev < last) {
- std::unique_ptr<DebugTag> tag(DebugTag::Create(&dbg_abbrev));
- if (tag.get() == nullptr) {
- return nullptr;
- } else {
- abbrev->tags_.insert(std::pair<uint32_t, uint32_t>(tag->index_, abbrev->tag_list_.size()));
- abbrev->tag_list_.push_back(std::move(tag));
- }
+ std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev(dbg_abbrev, dbg_abbrev + dbg_abbrev_size));
+ if (!abbrev->ReadAtOffset(0)) {
+ return nullptr;
}
return abbrev.release();
}
+ bool ReadAtOffset(uint32_t abbrev_offset) {
+ tags_.clear();
+ tag_list_.clear();
+ const byte* dbg_abbrev = begin_ + abbrev_offset;
+ while (dbg_abbrev < end_ && *dbg_abbrev != 0) {
+ std::unique_ptr<DebugTag> tag(DebugTag::Create(&dbg_abbrev));
+ if (tag.get() == nullptr) {
+ return false;
+ } else {
+ tags_.insert(std::pair<uint32_t, uint32_t>(tag->index_, tag_list_.size()));
+ tag_list_.push_back(std::move(tag));
+ }
+ }
+ return true;
+ }
+
DebugTag* ReadTag(const byte* entry) {
uint32_t tag_num = DecodeUnsignedLeb128(&entry);
auto it = tags_.find(tag_num);
@@ -1215,7 +1385,9 @@
}
private:
- DebugAbbrev() {}
+ DebugAbbrev(const byte* begin, const byte* end) : begin_(begin), end_(end) {}
+ const byte* begin_;
+ const byte* end_;
std::map<uint32_t, uint32_t> tags_;
std::vector<std::unique_ptr<DebugTag>> tag_list_;
};
@@ -1239,11 +1411,21 @@
if (current_entry_ == nullptr || current_tag_ == nullptr) {
return false;
}
+ bool reread_abbrev = false;
current_entry_ += current_tag_->GetSize();
+ if (reinterpret_cast<DebugInfoHeader*>(current_entry_) >= next_cu_) {
+ current_cu_ = next_cu_;
+ next_cu_ = GetNextCu(current_cu_);
+ current_entry_ = reinterpret_cast<byte*>(current_cu_) + sizeof(DebugInfoHeader);
+ reread_abbrev = true;
+ }
if (current_entry_ >= last_entry_) {
current_entry_ = nullptr;
return false;
}
+ if (reread_abbrev) {
+ abbrev_->ReadAtOffset(current_cu_->debug_abbrev_offset);
+ }
current_tag_ = abbrev_->ReadTag(current_entry_);
if (current_tag_ == nullptr) {
current_entry_ = nullptr;
@@ -1271,49 +1453,91 @@
}
private:
+ static DebugInfoHeader* GetNextCu(DebugInfoHeader* hdr) {
+ byte* hdr_byte = reinterpret_cast<byte*>(hdr);
+ return reinterpret_cast<DebugInfoHeader*>(hdr_byte + sizeof(uint32_t) + hdr->unit_length);
+ }
+
DebugInfoIterator(DebugInfoHeader* header, size_t frame_size, DebugAbbrev* abbrev)
: abbrev_(abbrev),
+ current_cu_(header),
+ next_cu_(GetNextCu(header)),
last_entry_(reinterpret_cast<byte*>(header) + frame_size),
current_entry_(reinterpret_cast<byte*>(header) + sizeof(DebugInfoHeader)),
current_tag_(abbrev_->ReadTag(current_entry_)) {}
DebugAbbrev* abbrev_;
+ DebugInfoHeader* current_cu_;
+ DebugInfoHeader* next_cu_;
byte* last_entry_;
byte* current_entry_;
DebugTag* current_tag_;
};
-static bool FixupDebugInfo(uint32_t text_start, DebugInfoIterator* iter) {
+static bool FixupDebugInfo(off_t base_address_delta, DebugInfoIterator* iter) {
do {
if (iter->GetCurrentTag()->GetAttrSize(DW_AT_low_pc) != sizeof(int32_t) ||
iter->GetCurrentTag()->GetAttrSize(DW_AT_high_pc) != sizeof(int32_t)) {
+ LOG(ERROR) << "DWARF information with 64 bit pointers is not supported yet.";
return false;
}
uint32_t* PC_low = reinterpret_cast<uint32_t*>(iter->GetPointerToField(DW_AT_low_pc));
uint32_t* PC_high = reinterpret_cast<uint32_t*>(iter->GetPointerToField(DW_AT_high_pc));
if (PC_low != nullptr && PC_high != nullptr) {
- *PC_low += text_start;
- *PC_high += text_start;
+ *PC_low += base_address_delta;
+ *PC_high += base_address_delta;
}
} while (iter->next());
return true;
}
-static bool FixupDebugSections(const byte* dbg_abbrev, size_t dbg_abbrev_size,
- uintptr_t text_start,
- byte* dbg_info, size_t dbg_info_size,
- byte* eh_frame, size_t eh_frame_size) {
- std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(dbg_abbrev, dbg_abbrev_size));
+bool ElfFile::FixupDebugSections(off_t base_address_delta) {
+ const Elf32_Shdr* debug_info = FindSectionByName(".debug_info");
+ const Elf32_Shdr* debug_abbrev = FindSectionByName(".debug_abbrev");
+ const Elf32_Shdr* eh_frame = FindSectionByName(".eh_frame");
+ const Elf32_Shdr* debug_str = FindSectionByName(".debug_str");
+ const Elf32_Shdr* debug_line = FindSectionByName(".debug_line");
+ const Elf32_Shdr* strtab_sec = FindSectionByName(".strtab");
+ const Elf32_Shdr* symtab_sec = FindSectionByName(".symtab");
+
+ if (debug_info == nullptr || debug_abbrev == nullptr ||
+ debug_str == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+ // Release version of ART does not generate debug info.
+ return true;
+ }
+ if (base_address_delta == 0) {
+ return true;
+ }
+ if (eh_frame != nullptr &&
+ !FixupEHFrame(base_address_delta, Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
+ return false;
+ }
+
+ std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(Begin() + debug_abbrev->sh_offset,
+ debug_abbrev->sh_size));
if (abbrev.get() == nullptr) {
return false;
}
- std::unique_ptr<DebugInfoIterator> iter(
- DebugInfoIterator::Create(reinterpret_cast<DebugInfoHeader*>(dbg_info),
- dbg_info_size, abbrev.get()));
- if (iter.get() == nullptr) {
+ DebugInfoHeader* info_header =
+ reinterpret_cast<DebugInfoHeader*>(Begin() + debug_info->sh_offset);
+ std::unique_ptr<DebugInfoIterator> info_iter(DebugInfoIterator::Create(info_header,
+ debug_info->sh_size,
+ abbrev.get()));
+ if (info_iter.get() == nullptr) {
return false;
}
- return FixupDebugInfo(text_start, iter.get())
- && FixupEHFrame(text_start, eh_frame, eh_frame_size);
+ if (debug_line != nullptr) {
+ DebugLineHeader* line_header =
+ reinterpret_cast<DebugLineHeader*>(Begin() + debug_line->sh_offset);
+ std::unique_ptr<DebugLineInstructionIterator> line_iter(
+ DebugLineInstructionIterator::Create(line_header, debug_line->sh_size));
+ if (line_iter.get() == nullptr) {
+ return false;
+ }
+ if (!FixupDebugLine(base_address_delta, line_iter.get())) {
+ return false;
+ }
+ }
+ return FixupDebugInfo(base_address_delta, info_iter.get());
}
void ElfFile::GdbJITSupport() {
@@ -1331,19 +1555,13 @@
}
ElfFile& all = *all_ptr;
- // Do we have interesting sections?
- const Elf32_Shdr* debug_info = all.FindSectionByName(".debug_info");
- const Elf32_Shdr* debug_abbrev = all.FindSectionByName(".debug_abbrev");
+ // We need the eh_frame for gdb but debug info might be present without it.
const Elf32_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
- const Elf32_Shdr* debug_str = all.FindSectionByName(".debug_str");
- const Elf32_Shdr* strtab_sec = all.FindSectionByName(".strtab");
- const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
- Elf32_Shdr* text_sec = all.FindSectionByName(".text");
- if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
- debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
- symtab_sec == nullptr) {
+ if (eh_frame == nullptr) {
return;
}
+
+ // Do we have interesting sections?
// We need to add in a strtab and symtab to the image.
// all is MAP_PRIVATE so it can be written to freely.
// We also already have strtab and symtab so we are fine there.
@@ -1354,13 +1572,9 @@
elf_hdr.e_phentsize = 0;
elf_hdr.e_type = ET_EXEC;
- text_sec->sh_type = SHT_NOBITS;
- text_sec->sh_offset = 0;
-
- if (!FixupDebugSections(
- all.Begin() + debug_abbrev->sh_offset, debug_abbrev->sh_size, text_sec->sh_addr,
- all.Begin() + debug_info->sh_offset, debug_info->sh_size,
- all.Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
+ // base_address_ is 0 if we are loaded at the address we were compiled for (i.e. this is
+ // boot.oat) and is the load delta for regular oat files, so passing it as the delta is correct.
+ if (!all.FixupDebugSections(reinterpret_cast<intptr_t>(base_address_))) {
LOG(ERROR) << "Failed to load GDB data";
return;
}
diff --git a/runtime/elf_file.h b/runtime/elf_file.h
index a966bd9..1922911 100644
--- a/runtime/elf_file.h
+++ b/runtime/elf_file.h
@@ -128,6 +128,8 @@
// executable is true at run time, false at compile time.
bool Load(bool executable, std::string* error_msg);
+ bool FixupDebugSections(off_t base_address_delta);
+
private:
ElfFile(File* file, bool writable, bool program_header_only);
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index a0e35f8..cf89850 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -211,7 +211,7 @@
}
bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks();
- self->ResetDefaultStackEnd(!explicit_overflow_check); // Return to default stack size.
+ self->ResetDefaultStackEnd(); // Return to default stack size.
// And restore protection if implicit checks are on.
if (!explicit_overflow_check) {
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 4730701..dfd2e11 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -1699,7 +1699,15 @@
artQuickGenericJniEndJNINonRef(self, cookie, lock);
switch (return_shorty_char) {
- case 'F': // Fall-through.
+ case 'F': {
+ if (kRuntimeISA == kX86) {
+ // Convert back the result to float.
+ double d = bit_cast<uint64_t, double>(result_f);
+ return bit_cast<float, uint32_t>(static_cast<float>(d));
+ } else {
+ return result_f;
+ }
+ }
case 'D':
return result_f;
case 'Z':
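
The x86-specific branch exists because on x86 the generic JNI return path holds the float result widened to a double inside result_f; recovering a proper jfloat needs a value conversion (double to float, which may round) sandwiched between two bit-level reinterpretations, not a single cast of the raw bits. A self-contained sketch of that distinction, with a memcpy-based helper standing in for ART's bit_cast (names and template-argument order here are illustrative):

    #include <cstdint>
    #include <cstring>

    // Reinterpret the bits of Source as Dest; sizes must match.
    template <typename Dest, typename Source>
    Dest BitCast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));
      return dest;
    }

    uint32_t NarrowDoubleBitsToFloatBits(uint64_t raw_result) {
      double d = BitCast<double>(raw_result);  // reinterpret: no value change
      float f = static_cast<float>(d);         // value conversion: may round
      return BitCast<uint32_t>(f);             // reinterpret the float's bits
    }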
diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h
index 217360f..3b06f74 100644
--- a/runtime/gc/accounting/card_table-inl.h
+++ b/runtime/gc/accounting/card_table-inl.h
@@ -55,7 +55,7 @@
// scan_end is the byte after the last byte we scan.
DCHECK_LE(scan_end, reinterpret_cast<byte*>(bitmap->HeapLimit()));
byte* card_cur = CardFromAddr(scan_begin);
- byte* card_end = CardFromAddr(scan_end);
+ byte* card_end = CardFromAddr(AlignUp(scan_end, kCardSize));
CheckCardValid(card_cur);
CheckCardValid(card_end);
size_t cards_scanned = 0;
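
Aligning scan_end up matters because CardFromAddr truncates: for an unaligned end address it would return the card containing that address, and the final partial card would never be scanned. The card arithmetic itself is plain shifting; a sketch under assumed constants (ART's cards cover 128 bytes of heap each):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kCardShift = 7;
    constexpr size_t kCardSize = 1u << kCardShift;  // 128 heap bytes per card

    // Map a heap address to its card, given a biased card-table base.
    uint8_t* CardFromAddr(uint8_t* biased_base, const void* heap_addr) {
      return biased_base + (reinterpret_cast<uintptr_t>(heap_addr) >> kCardShift);
    }

    // Round a pointer up to the next boundary (alignment must be a power of two).
    uint8_t* AlignUpPtr(uint8_t* ptr, size_t alignment) {
      uintptr_t v = reinterpret_cast<uintptr_t>(ptr);
      return reinterpret_cast<uint8_t*>((v + alignment - 1) & ~(alignment - 1));
    }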
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 2686af0..3acf80d 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -72,9 +72,11 @@
class ModUnionUpdateObjectReferencesVisitor {
public:
- ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg)
- : callback_(callback),
- arg_(arg) {
+ ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg,
+ space::ContinuousSpace* from_space,
+ bool* contains_reference_to_other_space)
+ : callback_(callback), arg_(arg), from_space_(from_space),
+ contains_reference_to_other_space_(contains_reference_to_other_space) {
}
// Extra parameters are required since we use this same visitor signature for checking objects.
@@ -82,7 +84,9 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Only add the reference if it is non null and fits our criteria.
mirror::HeapReference<Object>* obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
- if (obj_ptr->AsMirrorPtr() != nullptr) {
+ mirror::Object* ref = obj_ptr->AsMirrorPtr();
+ if (ref != nullptr && !from_space_->HasAddress(ref)) {
+ *contains_reference_to_other_space_ = true;
callback_(obj_ptr, arg_);
}
}
@@ -90,24 +94,36 @@
private:
MarkHeapReferenceCallback* const callback_;
void* arg_;
+ // The space which we are scanning.
+ space::ContinuousSpace* const from_space_;
+ // Set if we have any references to another space.
+ bool* const contains_reference_to_other_space_;
};
class ModUnionScanImageRootVisitor {
public:
- ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg)
- : callback_(callback), arg_(arg) {}
+ ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg,
+ space::ContinuousSpace* from_space,
+ bool* contains_reference_to_other_space)
+ : callback_(callback), arg_(arg), from_space_(from_space),
+ contains_reference_to_other_space_(contains_reference_to_other_space) {}
void operator()(Object* root) const
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != NULL);
- ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_);
+ ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_,
+ contains_reference_to_other_space_);
root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
}
private:
MarkHeapReferenceCallback* const callback_;
void* const arg_;
+ // The space which we are scanning.
+ space::ContinuousSpace* const from_space_;
+ // Set if we have any references to another space.
+ bool* const contains_reference_to_other_space_;
};
void ModUnionTableReferenceCache::ClearCards() {
@@ -313,12 +329,20 @@
void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
void* arg) {
CardTable* card_table = heap_->GetCardTable();
- ModUnionScanImageRootVisitor scan_visitor(callback, arg);
ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
- for (const byte* card_addr : cleared_cards_) {
- uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+ bool reference_to_other_space = false;
+ ModUnionScanImageRootVisitor scan_visitor(callback, arg, space_, &reference_to_other_space);
+ for (auto it = cleared_cards_.begin(), end = cleared_cards_.end(); it != end; ) {
+ uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(*it));
DCHECK(space_->HasAddress(reinterpret_cast<Object*>(start)));
+ reference_to_other_space = false;
bitmap->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
+ if (!reference_to_other_space) {
+ // No non-null references to another space, so remove the card.
+ it = cleared_cards_.erase(it);
+ } else {
+ ++it;
+ }
}
}
@@ -333,6 +357,17 @@
os << "]";
}
+void ModUnionTableCardCache::SetCards() {
+ CardTable* card_table = heap_->GetCardTable();
+ for (byte* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+ addr += CardTable::kCardSize) {
+ cleared_cards_.insert(card_table->CardFromAddr(addr));
+ }
+}
+
+void ModUnionTableReferenceCache::SetCards() {
+}
+
} // namespace accounting
} // namespace gc
} // namespace art
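
The UpdateAndMarkReferences rewrite above leans on the standard erase-while-iterating idiom for node-based containers: set::erase(iterator) returns the iterator past the removed element, so exactly one of erase() or ++it advances the loop. The idiom in isolation:

    #include <set>

    // Remove every element failing the predicate while walking the set once.
    template <typename T, typename Pred>
    void EraseIf(std::set<T>* s, Pred pred) {
      for (auto it = s->begin(), end = s->end(); it != end; ) {
        if (pred(*it)) {
          it = s->erase(it);  // erase hands back the next valid iterator
        } else {
          ++it;
        }
      }
    }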
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 449e171..f67dc27 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -65,6 +65,9 @@
// determining references to track.
virtual void ClearCards() = 0;
+ // Set all the cards.
+ virtual void SetCards() = 0;
+
// Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
// before a call to update, for example, back-to-back sticky GCs. Also mark references to other
// spaces which are stored in the mod-union table.
@@ -120,6 +123,8 @@
void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetCards() OVERRIDE;
+
protected:
// Cleared card array, used to update the mod-union table.
ModUnionTable::CardSet cleared_cards_;
@@ -150,6 +155,8 @@
void Dump(std::ostream& os);
+ void SetCards() OVERRIDE;
+
protected:
// Cleared card array, used to update the mod-union table.
CardSet cleared_cards_;
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 7d3fd2d..d1fb600 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -84,7 +84,9 @@
&klass);
if (obj == nullptr) {
bool after_is_current_allocator = allocator == GetCurrentAllocator();
- if (is_current_allocator && !after_is_current_allocator) {
+ // If there is a pending exception, fail the allocation right away since the next one
+ // could cause OOM and abort the runtime.
+ if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
// If the allocator changed, we need to restart the allocation.
return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 5d138d2..f0b7685 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -128,8 +128,8 @@
long_gc_log_threshold_(long_gc_log_threshold),
ignore_max_footprint_(ignore_max_footprint),
zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
- have_zygote_space_(false),
- large_object_threshold_(std::numeric_limits<size_t>::max()), // Starts out disabled.
+ zygote_space_(nullptr),
+ large_object_threshold_(kDefaultLargeObjectThreshold),
collector_type_running_(kCollectorTypeNone),
last_gc_type_(collector::kGcTypeNone),
next_gc_type_(collector::kGcTypePartial),
@@ -190,7 +190,6 @@
// If we aren't the zygote, switch to the default non zygote allocator. This may update the
// entrypoints.
if (!Runtime::Current()->IsZygote()) {
- large_object_threshold_ = kDefaultLargeObjectThreshold;
// Background compaction is currently not supported for command line runs.
if (background_collector_type_ != foreground_collector_type_) {
VLOG(heap) << "Disabling background compaction for non zygote";
@@ -468,7 +467,7 @@
// After the zygote we want this to be false if we don't have background compaction enabled so
// that getting primitive array elements is faster.
// We never have homogeneous compaction with GSS and don't need a space with movable objects.
- can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
+ can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
}
if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
RemoveRememberedSet(main_space_);
@@ -801,6 +800,9 @@
os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
<< "\n";
}
+ if (HasZygoteSpace()) {
+ os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
+ }
os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
os << "Approximate GC data structures memory overhead: " << gc_memory_overhead_.LoadRelaxed();
@@ -1823,7 +1825,8 @@
Thread* self = Thread::Current();
MutexLock mu(self, zygote_creation_lock_);
// Try to see if we have any Zygote spaces.
- if (have_zygote_space_) {
+ if (HasZygoteSpace()) {
+ LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
return;
}
VLOG(heap) << "Starting PreZygoteFork";
@@ -1897,26 +1900,26 @@
// from this point on.
RemoveRememberedSet(old_alloc_space);
}
- space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
- low_memory_mode_,
- &non_moving_space_);
+ zygote_space_ = old_alloc_space->CreateZygoteSpace("alloc space", low_memory_mode_,
+ &non_moving_space_);
CHECK(!non_moving_space_->CanMoveObjects());
if (same_space) {
main_space_ = non_moving_space_;
SetSpaceAsDefault(main_space_);
}
delete old_alloc_space;
- CHECK(zygote_space != nullptr) << "Failed creating zygote space";
- AddSpace(zygote_space);
+ CHECK(HasZygoteSpace()) << "Failed creating zygote space";
+ AddSpace(zygote_space_);
non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
AddSpace(non_moving_space_);
- have_zygote_space_ = true;
- // Enable large object space allocations.
- large_object_threshold_ = kDefaultLargeObjectThreshold;
// Create the zygote space mod union table.
accounting::ModUnionTable* mod_union_table =
- new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
+ new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
+ zygote_space_);
CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
+ // Set all the cards in the mod-union table since we don't know which objects contain references
+ // to large objects.
+ mod_union_table->SetCards();
AddModUnionTable(mod_union_table);
if (collector::SemiSpace::kUseRememberedSet) {
// Add a new remembered set for the post-zygote non-moving space.
@@ -1986,7 +1989,7 @@
// If the heap can't run the GC, silently fail and return that no GC was run.
switch (gc_type) {
case collector::kGcTypePartial: {
- if (!have_zygote_space_) {
+ if (!HasZygoteSpace()) {
return collector::kGcTypeNone;
}
break;
@@ -2483,7 +2486,6 @@
bool Heap::VerifyMissingCardMarks() {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertExclusiveHeld(self);
-
// We need to sort the live stack since we binary search it.
live_stack_->Sort();
// Since we sorted the allocation stack content, need to revoke all
@@ -2491,7 +2493,6 @@
RevokeAllThreadLocalAllocationStacks(self);
VerifyLiveStackReferences visitor(this);
GetLiveBitmap()->Visit(visitor);
-
// We can verify objects in the live stack since none of these should reference dead objects.
for (mirror::Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
if (!kUseThreadLocalAllocationStack || *it != nullptr) {
@@ -2689,7 +2690,7 @@
void Heap::PostGcVerification(collector::GarbageCollector* gc) {
if (verify_system_weaks_ || verify_post_gc_rosalloc_ || verify_post_gc_heap_) {
collector::GarbageCollector::ScopedPause pause(gc);
- PreGcVerificationPaused(gc);
+ PostGcVerificationPaused(gc);
}
}
@@ -2812,7 +2813,7 @@
next_gc_type_ = collector::kGcTypeSticky;
} else {
collector::GcType non_sticky_gc_type =
- have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+ HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
// Find what the next non sticky collector will be.
collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
// If the throughput of the current sticky GC >= throughput of the non sticky collector, then
@@ -3035,7 +3036,7 @@
size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
new_native_bytes_allocated += bytes;
if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
- collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
+ collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
collector::kGcTypeFull;
// The second watermark is higher than the gc watermark. If you hit this it means you are
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index d5b49d8..ed93ad9 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -79,6 +79,7 @@
namespace space {
class AllocSpace;
class BumpPointerSpace;
+ class ContinuousMemMapAllocSpace;
class DiscontinuousSpace;
class DlMallocSpace;
class ImageSpace;
@@ -87,7 +88,7 @@
class RosAllocSpace;
class Space;
class SpaceTest;
- class ContinuousMemMapAllocSpace;
+ class ZygoteSpace;
} // namespace space
class AgeCardVisitor {
@@ -599,6 +600,10 @@
return &reference_processor_;
}
+ bool HasZygoteSpace() const {
+ return zygote_space_ != nullptr;
+ }
+
private:
// Compact source space to target space.
void Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -849,8 +854,9 @@
// Lock which guards zygote space creation.
Mutex zygote_creation_lock_;
- // If we have a zygote space.
- bool have_zygote_space_;
+ // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
+ // zygote space creation.
+ space::ZygoteSpace* zygote_space_;
// Minimum allocation size of large object.
size_t large_object_threshold_;
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index d8a38f4..644e055 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -42,21 +42,18 @@
InstructionSet GetInstructionSetFromString(const char* isa_str) {
CHECK(isa_str != nullptr);
- if (!strcmp("arm", isa_str)) {
+ if (strcmp("arm", isa_str) == 0) {
return kArm;
- } else if (!strcmp("arm64", isa_str)) {
+ } else if (strcmp("arm64", isa_str) == 0) {
return kArm64;
- } else if (!strcmp("x86", isa_str)) {
+ } else if (strcmp("x86", isa_str) == 0) {
return kX86;
- } else if (!strcmp("x86_64", isa_str)) {
+ } else if (strcmp("x86_64", isa_str) == 0) {
return kX86_64;
- } else if (!strcmp("mips", isa_str)) {
+ } else if (strcmp("mips", isa_str) == 0) {
return kMips;
- } else if (!strcmp("none", isa_str)) {
- return kNone;
}
- LOG(FATAL) << "Unknown ISA " << isa_str;
return kNone;
}
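
With the LOG(FATAL) removed, kNone doubles as the parse-failure value, and every caller becomes responsible for checking it (the dalvik_system_DexFile change below does exactly that). One consequence worth noting: the string "none" and an unknown string are now indistinguishable by return value alone. A hypothetical helper showing the caller-side check the new contract requires:

    // Sketch of the expected caller pattern after this change.
    InstructionSet ParseIsaOrDefault(const char* isa_str, InstructionSet fallback) {
      InstructionSet isa = GetInstructionSetFromString(isa_str);
      if (isa == kNone) {
        // Could be a genuine "none" or a parse failure; compare the string
        // first if the two cases must be told apart.
        return fallback;
      }
      return isa;
    }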
diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h
index f212811..ae8eeac 100644
--- a/runtime/instruction_set.h
+++ b/runtime/instruction_set.h
@@ -75,6 +75,8 @@
const char* GetInstructionSetString(InstructionSet isa);
+
+// Note: Returns kNone when the string cannot be parsed to a known value.
InstructionSet GetInstructionSetFromString(const char* instruction_set);
static inline size_t GetInstructionSetPointerSize(InstructionSet isa) {
diff --git a/runtime/instruction_set_test.cc b/runtime/instruction_set_test.cc
index ece3238..ac17c4f 100644
--- a/runtime/instruction_set_test.cc
+++ b/runtime/instruction_set_test.cc
@@ -29,6 +29,7 @@
EXPECT_EQ(kX86_64, GetInstructionSetFromString("x86_64"));
EXPECT_EQ(kMips, GetInstructionSetFromString("mips"));
EXPECT_EQ(kNone, GetInstructionSetFromString("none"));
+ EXPECT_EQ(kNone, GetInstructionSetFromString("random-string"));
}
TEST_F(InstructionSetTest, GetInstructionSetString) {
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index ae42284..0f45b9e 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -121,6 +121,11 @@
// Do not change stubs for these methods.
return;
}
+ // Don't stub Proxy.<init>. Note that the Proxy class itself is not a proxy class.
+ if (method->IsConstructor() &&
+ method->GetDeclaringClass()->DescriptorEquals("Ljava/lang/reflect/Proxy;")) {
+ return;
+ }
const void* new_portable_code;
const void* new_quick_code;
bool uninstall = !entry_exit_stubs_installed_ && !interpreter_stubs_installed_;
@@ -205,7 +210,8 @@
LOG(INFO) << " Handling quick to interpreter transition. Frame " << GetFrameId();
}
CHECK_LT(instrumentation_stack_depth_, instrumentation_stack_->size());
- const InstrumentationStackFrame& frame = instrumentation_stack_->at(instrumentation_stack_depth_);
+ const InstrumentationStackFrame& frame =
+ instrumentation_stack_->at(instrumentation_stack_depth_);
CHECK(frame.interpreter_entry_);
// This is an interpreter frame so method enter event must have been reported. However we
// need to push a DEX pc into the dex_pcs_ list to match size of instrumentation stack.
@@ -231,7 +237,8 @@
reached_existing_instrumentation_frames_ = true;
CHECK_LT(instrumentation_stack_depth_, instrumentation_stack_->size());
- const InstrumentationStackFrame& frame = instrumentation_stack_->at(instrumentation_stack_depth_);
+ const InstrumentationStackFrame& frame =
+ instrumentation_stack_->at(instrumentation_stack_depth_);
CHECK_EQ(m, frame.method_) << "Expected " << PrettyMethod(m)
<< ", Found " << PrettyMethod(frame.method_);
return_pc = frame.return_pc_;
@@ -324,7 +331,8 @@
mirror::ArtMethod* m = GetMethod();
if (GetCurrentQuickFrame() == NULL) {
if (kVerboseInstrumentation) {
- LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId() << " Method=" << PrettyMethod(m);
+ LOG(INFO) << " Ignoring a shadow frame. Frame " << GetFrameId()
+ << " Method=" << PrettyMethod(m);
}
return true; // Ignore shadow frames.
}
@@ -405,19 +413,47 @@
have_method_unwind_listeners_ = true;
}
if ((events & kDexPcMoved) != 0) {
- dex_pc_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_dex_pc_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ dex_pc_listeners_.reset(modified);
have_dex_pc_listeners_ = true;
}
if ((events & kFieldRead) != 0) {
- field_read_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_field_read_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*field_read_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ field_read_listeners_.reset(modified);
have_field_read_listeners_ = true;
}
if ((events & kFieldWritten) != 0) {
- field_write_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_field_write_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*field_write_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ field_write_listeners_.reset(modified);
have_field_write_listeners_ = true;
}
if ((events & kExceptionCaught) != 0) {
- exception_caught_listeners_.push_back(listener);
+ std::list<InstrumentationListener*>* modified;
+ if (have_exception_caught_listeners_) {
+ modified = new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
+ } else {
+ modified = new std::list<InstrumentationListener*>();
+ }
+ modified->push_back(listener);
+ exception_caught_listeners_.reset(modified);
have_exception_caught_listeners_ = true;
}
UpdateInterpreterHandlerTable();
@@ -427,51 +463,78 @@
Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
if ((events & kMethodEntered) != 0) {
- bool contains = std::find(method_entry_listeners_.begin(), method_entry_listeners_.end(),
- listener) != method_entry_listeners_.end();
- if (contains) {
+ if (have_method_entry_listeners_) {
method_entry_listeners_.remove(listener);
+ have_method_entry_listeners_ = !method_entry_listeners_.empty();
}
- have_method_entry_listeners_ = method_entry_listeners_.size() > 0;
}
if ((events & kMethodExited) != 0) {
- bool contains = std::find(method_exit_listeners_.begin(), method_exit_listeners_.end(),
- listener) != method_exit_listeners_.end();
- if (contains) {
+ if (have_method_exit_listeners_) {
method_exit_listeners_.remove(listener);
+ have_method_exit_listeners_ = !method_exit_listeners_.empty();
}
- have_method_exit_listeners_ = method_exit_listeners_.size() > 0;
}
if ((events & kMethodUnwind) != 0) {
- method_unwind_listeners_.remove(listener);
+ if (have_method_unwind_listeners_) {
+ method_unwind_listeners_.remove(listener);
+ have_method_unwind_listeners_ = !method_unwind_listeners_.empty();
+ }
}
if ((events & kDexPcMoved) != 0) {
- bool contains = std::find(dex_pc_listeners_.begin(), dex_pc_listeners_.end(),
- listener) != dex_pc_listeners_.end();
- if (contains) {
- dex_pc_listeners_.remove(listener);
+ if (have_dex_pc_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*dex_pc_listeners_.get());
+ modified->remove(listener);
+ have_dex_pc_listeners_ = !modified->empty();
+ if (have_dex_pc_listeners_) {
+ dex_pc_listeners_.reset(modified);
+ } else {
+ dex_pc_listeners_.reset();
+ delete modified;
+ }
}
- have_dex_pc_listeners_ = dex_pc_listeners_.size() > 0;
}
if ((events & kFieldRead) != 0) {
- bool contains = std::find(field_read_listeners_.begin(), field_read_listeners_.end(),
- listener) != field_read_listeners_.end();
- if (contains) {
- field_read_listeners_.remove(listener);
+ if (have_field_read_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*field_read_listeners_.get());
+ modified->remove(listener);
+ have_field_read_listeners_ = !modified->empty();
+ if (have_field_read_listeners_) {
+ field_read_listeners_.reset(modified);
+ } else {
+ field_read_listeners_.reset();
+ delete modified;
+ }
}
- have_field_read_listeners_ = field_read_listeners_.size() > 0;
}
if ((events & kFieldWritten) != 0) {
- bool contains = std::find(field_write_listeners_.begin(), field_write_listeners_.end(),
- listener) != field_write_listeners_.end();
- if (contains) {
- field_write_listeners_.remove(listener);
+ if (have_field_write_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*field_write_listeners_.get());
+ modified->remove(listener);
+ have_field_write_listeners_ = !modified->empty();
+ if (have_field_write_listeners_) {
+ field_write_listeners_.reset(modified);
+ } else {
+ field_write_listeners_.reset();
+ delete modified;
+ }
}
- have_field_write_listeners_ = field_write_listeners_.size() > 0;
}
if ((events & kExceptionCaught) != 0) {
- exception_caught_listeners_.remove(listener);
- have_exception_caught_listeners_ = exception_caught_listeners_.size() > 0;
+ if (have_exception_caught_listeners_) {
+ std::list<InstrumentationListener*>* modified =
+ new std::list<InstrumentationListener*>(*exception_caught_listeners_.get());
+ modified->remove(listener);
+ have_exception_caught_listeners_ = !modified->empty();
+ if (have_exception_caught_listeners_) {
+ exception_caught_listeners_.reset(modified);
+ } else {
+ exception_caught_listeners_.reset();
+ delete modified;
+ }
+ }
}
UpdateInterpreterHandlerTable();
}
@@ -684,7 +747,8 @@
{
WriterMutexLock mu(self, deoptimized_methods_lock_);
bool has_not_been_deoptimized = AddDeoptimizedMethod(method);
- CHECK(has_not_been_deoptimized) << "Method " << PrettyMethod(method) << " is already deoptimized";
+ CHECK(has_not_been_deoptimized) << "Method " << PrettyMethod(method)
+ << " is already deoptimized";
}
if (!interpreter_stubs_installed_) {
UpdateEntrypoints(method, GetQuickInstrumentationEntryPoint(), GetPortableToInterpreterBridge(),
@@ -853,33 +917,33 @@
void Instrumentation::DexPcMovedEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method,
uint32_t dex_pc) const {
- // TODO: STL copy-on-write collection? The copy below is due to the debug listener having an
- // action where it can remove itself as a listener and break the iterator. The copy only works
- // around the problem and in general we may have to move to something like reference counting to
- // ensure listeners are deleted correctly.
- std::list<InstrumentationListener*> copy(dex_pc_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->DexPcMoved(thread, this_object, method, dex_pc);
+ if (HasDexPcListeners()) {
+ std::shared_ptr<std::list<InstrumentationListener*>> original(dex_pc_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->DexPcMoved(thread, this_object, method, dex_pc);
+ }
}
}
void Instrumentation::FieldReadEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field) const {
- // TODO: same comment than DexPcMovedEventImpl.
- std::list<InstrumentationListener*> copy(field_read_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->FieldRead(thread, this_object, method, dex_pc, field);
+ if (HasFieldReadListeners()) {
+ std::shared_ptr<std::list<InstrumentationListener*>> original(field_read_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->FieldRead(thread, this_object, method, dex_pc, field);
+ }
}
}
void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc,
mirror::ArtField* field, const JValue& field_value) const {
- // TODO: same comment than DexPcMovedEventImpl.
- std::list<InstrumentationListener*> copy(field_write_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
+ if (HasFieldWriteListeners()) {
+ std::shared_ptr<std::list<InstrumentationListener*>> original(field_write_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->FieldWritten(thread, this_object, method, dex_pc, field, field_value);
+ }
}
}
@@ -891,11 +955,10 @@
DCHECK_EQ(thread->GetException(nullptr), exception_object);
bool is_exception_reported = thread->IsExceptionReportedToInstrumentation();
thread->ClearException();
- // TODO: The copy below is due to the debug listener having an action where it can remove
- // itself as a listener and break the iterator. The copy only works around the problem.
- std::list<InstrumentationListener*> copy(exception_caught_listeners_);
- for (InstrumentationListener* listener : copy) {
- listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc, exception_object);
+ std::shared_ptr<std::list<InstrumentationListener*>> original(exception_caught_listeners_);
+ for (InstrumentationListener* listener : *original.get()) {
+ listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc,
+ exception_object);
}
thread->SetException(throw_location, exception_object);
thread->SetExceptionReportedToInstrumentation(is_exception_reported);
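
Taken together, the instrumentation changes replace copy-per-event with copy-on-write: event delivery pins the current list through a shared_ptr snapshot and iterates it safely even if a listener unregisters itself mid-callback, while (un)registration, already serialized by the exclusive mutator lock, builds a modified copy and swaps it in. A distilled sketch of the structure, assuming writers are externally serialized as in ART:

    #include <list>
    #include <memory>

    template <typename Listener>
    class CowListenerList {
     public:
      void Add(Listener* listener) {
        auto modified = have_ ? std::make_shared<std::list<Listener*>>(*listeners_)
                              : std::make_shared<std::list<Listener*>>();
        modified->push_back(listener);
        listeners_ = modified;  // readers holding the old list are unaffected
        have_ = true;
      }
      void Remove(Listener* listener) {
        if (!have_) {
          return;
        }
        auto modified = std::make_shared<std::list<Listener*>>(*listeners_);
        modified->remove(listener);
        have_ = !modified->empty();
        listeners_ = have_ ? modified : nullptr;
      }
      template <typename Fn>
      void ForEach(Fn fn) const {
        std::shared_ptr<std::list<Listener*>> snapshot = listeners_;  // pin
        if (snapshot != nullptr) {
          for (Listener* l : *snapshot) {
            fn(l);  // l may call Remove(); our snapshot stays alive
          }
        }
      }
     private:
      std::shared_ptr<std::list<Listener*>> listeners_;
      bool have_ = false;
    };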
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 66c6b38..21d11a5 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -433,10 +433,14 @@
std::list<InstrumentationListener*> method_entry_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_exit_listeners_ GUARDED_BY(Locks::mutator_lock_);
std::list<InstrumentationListener*> method_unwind_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> dex_pc_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> field_read_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> field_write_listeners_ GUARDED_BY(Locks::mutator_lock_);
- std::list<InstrumentationListener*> exception_caught_listeners_ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> dex_pc_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> field_read_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> field_write_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
+ std::shared_ptr<std::list<InstrumentationListener*>> exception_caught_listeners_
+ GUARDED_BY(Locks::mutator_lock_);
// The set of methods being deoptimized (by the debugger) which must be executed with interpreter
// only.
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 9eab3fd..e085ac2 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -25,7 +25,7 @@
#include "mirror/art_method.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
-#include "native_bridge.h"
+#include "nativebridge/native_bridge.h"
#include "java_vm_ext.h"
#include "parsed_options.h"
#include "ScopedLocalRef.h"
@@ -135,7 +135,7 @@
CHECK(NeedsNativeBridge());
uint32_t len = 0;
- return NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
+ return android::NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
}
private:
@@ -645,8 +645,8 @@
void* handle = dlopen(path_str, RTLD_LAZY);
bool needs_native_bridge = false;
if (handle == nullptr) {
- if (NativeBridgeIsSupported(path_str)) {
- handle = NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
+ if (android::NativeBridgeIsSupported(path_str)) {
+ handle = android::NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
needs_native_bridge = true;
}
}
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 7795b7c..4155c82 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -283,7 +283,9 @@
{
ScopedThreadStateChange tsc(self, kWaitingForDebuggerToAttach);
MutexLock attach_locker(self, state->attach_lock_);
- state->attach_cond_.Wait(self);
+ while (state->debug_thread_id_ == 0) {
+ state->attach_cond_.Wait(self);
+ }
}
if (!state->IsActive()) {
LOG(ERROR) << "JDWP connection failed";
@@ -335,10 +337,6 @@
*/
JdwpState::~JdwpState() {
if (netState != NULL) {
- if (IsConnected()) {
- PostVMDeath();
- }
-
/*
* Close down the network to inspire the thread to halt.
*/
@@ -458,6 +456,7 @@
if (!netState->Establish(options_)) {
/* wake anybody who was waiting for us to succeed */
MutexLock mu(thread_, attach_lock_);
+ debug_thread_id_ = static_cast<ObjectId>(-1);
attach_cond_.Broadcast(thread_);
break;
}
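
Both JDWP changes close the same race: a bare Wait() can return spuriously or be reached after the Broadcast() already fired, so the waiter must loop on an actual predicate, and the failing path must make the predicate observable (here debug_thread_id_, set to -1 on failure so the waiter still wakes with a decided state). The canonical shape, sketched with standard primitives:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    std::mutex attach_mutex;
    std::condition_variable attach_cond;
    uint64_t debug_thread_id = 0;  // 0 = undecided, -1 = failed, else attached

    void WaitForAttachDecision() {
      std::unique_lock<std::mutex> lock(attach_mutex);
      // Loop: condition variables may wake spuriously, and the broadcast may
      // have happened before we started waiting.
      while (debug_thread_id == 0) {
        attach_cond.wait(lock);
      }
    }

    void SignalAttachFailed() {
      std::lock_guard<std::mutex> lock(attach_mutex);
      debug_thread_id = static_cast<uint64_t>(-1);  // make the predicate true
      attach_cond.notify_all();
    }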
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index b7d485e..1c870cd 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -43,7 +43,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "native_bridge.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
diff --git a/runtime/jni_internal_test.cc b/runtime/jni_internal_test.cc
index 844d14a..b236ede 100644
--- a/runtime/jni_internal_test.cc
+++ b/runtime/jni_internal_test.cc
@@ -746,6 +746,21 @@
GetMethodIdBadArgumentTest(true);
}
+TEST_F(JniInternalTest, CallVoidMethodNullReceiver) {
+ jclass jlobject = env_->FindClass("java/lang/Object");
+ jmethodID method;
+
+ // Check that GetMethodID for java.lang.Object.<init>() finds the constructor.
+ method = env_->GetMethodID(jlobject, "<init>", "()V");
+ EXPECT_NE(nullptr, method);
+ EXPECT_FALSE(env_->ExceptionCheck());
+
+ // Null object to CallVoidMethod.
+ CheckJniAbortCatcher check_jni_abort_catcher;
+ env_->CallVoidMethod(nullptr, method);
+ check_jni_abort_catcher.Check("null");
+}
+
TEST_F(JniInternalTest, GetStaticMethodID) {
jclass jlobject = env_->FindClass("java/lang/Object");
jclass jlnsme = env_->FindClass("java/lang/NoSuchMethodError");
diff --git a/runtime/leb128.h b/runtime/leb128.h
index 89de16e..dfb42b8 100644
--- a/runtime/leb128.h
+++ b/runtime/leb128.h
@@ -137,25 +137,26 @@
return dest;
}
-// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
-class Leb128EncodingVector {
+// An encoder that pushes uint32_t data onto the given std::vector in LEB128 format.
+class Leb128Encoder {
public:
- Leb128EncodingVector() {
+ explicit Leb128Encoder(std::vector<uint8_t>* data) : data_(data) {
+ DCHECK(data != nullptr);
}
void Reserve(uint32_t size) {
- data_.reserve(size);
+ data_->reserve(size);
}
void PushBackUnsigned(uint32_t value) {
uint8_t out = value & 0x7f;
value >>= 7;
while (value != 0) {
- data_.push_back(out | 0x80);
+ data_->push_back(out | 0x80);
out = value & 0x7f;
value >>= 7;
}
- data_.push_back(out);
+ data_->push_back(out);
}
template<typename It>
@@ -169,12 +170,12 @@
uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
uint8_t out = value & 0x7f;
while (extra_bits != 0u) {
- data_.push_back(out | 0x80);
+ data_->push_back(out | 0x80);
value >>= 7;
out = value & 0x7f;
extra_bits >>= 7;
}
- data_.push_back(out);
+ data_->push_back(out);
}
template<typename It>
@@ -185,12 +186,23 @@
}
const std::vector<uint8_t>& GetData() const {
- return data_;
+ return *data_;
+ }
+
+ protected:
+ std::vector<uint8_t>* const data_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Leb128Encoder);
+};
+
+// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
+class Leb128EncodingVector FINAL : private std::vector<uint8_t>, public Leb128Encoder {
+ public:
+ Leb128EncodingVector() : Leb128Encoder(this) {
}
private:
- std::vector<uint8_t> data_;
-
DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector);
};
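
For reference, (U)LEB128 stores seven payload bits per byte, least-significant group first, with the high bit of each byte as a continuation flag; PushBackUnsigned above therefore emits, e.g., 300 (0b100101100) as 0xAC 0x02. A matching decoder sketch (no bounds or overflow checking; malformed input can push the shift past the value width):

    #include <cstdint>

    // Decode one unsigned LEB128 value starting at *data, advancing the cursor.
    uint32_t DecodeUnsignedLeb128(const uint8_t** data) {
      const uint8_t* ptr = *data;
      uint32_t result = 0;
      int shift = 0;
      uint8_t byte;
      do {
        byte = *ptr++;
        result |= static_cast<uint32_t>(byte & 0x7f) << shift;
        shift += 7;
      } while ((byte & 0x80) != 0);
      *data = ptr;
      return result;
    }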
diff --git a/runtime/method_helper.h b/runtime/method_helper.h
index f71d273..8150456 100644
--- a/runtime/method_helper.h
+++ b/runtime/method_helper.h
@@ -41,6 +41,11 @@
return method_->GetInterfaceMethodIfProxy();
}
+ // GetMethod() != Get() for proxy methods.
+ mirror::ArtMethod* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return method_.Get();
+ }
+
mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 370bfb9..131f5d6 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -281,6 +281,19 @@
return found_dex_pc;
}
+bool ArtMethod::IsEntrypointInterpreter() {
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+ const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
+ const void* oat_portable_code = class_linker->GetOatMethodPortableCodeFor(this);
+ if (!IsPortableCompiled()) { // Quick.
+ return oat_quick_code == nullptr ||
+ oat_quick_code != GetEntryPointFromQuickCompiledCode();
+ } else { // Portable.
+ return oat_portable_code == nullptr ||
+ oat_portable_code != GetEntryPointFromPortableCompiledCode();
+ }
+}
+
void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
const char* shorty) {
if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -318,6 +331,13 @@
have_quick_code ? GetEntryPointFromQuickCompiledCode()
: GetEntryPointFromPortableCompiledCode());
}
+
+ // Ensure that we won't be accidentally calling quick/portable compiled code when -Xint.
+ if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()) {
+ CHECK(IsEntrypointInterpreter())
+ << "Don't call compiled code when -Xint " << PrettyMethod(this);
+ }
+
if (!IsPortableCompiled()) {
#ifdef __LP64__
if (!IsStatic()) {
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index fa592c2..ebd5bd5 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -309,6 +309,11 @@
void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Returns true if the entrypoint points to the interpreter, as
+ // opposed to the compiled code; that is, this method will be
+ // interpreted on invocation.
+ bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetPortableOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 3543654..a1177d6 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -162,7 +162,8 @@
break;
}
case LockWord::kThinLocked: {
- // Inflate the thin lock to a monitor and stick the hash code inside of the monitor.
+ // Inflate the thin lock to a monitor and stick the hash code inside of the monitor. May
+ // fail spuriously.
Thread* self = Thread::Current();
StackHandleScope<1> hs(self);
Handle<mirror::Object> h_this(hs.NewHandle(current_this));
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 433c1b2..5dd16ef 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -176,10 +176,6 @@
// Deflated monitors have a null object.
}
-/*
- * Links a thread into a monitor's wait set. The monitor lock must be
- * held by the caller of this routine.
- */
void Monitor::AppendToWaitSet(Thread* thread) {
DCHECK(owner_ == Thread::Current());
DCHECK(thread != NULL);
@@ -197,10 +193,6 @@
t->SetWaitNext(thread);
}
-/*
- * Unlinks a thread from a monitor's wait set. The monitor lock must
- * be held by the caller of this routine.
- */
void Monitor::RemoveFromWaitSet(Thread *thread) {
DCHECK(owner_ == Thread::Current());
DCHECK(thread != NULL);
@@ -395,29 +387,6 @@
return true;
}
-/*
- * Wait on a monitor until timeout, interrupt, or notification. Used for
- * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
- *
- * If another thread calls Thread.interrupt(), we throw InterruptedException
- * and return immediately if one of the following are true:
- * - blocked in wait(), wait(long), or wait(long, int) methods of Object
- * - blocked in join(), join(long), or join(long, int) methods of Thread
- * - blocked in sleep(long), or sleep(long, int) methods of Thread
- * Otherwise, we set the "interrupted" flag.
- *
- * Checks to make sure that "ns" is in the range 0-999999
- * (i.e. fractions of a millisecond) and throws the appropriate
- * exception if it isn't.
- *
- * The spec allows "spurious wakeups", and recommends that all code using
- * Object.wait() do so in a loop. This appears to derive from concerns
- * about pthread_cond_wait() on multiprocessor systems. Some commentary
- * on the web casts doubt on whether these can/should occur.
- *
- * Since we're allowed to wake up "early", we clamp extremely long durations
- * to return at the end of the 32-bit time epoch.
- */
void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
DCHECK(self != NULL);
@@ -641,11 +610,6 @@
return true;
}
-/*
- * Changes the shape of a monitor from thin to fat, preserving the internal lock state. The calling
- * thread must own the lock or the owner must be suspended. There's a race with other threads
- * inflating the lock and so the caller should read the monitor following the call.
- */
void Monitor::Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
@@ -823,38 +787,37 @@
}
}
-/*
- * Object.wait(). Also called for class init.
- */
void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why) {
DCHECK(self != nullptr);
DCHECK(obj != nullptr);
LockWord lock_word = obj->GetLockWord(true);
- switch (lock_word.GetState()) {
- case LockWord::kHashCode:
- // Fall-through.
- case LockWord::kUnlocked:
- ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
- return; // Failure.
- case LockWord::kThinLocked: {
- uint32_t thread_id = self->GetThreadId();
- uint32_t owner_thread_id = lock_word.ThinLockOwner();
- if (owner_thread_id != thread_id) {
+ while (lock_word.GetState() != LockWord::kFatLocked) {
+ switch (lock_word.GetState()) {
+ case LockWord::kHashCode:
+ // Fall-through.
+ case LockWord::kUnlocked:
ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
return; // Failure.
- } else {
- // We own the lock, inflate to enqueue ourself on the Monitor.
- Inflate(self, self, obj, 0);
- lock_word = obj->GetLockWord(true);
+ case LockWord::kThinLocked: {
+ uint32_t thread_id = self->GetThreadId();
+ uint32_t owner_thread_id = lock_word.ThinLockOwner();
+ if (owner_thread_id != thread_id) {
+ ThrowIllegalMonitorStateExceptionF("object not locked by thread before wait()");
+ return; // Failure.
+ } else {
+ // We own the lock; inflate to enqueue ourselves on the Monitor. Inflation may fail
+ // spuriously, so re-load the lock word.
+ Inflate(self, self, obj, 0);
+ lock_word = obj->GetLockWord(true);
+ }
+ break;
}
- break;
- }
- case LockWord::kFatLocked:
- break; // Already set for a wait.
- default: {
- LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
- return;
+ case LockWord::kFatLocked: // Unreachable given the loop condition above. Fall-through.
+ default: {
+ LOG(FATAL) << "Invalid monitor state " << lock_word.GetState();
+ return;
+ }
}
}
Monitor* mon = lock_word.FatLockMonitor();
@@ -982,7 +945,7 @@
}
void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
- void* callback_context) {
+ void* callback_context, bool abort_on_failure) {
mirror::ArtMethod* m = stack_visitor->GetMethod();
CHECK(m != NULL);
@@ -1015,10 +978,19 @@
return; // No "tries" implies no synchronization, so no held locks to report.
}
+ // Get the dex pc. If abort_on_failure is false, GetDexPc will not abort when it cannot find
+ // the dex pc and will return kDexNoIndex instead. In that case bail out, as it indicates we
+ // have an inconsistent stack anyway.
+ uint32_t dex_pc = stack_visitor->GetDexPc(abort_on_failure);
+ if (!abort_on_failure && dex_pc == DexFile::kDexNoIndex) {
+ LOG(ERROR) << "Could not find dex_pc for " << PrettyMethod(m);
+ return;
+ }
+
// Ask the verifier for the dex pcs of all the monitor-enter instructions corresponding to
// the locks held in this stack frame.
std::vector<uint32_t> monitor_enter_dex_pcs;
- verifier::MethodVerifier::FindLocksAtDexPc(m, stack_visitor->GetDexPc(), &monitor_enter_dex_pcs);
+ verifier::MethodVerifier::FindLocksAtDexPc(m, dex_pc, &monitor_enter_dex_pcs);
if (monitor_enter_dex_pcs.empty()) {
return;
}
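
The restructured Wait() is the consumer side of the contract documented in monitor.h below: Inflate() may fail spuriously because it races with other threads inflating the lock or installing hash codes, so the caller loops, re-reading the lock word until it observes kFatLocked. The same re-read-until-stable shape, reduced to a self-contained skeleton (a losing compare-and-swap stands in for the spurious failure):

    #include <atomic>

    enum class LockState { kUnlocked, kThinLocked, kFatLocked };

    // Stand-in for an inflation attempt that can fail spuriously.
    bool TryInflate(std::atomic<LockState>* word) {
      LockState expected = LockState::kThinLocked;
      return word->compare_exchange_weak(expected, LockState::kFatLocked);
    }

    // Returns true once the lock is fat; false if it was found unlocked
    // (the error case Monitor::Wait reports as IllegalMonitorStateException).
    bool InflateUntilFat(std::atomic<LockState>* word) {
      LockState state = word->load();
      while (state != LockState::kFatLocked) {
        if (state == LockState::kUnlocked) {
          return false;        // not locked at all; caller throws
        }
        TryInflate(word);      // may fail spuriously
        state = word->load();  // re-read the word and loop
      }
      return true;
    }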
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 26d43c9..efa83c7 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -74,6 +74,8 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DoNotify(self, obj, true);
}
+
+ // Object.wait(). Also called for class init.
static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
bool interruptShouldThrow, ThreadState why)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -88,8 +90,10 @@
// Calls 'callback' once for each lock held in the single stack frame represented by
// the current state of 'stack_visitor'.
+ // The abort_on_failure flag lets us avoid dying when the state of the runtime is inconsistent.
+ // This is necessary when we have already aborted but still want to dump as much of the stack
+ // as we can.
static void VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::Object*, void*),
- void* callback_context)
+ void* callback_context, bool abort_on_failure = true)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static bool IsValidLockWord(LockWord lock_word);
@@ -117,6 +121,7 @@
return monitor_id_;
}
+ // Inflate the lock on obj. May fail to inflate for spurious reasons; always re-check.
static void InflateThinLocked(Thread* self, Handle<mirror::Object> obj, LockWord lock_word,
uint32_t hash_code) NO_THREAD_SAFETY_ANALYSIS;
@@ -135,9 +140,18 @@
LOCKS_EXCLUDED(monitor_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Links a thread into a monitor's wait set. The monitor lock must be held by the caller of this
+ // routine.
void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+
+ // Unlinks a thread from a monitor's wait set. The monitor lock must be held by the caller of
+ // this routine.
void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+ // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
+ // calling thread must own the lock or the owner must be suspended. Inflation races with other
+ // threads inflating the lock or installing hash codes, and can fail spuriously; the caller
+ // should re-read the lock word following the call.
static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -168,6 +182,25 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Wait on a monitor until timeout, interrupt, or notification. Used for Object.wait() and
+ // (somewhat indirectly) Thread.sleep() and Thread.join().
+ //
+ // If another thread calls Thread.interrupt(), we throw InterruptedException and return
+ // immediately if one of the following are true:
+ // - blocked in wait(), wait(long), or wait(long, int) methods of Object
+ // - blocked in join(), join(long), or join(long, int) methods of Thread
+ // - blocked in sleep(long), or sleep(long, int) methods of Thread
+ // Otherwise, we set the "interrupted" flag.
+ //
+ // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond) and
+ // throws the appropriate exception if it isn't.
+ //
+ // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
+ // a loop. This appears to derive from concerns about pthread_cond_wait() on multiprocessor
+ // systems. Some commentary on the web casts doubt on whether these can/should occur.
+ //
+ // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
+ // of the 32-bit time epoch.
void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
LOCKS_EXCLUDED(monitor_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index f199c99..14d6cd9 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -27,6 +27,7 @@
#include "base/logging.h"
#include "base/stl_util.h"
+#include "base/stringprintf.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
@@ -490,6 +491,12 @@
}
const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
+ if (target_instruction_set == kNone) {
+ ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
+ std::string message(StringPrintf("Instruction set %s is invalid.", instruction_set));
+ env->ThrowNew(iae.get(), message.c_str());
+ return 0;
+ }
// Get the filename for odex file next to the dex file.
std::string odex_filename(DexFilenameToOdexFilename(filename, target_instruction_set));
@@ -551,8 +558,16 @@
static jbyte DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
jstring javaPkgname, jstring javaInstructionSet, jboolean defer) {
ScopedUtfChars filename(env, javaFilename);
+ if (env->ExceptionCheck()) {
+ return 0;
+ }
+
NullableScopedUtfChars pkgname(env, javaPkgname);
+
ScopedUtfChars instruction_set(env, javaInstructionSet);
+ if (env->ExceptionCheck()) {
+ return 0;
+ }
return IsDexOptNeededInternal(env, filename.c_str(), pkgname.c_str(),
instruction_set.c_str(), defer);
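
The added ExceptionCheck() calls close a gap where ScopedUtfChars can leave a NullPointerException pending (for a null jstring) while the code keeps issuing JNI calls with an exception raised, which is undefined behavior. The underlying pattern, sketched with raw JNI:

    #include <jni.h>

    // Minimal sketch of what ScopedUtfChars encapsulates: a null jstring turns
    // into a pending NullPointerException, and the caller must check before
    // making further JNI calls. (java/lang/NullPointerException always resolves.)
    const char* GetUtfCharsOrThrow(JNIEnv* env, jstring s) {
      if (s == nullptr) {
        jclass npe = env->FindClass("java/lang/NullPointerException");
        env->ThrowNew(npe, "string == null");
        return nullptr;
      }
      return env->GetStringUTFChars(s, nullptr);
    }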
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index 5f718ba..b079229 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -76,6 +76,10 @@
ScopedFastNativeObjectAccess soa(env);
NthCallerVisitor visitor(soa.Self(), 2);
visitor.WalkStack();
+ if (UNLIKELY(visitor.caller == nullptr)) {
+ // The caller is an attached native thread.
+ return nullptr;
+ }
return soa.AddLocalReference<jobject>(visitor.caller->GetDeclaringClass()->GetClassLoader());
}
@@ -113,6 +117,10 @@
ScopedFastNativeObjectAccess soa(env);
NthCallerVisitor visitor(soa.Self(), 3);
visitor.WalkStack();
+ if (UNLIKELY(visitor.caller == nullptr)) {
+ // The caller is an attached native thread.
+ return nullptr;
+ }
return soa.AddLocalReference<jclass>(visitor.caller->GetDeclaringClass());
}
diff --git a/runtime/native_bridge.cc b/runtime/native_bridge.cc
deleted file mode 100644
index d0b516b..0000000
--- a/runtime/native_bridge.cc
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "native_bridge.h"
-
-#include <dlfcn.h>
-#include <stdio.h>
-#include "jni.h"
-
-#include "base/mutex.h"
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "scoped_thread_state_change.h"
-#include "ScopedLocalRef.h"
-#include "thread.h"
-
-#ifdef HAVE_ANDROID_OS
-#include "cutils/properties.h"
-#endif
-
-
-namespace art {
-
-// The symbol name exposed by native-bridge with the type of NativeBridgeCallbacks.
-static constexpr const char* kNativeBridgeInterfaceSymbol = "NativeBridgeItf";
-
-// The library name we are supposed to load.
-static std::string native_bridge_library_string = "";
-
-// Whether a native bridge is available (loaded and ready).
-static bool available = false;
-// Whether we have already initialized (or tried to).
-static bool initialized = false;
-
-struct NativeBridgeCallbacks;
-static NativeBridgeCallbacks* callbacks = nullptr;
-
-// ART interfaces to native-bridge.
-struct NativeBridgeArtCallbacks {
- // Get shorty of a Java method. The shorty is supposed to be persistent in memory.
- //
- // Parameters:
- // env [IN] pointer to JNIenv.
- // mid [IN] Java methodID.
- // Returns:
- // short descriptor for method.
- const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
-
- // Get number of native methods for specified class.
- //
- // Parameters:
- // env [IN] pointer to JNIenv.
- // clazz [IN] Java class object.
- // Returns:
- // number of native methods.
- uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
-
- // Get at most 'method_count' native methods for specified class 'clazz'. Results are outputed
- // via 'methods' [OUT]. The signature pointer in JNINativeMethod is reused as the method shorty.
- //
- // Parameters:
- // env [IN] pointer to JNIenv.
- // clazz [IN] Java class object.
- // methods [OUT] array of method with the name, shorty, and fnPtr.
- // method_count [IN] max number of elements in methods.
- // Returns:
- // number of method it actually wrote to methods.
- uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count);
-};
-
-// Native-bridge interfaces to ART
-struct NativeBridgeCallbacks {
- // Initialize native-bridge. Native-bridge's internal implementation must ensure MT safety and
- // that the native-bridge is initialized only once. Thus it is OK to call this interface for an
- // already initialized native-bridge.
- //
- // Parameters:
- // art_cbs [IN] the pointer to NativeBridgeArtCallbacks.
- // Returns:
- // true iff initialization was successful.
- bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
-
- // Load a shared library that is supported by the native-bridge.
- //
- // Parameters:
- // libpath [IN] path to the shared library
- // flag [IN] the stardard RTLD_XXX defined in bionic dlfcn.h
- // Returns:
- // The opaque handle of the shared library if sucessful, otherwise NULL
- void* (*loadLibrary)(const char* libpath, int flag);
-
- // Get a native-bridge trampoline for specified native method. The trampoline has same
- // sigature as the native method.
- //
- // Parameters:
- // handle [IN] the handle returned from loadLibrary
- // shorty [IN] short descriptor of native method
- // len [IN] length of shorty
- // Returns:
- // address of trampoline if successful, otherwise NULL
- void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
-
- // Check whether native library is valid and is for an ABI that is supported by native-bridge.
- //
- // Parameters:
- // libpath [IN] path to the shared library
- // Returns:
- // TRUE if library is supported by native-bridge, FALSE otherwise
- bool (*isSupported)(const char* libpath);
-};
-
-static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
- ScopedObjectAccess soa(env);
- StackHandleScope<1> scope(soa.Self());
- mirror::ArtMethod* m = soa.DecodeMethod(mid);
- MethodHelper mh(scope.NewHandle(m));
- return mh.GetShorty();
-}
-
-static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
- if (clazz == nullptr)
- return 0;
-
- ScopedObjectAccess soa(env);
- mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
-
- uint32_t native_method_count = 0;
- for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* m = c->GetDirectMethod(i);
- if (m->IsNative()) {
- native_method_count++;
- }
- }
- for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* m = c->GetVirtualMethod(i);
- if (m->IsNative()) {
- native_method_count++;
- }
- }
- return native_method_count;
-}
-
-static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count) {
- if ((clazz == nullptr) || (methods == nullptr)) {
- return 0;
- }
- ScopedObjectAccess soa(env);
- mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
-
- uint32_t count = 0;
- for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
- mirror::ArtMethod* m = c->GetDirectMethod(i);
- if (m->IsNative()) {
- if (count < method_count) {
- methods[count].name = m->GetName();
- methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
- count++;
- } else {
- LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
- }
- }
- }
- for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
- mirror::ArtMethod* m = c->GetVirtualMethod(i);
- if (m->IsNative()) {
- if (count < method_count) {
- methods[count].name = m->GetName();
- methods[count].signature = m->GetShorty();
- methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
- count++;
- } else {
- LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
- }
- }
- }
- return count;
-}
-
-static NativeBridgeArtCallbacks NativeBridgeArtItf = {
- GetMethodShorty,
- GetNativeMethodCount,
- GetNativeMethods
-};
-
-void SetNativeBridgeLibraryString(const std::string& nb_library_string) {
- // This is called when the runtime starts and nothing is working concurrently
- // so we don't need a lock here.
-
- native_bridge_library_string = nb_library_string;
-
- if (native_bridge_library_string.empty()) {
- initialized = true;
- available = false;
- }
-}
-
-static bool NativeBridgeInitialize() {
- // TODO: Missing annotalysis static lock ordering of DEFAULT_MUTEX_ACQUIRED, place lock into
- // global order or remove.
- static Mutex lock("native bridge lock");
- MutexLock mu(Thread::Current(), lock);
-
- if (initialized) {
- // Somebody did it before.
- return available;
- }
-
- available = false;
-
- void* handle = dlopen(native_bridge_library_string.c_str(), RTLD_LAZY);
- if (handle != nullptr) {
- callbacks = reinterpret_cast<NativeBridgeCallbacks*>(dlsym(handle,
- kNativeBridgeInterfaceSymbol));
-
- if (callbacks != nullptr) {
- available = callbacks->initialize(&NativeBridgeArtItf);
- }
-
- if (!available) {
- dlclose(handle);
- }
- }
-
- initialized = true;
-
- return available;
-}
-
-void* NativeBridgeLoadLibrary(const char* libpath, int flag) {
- if (NativeBridgeInitialize()) {
- return callbacks->loadLibrary(libpath, flag);
- }
- return nullptr;
-}
-
-void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty,
- uint32_t len) {
- if (NativeBridgeInitialize()) {
- return callbacks->getTrampoline(handle, name, shorty, len);
- }
- return nullptr;
-}
-
-bool NativeBridgeIsSupported(const char* libpath) {
- if (NativeBridgeInitialize()) {
- return callbacks->isSupported(libpath);
- }
- return false;
-}
-
-}; // namespace art
diff --git a/runtime/native_bridge.h b/runtime/native_bridge.h
deleted file mode 100644
index be647fc..0000000
--- a/runtime/native_bridge.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_NATIVE_BRIDGE_H_
-#define ART_RUNTIME_NATIVE_BRIDGE_H_
-
-#include <string>
-
-namespace art {
-
-// Initialize the native bridge, if any. Should be called by Runtime::Init(). An empty string
-// signals that we do not want to load a native bridge.
-void SetNativeBridgeLibraryString(const std::string& native_bridge_library_string);
-
-// Load a shared library that is supported by the native-bridge.
-void* NativeBridgeLoadLibrary(const char* libpath, int flag);
-
-// Get a native-bridge trampoline for specified native method.
-void* NativeBridgeGetTrampoline(void* handle, const char* name, const char* shorty, uint32_t len);
-
-// True if native library is valid and is for an ABI that is supported by native-bridge.
-bool NativeBridgeIsSupported(const char* libpath);
-
-}; // namespace art
-
-#endif // ART_RUNTIME_NATIVE_BRIDGE_H_
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
new file mode 100644
index 0000000..453c92f
--- /dev/null
+++ b/runtime/native_bridge_art_interface.cc
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "native_bridge_art_interface.h"
+
+#include "mirror/art_method-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+
+namespace art {
+
+const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+ ScopedObjectAccess soa(env);
+ StackHandleScope<1> scope(soa.Self());
+ mirror::ArtMethod* m = soa.DecodeMethod(mid);
+ MethodHelper mh(scope.NewHandle(m));
+ return mh.GetShorty();
+}
+
+uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+  if (clazz == nullptr) {
+    return 0;
+  }
+
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t native_method_count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ native_method_count++;
+ }
+ }
+ return native_method_count;
+}
+
+uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count) {
+ if ((clazz == nullptr) || (methods == nullptr)) {
+ return 0;
+ }
+ ScopedObjectAccess soa(env);
+ mirror::Class* c = soa.Decode<mirror::Class*>(clazz);
+
+ uint32_t count = 0;
+ for (uint32_t i = 0; i < c->NumDirectMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetDirectMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ for (uint32_t i = 0; i < c->NumVirtualMethods(); ++i) {
+ mirror::ArtMethod* m = c->GetVirtualMethod(i);
+ if (m->IsNative()) {
+ if (count < method_count) {
+ methods[count].name = m->GetName();
+ methods[count].signature = m->GetShorty();
+ methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+ count++;
+ } else {
+ LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
+ }
+ }
+ }
+ return count;
+}
+
+}  // namespace art
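
[Editor's note] For orientation, here is a minimal sketch of how a native bridge implementation might consume the two enumeration callbacks above when tearing down trampolines for UnregisterNatives(). UnregisterTrampolines and DestroyTrampoline are hypothetical bridge-side names, not part of this change:

// Hedged sketch, assuming a bridge-side DestroyTrampoline() cleanup hook.
#include <vector>
#include "jni.h"
#include "nativebridge/native_bridge.h"

void DestroyTrampoline(void* fn);  // Hypothetical bridge-side helper.

static void UnregisterTrampolines(JNIEnv* env, jclass clazz,
                                  const android::NativeBridgeRuntimeCallbacks* art_cbs) {
  // Ask the runtime how many native methods the class has, then fetch them.
  uint32_t count = art_cbs->getNativeMethodCount(env, clazz);
  std::vector<JNINativeMethod> methods(count);
  count = art_cbs->getNativeMethods(env, clazz, methods.data(), count);
  for (uint32_t i = 0; i < count; ++i) {
    DestroyTrampoline(methods[i].fnPtr);  // Tear down the bridge trampoline.
  }
}
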
diff --git a/runtime/native_bridge_art_interface.h b/runtime/native_bridge_art_interface.h
new file mode 100644
index 0000000..08735c8
--- /dev/null
+++ b/runtime/native_bridge_art_interface.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_BRIDGE_ART_INTERFACE_H_
+#define ART_RUNTIME_NATIVE_BRIDGE_ART_INTERFACE_H_
+
+#include <jni.h>
+#include <stdint.h>
+
+namespace art {
+
+const char* GetMethodShorty(JNIEnv* env, jmethodID mid);
+
+uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz);
+
+uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+ uint32_t method_count);
+
+}  // namespace art
+
+#endif // ART_RUNTIME_NATIVE_BRIDGE_ART_INTERFACE_H_
diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc
index 971daf8..50dfe21 100644
--- a/runtime/oat_file.cc
+++ b/runtime/oat_file.cc
@@ -45,7 +45,7 @@
std::string* error_msg) {
CHECK(!oat_contents.empty()) << location;
CheckLocation(location);
- std::unique_ptr<OatFile> oat_file(new OatFile(location));
+ std::unique_ptr<OatFile> oat_file(new OatFile(location, false));
oat_file->begin_ = &oat_contents[0];
oat_file->end_ = &oat_contents[oat_contents.size()];
return oat_file->Setup(error_msg) ? oat_file.release() : nullptr;
@@ -97,7 +97,7 @@
const std::string& location,
byte* requested_base,
std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(new OatFile(location));
+ std::unique_ptr<OatFile> oat_file(new OatFile(location, true));
bool success = oat_file->Dlopen(elf_filename, requested_base, error_msg);
if (!success) {
return nullptr;
@@ -111,7 +111,7 @@
bool writable,
bool executable,
std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(new OatFile(location));
+ std::unique_ptr<OatFile> oat_file(new OatFile(location, executable));
bool success = oat_file->ElfFileOpen(file, requested_base, writable, executable, error_msg);
if (!success) {
CHECK(!error_msg->empty());
@@ -120,8 +120,9 @@
return oat_file.release();
}
-OatFile::OatFile(const std::string& location)
- : location_(location), begin_(NULL), end_(NULL), dlopen_handle_(NULL),
+OatFile::OatFile(const std::string& location, bool is_executable)
+ : location_(location), begin_(NULL), end_(NULL), is_executable_(is_executable),
+ dlopen_handle_(NULL),
secondary_lookup_lock_("OatFile secondary lookup lock", kOatFileSecondaryLookupLock) {
CHECK(!location_.empty());
}
@@ -460,15 +461,17 @@
uint32_t bitmap_size = 0;
const byte* bitmap_pointer = nullptr;
const byte* methods_pointer = nullptr;
- if (type == kOatClassSomeCompiled) {
- bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
- bitmap_pointer = after_type_pointer + sizeof(bitmap_size);
- CHECK_LE(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
- methods_pointer = bitmap_pointer + bitmap_size;
- } else {
- methods_pointer = after_type_pointer;
+ if (type != kOatClassNoneCompiled) {
+ if (type == kOatClassSomeCompiled) {
+ bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
+ bitmap_pointer = after_type_pointer + sizeof(bitmap_size);
+ CHECK_LE(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
+ methods_pointer = bitmap_pointer + bitmap_size;
+ } else {
+ methods_pointer = after_type_pointer;
+ }
+ CHECK_LE(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
}
- CHECK_LE(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
return OatClass(oat_file_,
status,
@@ -486,22 +489,23 @@
const OatMethodOffsets* methods_pointer)
: oat_file_(oat_file), status_(status), type_(type),
bitmap_(bitmap_pointer), methods_pointer_(methods_pointer) {
- CHECK(methods_pointer != nullptr);
switch (type_) {
case kOatClassAllCompiled: {
CHECK_EQ(0U, bitmap_size);
CHECK(bitmap_pointer == nullptr);
+ CHECK(methods_pointer != nullptr);
break;
}
case kOatClassSomeCompiled: {
CHECK_NE(0U, bitmap_size);
CHECK(bitmap_pointer != nullptr);
+ CHECK(methods_pointer != nullptr);
break;
}
case kOatClassNoneCompiled: {
CHECK_EQ(0U, bitmap_size);
CHECK(bitmap_pointer == nullptr);
- methods_pointer_ = nullptr;
+ CHECK(methods_pointer_ == nullptr);
break;
}
case kOatClassMax: {
@@ -530,22 +534,19 @@
methods_pointer_index = num_set_bits;
}
const OatMethodOffsets& oat_method_offsets = methods_pointer_[methods_pointer_index];
- return OatMethod(
- oat_file_->Begin(),
- oat_method_offsets.code_offset_,
- oat_method_offsets.gc_map_offset_);
+ if (oat_file_->IsExecutable()
+ || (Runtime::Current() == nullptr)
+ || Runtime::Current()->IsCompiler()) {
+ return OatMethod(
+ oat_file_->Begin(),
+ oat_method_offsets.code_offset_,
+ oat_method_offsets.gc_map_offset_);
+ } else {
+    // We aren't allowed to use the compiled code. Fall back to the interpreted version.
+ return OatMethod(oat_file_->Begin(), 0, 0);
+ }
}
-OatFile::OatMethod::OatMethod(const byte* base,
- const uint32_t code_offset,
- const uint32_t gc_map_offset)
- : begin_(base),
- code_offset_(code_offset),
- native_gc_map_offset_(gc_map_offset) {
-}
-
-OatFile::OatMethod::~OatMethod() {}
-
uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
uintptr_t code = reinterpret_cast<uintptr_t>(GetQuickCode());
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 810eccb..8535bf4 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -64,6 +64,10 @@
~OatFile();
+ bool IsExecutable() const {
+ return is_executable_;
+ }
+
ElfFile* GetElfFile() const {
CHECK_NE(reinterpret_cast<uintptr_t>(elf_file_.get()), reinterpret_cast<uintptr_t>(nullptr))
<< "Cannot get an elf file from " << GetLocation();
@@ -126,14 +130,19 @@
const uint8_t* GetMappingTable() const;
const uint8_t* GetVmapTable() const;
- ~OatMethod();
-
// Create an OatMethod with offsets relative to the given base address
- OatMethod(const byte* base,
- const uint32_t code_offset,
- const uint32_t gc_map_offset);
+ OatMethod(const byte* base, const uint32_t code_offset, const uint32_t gc_map_offset)
+ : begin_(base),
+ code_offset_(code_offset),
+ native_gc_map_offset_(gc_map_offset) {
+ }
+ ~OatMethod() {}
- OatMethod() {}
+ // A representation of an invalid OatMethod, used when an OatMethod or OatClass can't be found.
+ // See ClassLinker::FindOatMethodFor.
+ static const OatMethod Invalid() {
+ return OatMethod(nullptr, -1, -1);
+ }
private:
template<class T>
@@ -144,10 +153,10 @@
return reinterpret_cast<T>(begin_ + offset);
}
- const byte* begin_;
+ const byte* const begin_;
- uint32_t code_offset_;
- uint32_t native_gc_map_offset_;
+ const uint32_t code_offset_;
+ const uint32_t native_gc_map_offset_;
friend class OatClass;
};
@@ -168,7 +177,12 @@
// methods are not included.
const OatMethod GetOatMethod(uint32_t method_index) const;
- OatClass() {}
+ // A representation of an invalid OatClass, used when an OatClass can't be found.
+ // See ClassLinker::FindOatClass.
+ static OatClass Invalid() {
+ return OatClass(nullptr, mirror::Class::kStatusError, kOatClassNoneCompiled, 0, nullptr,
+ nullptr);
+ }
private:
OatClass(const OatFile* oat_file,
@@ -178,15 +192,15 @@
const uint32_t* bitmap_pointer,
const OatMethodOffsets* methods_pointer);
- const OatFile* oat_file_;
+ const OatFile* const oat_file_;
- mirror::Class::Status status_;
+ const mirror::Class::Status status_;
- OatClassType type_;
+ const OatClassType type_;
- const uint32_t* bitmap_;
+ const uint32_t* const bitmap_;
- const OatMethodOffsets* methods_pointer_;
+ const OatMethodOffsets* const methods_pointer_;
friend class OatDexFile;
};
@@ -260,7 +274,7 @@
bool executable,
std::string* error_msg);
- explicit OatFile(const std::string& filename);
+ explicit OatFile(const std::string& filename, bool executable);
bool Dlopen(const std::string& elf_filename, byte* requested_base, std::string* error_msg);
bool ElfFileOpen(File* file, byte* requested_base, bool writable, bool executable,
std::string* error_msg);
@@ -277,6 +291,9 @@
// Pointer to end of oat region for bounds checking.
const byte* end_;
+ // Was this oat_file loaded executable?
+ const bool is_executable_;
+
// Backing memory map for oat file during when opened by ElfWriter during initial compilation.
std::unique_ptr<MemMap> mem_map_;
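
[Editor's note] The Invalid() factories added above let lookup code hand back a sentinel instead of a null pointer. A hedged sketch of the call-site shape they enable; LookupOatDexFile is a hypothetical helper, and this is not the actual ClassLinker code referenced in the comments:

// Sketch only: a finder returning OatClass::Invalid() as a sentinel.
const OatFile::OatClass FindOatClassFor(mirror::Class* klass, bool* found) {
  const OatFile::OatDexFile* oat_dex = LookupOatDexFile(klass);  // Hypothetical.
  if (oat_dex == nullptr) {
    *found = false;
    return OatFile::OatClass::Invalid();  // Sentinel, never a null pointer.
  }
  *found = true;
  return oat_dex->GetOatClass(klass->GetDexClassDefIndex());
}
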
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 27f7765..26360d7 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -414,8 +414,12 @@
compiler_callbacks_ =
reinterpret_cast<CompilerCallbacks*>(const_cast<void*>(options[i].second));
} else if (option == "imageinstructionset") {
- image_isa_ = GetInstructionSetFromString(
- reinterpret_cast<const char*>(options[i].second));
+ const char* isa_str = reinterpret_cast<const char*>(options[i].second);
+ image_isa_ = GetInstructionSetFromString(isa_str);
+ if (image_isa_ == kNone) {
+ Usage("%s is not a valid instruction set.", isa_str);
+ return false;
+ }
} else if (option == "-Xzygote") {
is_zygote_ = true;
} else if (StartsWith(option, "-Xpatchoat:")) {
@@ -609,7 +613,7 @@
return false;
}
} else if (StartsWith(option, "-XX:NativeBridge=")) {
- if (!ParseStringAfterChar(option, '=', &native_bridge_library_string_)) {
+ if (!ParseStringAfterChar(option, '=', &native_bridge_library_path_)) {
return false;
}
} else if (StartsWith(option, "-ea") ||
diff --git a/runtime/parsed_options.h b/runtime/parsed_options.h
index aa2c557..1afd610 100644
--- a/runtime/parsed_options.h
+++ b/runtime/parsed_options.h
@@ -46,7 +46,7 @@
bool check_jni_;
bool force_copy_;
std::string jni_trace_;
- std::string native_bridge_library_string_;
+ std::string native_bridge_library_path_;
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
bool must_relocate_;
diff --git a/runtime/proxy_test.cc b/runtime/proxy_test.cc
index 5af26b0..d977ce9 100644
--- a/runtime/proxy_test.cc
+++ b/runtime/proxy_test.cc
@@ -17,14 +17,14 @@
#include <jni.h>
#include <vector>
-#include "common_runtime_test.h"
+#include "common_compiler_test.h"
#include "field_helper.h"
#include "mirror/art_field-inl.h"
#include "scoped_thread_state_change.h"
namespace art {
-class ProxyTest : public CommonRuntimeTest {
+class ProxyTest : public CommonCompilerTest {
public:
// Generate a proxy class with the given name and interfaces. This is a simplification from what
// libcore does to fit to our test needs. We do not check for duplicated interfaces or methods and
@@ -103,12 +103,6 @@
soa.Self()->AssertNoPendingException();
return proxyClass;
}
-
- protected:
- void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
- options->push_back(std::make_pair(StringPrintf("-Ximage:%s", GetLibCoreOatFileName().c_str()),
- nullptr));
- }
};
// Creates a proxy class and check ClassHelper works correctly.
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 41d6989..98eeda7 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -29,14 +29,14 @@
namespace art {
static constexpr bool kDebugExceptionDelivery = false;
-static constexpr size_t kInvalidFrameId = 0xffffffff;
+static constexpr size_t kInvalidFrameDepth = 0xffffffff;
QuickExceptionHandler::QuickExceptionHandler(Thread* self, bool is_deoptimization)
: self_(self), context_(self->GetLongJumpContext()), is_deoptimization_(is_deoptimization),
method_tracing_active_(is_deoptimization ||
Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
handler_quick_frame_(nullptr), handler_quick_frame_pc_(0), handler_method_(nullptr),
- handler_dex_pc_(0), clear_exception_(false), handler_frame_id_(kInvalidFrameId) {
+ handler_dex_pc_(0), clear_exception_(false), handler_frame_depth_(kInvalidFrameDepth) {
}
// Finds catch handler or prepares for deoptimization.
@@ -51,7 +51,7 @@
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::ArtMethod* method = GetMethod();
- exception_handler_->SetHandlerFrameId(GetFrameId());
+ exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
if (method == nullptr) {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
exception_handler_->SetHandlerQuickFramePc(GetCurrentQuickFramePc());
@@ -177,7 +177,7 @@
}
bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- exception_handler_->SetHandlerFrameId(GetFrameId());
+ exception_handler_->SetHandlerFrameDepth(GetFrameDepth());
mirror::ArtMethod* method = GetMethod();
if (method == nullptr) {
// This is the upcall, we remember the frame and last pc so that we may long jump to them.
@@ -295,17 +295,17 @@
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
class InstrumentationStackVisitor : public StackVisitor {
public:
- InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_id)
+ InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, nullptr),
- self_(self), frame_id_(frame_id),
+ self_(self), frame_depth_(frame_depth),
instrumentation_frames_to_pop_(0) {
- CHECK_NE(frame_id_, kInvalidFrameId);
+ CHECK_NE(frame_depth_, kInvalidFrameDepth);
}
bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- size_t current_frame_id = GetFrameId();
- if (current_frame_id > frame_id_) {
+ size_t current_frame_depth = GetFrameDepth();
+ if (current_frame_depth < frame_depth_) {
CHECK(GetMethod() != nullptr);
if (UNLIKELY(GetQuickInstrumentationExitPc() == GetReturnPc())) {
++instrumentation_frames_to_pop_;
@@ -323,7 +323,7 @@
private:
Thread* const self_;
- const size_t frame_id_;
+ const size_t frame_depth_;
size_t instrumentation_frames_to_pop_;
DISALLOW_COPY_AND_ASSIGN(InstrumentationStackVisitor);
@@ -331,7 +331,7 @@
void QuickExceptionHandler::UpdateInstrumentationStack() {
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_id_);
+ InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_depth_);
visitor.WalkStack(true);
size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index 1d600ed..b93769c 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -77,8 +77,8 @@
clear_exception_ = clear_exception;
}
- void SetHandlerFrameId(size_t frame_id) {
- handler_frame_id_ = frame_id;
+ void SetHandlerFrameDepth(size_t frame_depth) {
+ handler_frame_depth_ = frame_depth;
}
private:
@@ -97,8 +97,8 @@
uint32_t handler_dex_pc_;
// Should the exception be cleared as the catch block has no move-exception?
bool clear_exception_;
- // Frame id of the catch handler or the upcall.
- size_t handler_frame_id_;
+ // Frame depth of the catch handler or the upcall.
+ size_t handler_frame_depth_;
DISALLOW_COPY_AND_ASSIGN(QuickExceptionHandler);
};
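
[Editor's note] The switch from frame ids to frame depths flips the comparison in InstrumentationStackVisitor: ids were assigned from the bottom of the stack, while depth counts from the top (the most recent frame has depth 0), so the frames to pop are those strictly above the handler. A minimal sketch of the predicate, with the old form noted for contrast:

// Sketch of the visitor's test after this change (not the actual code).
// Depth 0 is the most recent frame; the handler sits deeper in the stack.
static bool FrameIsAboveHandler(size_t current_frame_depth, size_t handler_frame_depth) {
  return current_frame_depth < handler_frame_depth;  // was: current_id > handler_id
}
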
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index d3957c1..ba53c43 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -63,7 +63,7 @@
#include "mirror/stack_trace_element.h"
#include "mirror/throwable.h"
#include "monitor.h"
-#include "native_bridge.h"
+#include "native_bridge_art_interface.h"
#include "parsed_options.h"
#include "oat_file.h"
#include "quick/quick_method_frame_info.h"
@@ -143,7 +143,8 @@
target_sdk_version_(0),
implicit_null_checks_(false),
implicit_so_checks_(false),
- implicit_suspend_checks_(false) {
+ implicit_suspend_checks_(false),
+ native_bridge_art_callbacks_({GetMethodShorty, GetNativeMethodCount, GetNativeMethods}) {
}
Runtime::~Runtime() {
@@ -708,8 +709,11 @@
self->ClearException();
// Look for a native bridge.
- SetNativeBridgeLibraryString(options->native_bridge_library_string_);
-
+ native_bridge_library_path_ = options->native_bridge_library_path_;
+ if (!native_bridge_library_path_.empty()) {
+ android::SetupNativeBridge(native_bridge_library_path_.c_str(), &native_bridge_art_callbacks_);
+ VLOG(startup) << "Runtime::Setup native bridge library: " << native_bridge_library_path_;
+ }
VLOG(startup) << "Runtime::Init exiting";
return true;
}
@@ -896,11 +900,7 @@
bool Runtime::AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer) {
- bool success = Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
- if (thread_name == NULL) {
- LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
- }
- return success;
+ return Thread::Attach(thread_name, as_daemon, thread_group, create_peer) != NULL;
}
void Runtime::DetachCurrentThread() {
diff --git a/runtime/runtime.h b/runtime/runtime.h
index e76280a..34ccdcb 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -31,6 +31,7 @@
#include "instrumentation.h"
#include "instruction_set.h"
#include "jobject_comparator.h"
+#include "nativebridge/native_bridge.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "profiler_options.h"
@@ -616,6 +617,25 @@
bool implicit_so_checks_; // StackOverflow checks are implicit.
bool implicit_suspend_checks_; // Thread suspension checks are implicit.
+  // The path to the native bridge library. If this is not empty, the native bridge will be
+  // initialized and loaded from the given path.
+  //
+  // The native bridge allows running native code compiled for a foreign ISA. If a standard
+  // dlopen fails to load the native library associated with a native activity, the runtime calls
+  // into the native bridge to load it and then gets the trampoline for the entry to the activity.
+  std::string native_bridge_library_path_;
+
+  // Native bridge library runtime callbacks. They represent the runtime interface to the bridge.
+  //
+  // The interface is expected to expose the following methods:
+  // getMethodShorty(): when a native method calls the JNI function CallXXXXMethodY(), the native
+  // bridge calls back into the VM for the method's shorty so that it can marshal arguments
+  // according to the host calling convention.
+  // getNativeMethodCount() and getNativeMethods(): when the JNI function UnregisterNatives() is
+  // called, the native bridge can call back to obtain all native methods of the specified class
+  // so that all corresponding trampolines can be destroyed.
+  android::NativeBridgeRuntimeCallbacks native_bridge_art_callbacks_;
+
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
diff --git a/runtime/stack.h b/runtime/stack.h
index 578f569..e58caee 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -553,6 +553,10 @@
return num_frames_;
}
+ size_t GetFrameDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return cur_depth_;
+ }
+
// Get the method and dex pc immediately after the one that's currently being visited.
bool GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32_t* next_dex_pc)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7aabfce..7ac685b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -224,7 +224,8 @@
} else {
// If we are going to use implicit stack checks, allocate space for the protected
// region at the bottom of the stack.
- stack_size += Thread::kStackOverflowImplicitCheckSize;
+ stack_size += Thread::kStackOverflowImplicitCheckSize +
+ GetStackOverflowReservedBytes(kRuntimeISA);
}
// Some systems require the stack size to be a multiple of the system page size, so round up.
@@ -264,7 +265,7 @@
// a segv.
// Read every page from the high address to the low.
- for (byte* p = stack_top; p > pregion; p -= kPageSize) {
+ for (byte* p = stack_top; p >= pregion; p -= kPageSize) {
dont_optimize_this = *p;
}
@@ -403,6 +404,8 @@
if (thread_name != nullptr) {
self->tlsPtr_.name->assign(thread_name);
::art::SetThreadName(thread_name);
+ } else if (self->GetJniEnv()->check_jni) {
+ LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
}
}
@@ -505,9 +508,7 @@
}
// TODO: move this into the Linux GetThreadStack implementation.
-#if defined(__APPLE__)
- bool is_main_thread = false;
-#else
+#if !defined(__APPLE__)
// If we're the main thread, check whether we were run with an unlimited stack. In that case,
// glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
// will be broken because we'll die long before we get close to 2GB.
@@ -539,7 +540,7 @@
// Set stack_end_ to the bottom of the stack saving space of stack overflows
bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
- ResetDefaultStackEnd(implicit_stack_check);
+ ResetDefaultStackEnd();
// Install the protected region if we are doing implicit overflow checks.
if (implicit_stack_check) {
@@ -551,8 +552,19 @@
// The thread might have protected region at the bottom. We need
// to install our own region so we need to move the limits
// of the stack to make room for it.
- tlsPtr_.stack_begin += guardsize;
- tlsPtr_.stack_end += guardsize;
+
+#if defined(__i386__) || defined(__x86_64__)
+    // Work around an issue reading the last page of the stack on Intel.
+    // The bug for this is b/17111575: the problem is that we are
+    // unable to read the page just above the guard page on the
+    // main stack on an Intel target. When the bug is fixed
+    // this can be removed.
+ if (::art::GetTid() == getpid()) {
+ guardsize += 4 * KB;
+ }
+#endif
+ tlsPtr_.stack_begin += guardsize + kStackOverflowProtectedSize;
+ tlsPtr_.stack_end += guardsize + kStackOverflowProtectedSize;
tlsPtr_.stack_size -= guardsize;
InstallImplicitProtection();
@@ -908,7 +920,8 @@
Monitor::DescribeWait(os, thread);
}
if (can_allocate) {
- Monitor::VisitLocks(this, DumpLockedObject, &os);
+    // Visit locks, but do not abort on errors, as that would trigger a nested abort.
+ Monitor::VisitLocks(this, DumpLockedObject, &os, false);
}
}
diff --git a/runtime/thread.h b/runtime/thread.h
index 120ff6f..fe950c4 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -594,16 +594,10 @@
void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Set the stack end to that to be used during regular execution
- void ResetDefaultStackEnd(bool implicit_overflow_check) {
+ void ResetDefaultStackEnd() {
// Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
// to throw a StackOverflowError.
- if (implicit_overflow_check) {
- // For implicit checks we also need to add in the protected region above the
- // overflow region.
- tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
- } else {
- tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
- }
+ tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
}
// Install the protected region for implicit stack checks.
diff --git a/runtime/utils.cc b/runtime/utils.cc
index b845f50..d7e215b 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -66,8 +66,9 @@
uint64_t owner;
CHECK_PTHREAD_CALL(pthread_threadid_np, (NULL, &owner), __FUNCTION__); // Requires Mac OS 10.6
return owner;
+#elif defined(__BIONIC__)
+ return gettid();
#else
- // Neither bionic nor glibc exposes gettid(2).
return syscall(__NR_gettid);
#endif
}
@@ -999,7 +1000,7 @@
} else {
s = thread_name + len - 15;
}
-#if defined(HAVE_ANDROID_PTHREAD_SETNAME_NP)
+#if defined(__BIONIC__)
// pthread_setname_np fails rather than truncating long strings.
char buf[16]; // MAX_TASK_COMM_LEN=16 is hard-coded into bionic
strncpy(buf, s, sizeof(buf)-1);
@@ -1372,21 +1373,11 @@
}
void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
- size_t encoded_size = UnsignedLeb128Size(data);
- size_t cur_index = dst->size();
- dst->resize(dst->size() + encoded_size);
- uint8_t* write_pos = &((*dst)[cur_index]);
- uint8_t* write_pos_after = EncodeUnsignedLeb128(write_pos, data);
- DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+ Leb128Encoder(dst).PushBackUnsigned(data);
}
void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* dst) {
- size_t encoded_size = SignedLeb128Size(data);
- size_t cur_index = dst->size();
- dst->resize(dst->size() + encoded_size);
- uint8_t* write_pos = &((*dst)[cur_index]);
- uint8_t* write_pos_after = EncodeSignedLeb128(write_pos, data);
- DCHECK_EQ(static_cast<size_t>(write_pos_after - write_pos), encoded_size);
+ Leb128Encoder(dst).PushBackSigned(data);
}
void PushWord(std::vector<uint8_t>* buf, int data) {
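
[Editor's note] For reference, ULEB128 stores 7 payload bits per byte, least-significant group first, with the high bit as a continuation flag. A standalone sketch that matches the wire format PushBackUnsigned produces (this is not ART's Leb128Encoder implementation):

#include <cstdint>
#include <vector>

// Encode an unsigned 32-bit value as ULEB128 and append it to dst.
static void PushBackUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
  while (data > 0x7f) {
    dst->push_back(static_cast<uint8_t>((data & 0x7f) | 0x80));  // continuation bit set
    data >>= 7;
  }
  dst->push_back(static_cast<uint8_t>(data));  // final byte, high bit clear
}
// Example: 300 (0b100101100) encodes as 0xAC 0x02.
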
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index fb57fc7..69627f5 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -499,9 +499,9 @@
}
}
failures_.push_back(error);
- std::string location(StringPrintf("%s: [0x%X]", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
+ std::string location(StringPrintf("%s: [0x%X] ", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
work_insn_idx_));
- std::ostringstream* failure_message = new std::ostringstream(location);
+ std::ostringstream* failure_message = new std::ostringstream(location, std::ostringstream::ate);
failure_messages_.push_back(failure_message);
return *failure_message;
}
@@ -516,7 +516,7 @@
DCHECK_NE(failure_num, 0U);
std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
prepend += last_fail_message->str();
- failure_messages_[failure_num - 1] = new std::ostringstream(prepend);
+ failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
delete last_fail_message;
}
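
[Editor's note] The std::ostringstream::ate fix above matters because constructing an ostringstream from a seed string fills the buffer but leaves the write position at the start, so later insertions overwrite the seed rather than appending to it. A minimal standalone demonstration:

#include <iostream>
#include <sstream>

int main() {
  std::ostringstream overwrites("prefix: ");  // write position stays at 0
  overwrites << "oops";                       // clobbers the start of the seed
  std::ostringstream appends("prefix: ", std::ostringstream::ate);
  appends << "ok";                            // appends after the seed
  std::cout << overwrites.str() << "\n"       // prints "oopsix: "
            << appends.str() << "\n";         // prints "prefix: ok"
  return 0;
}
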
diff --git a/runtime/zip_archive.cc b/runtime/zip_archive.cc
index c02f310..63bfc44 100644
--- a/runtime/zip_archive.cc
+++ b/runtime/zip_archive.cc
@@ -123,7 +123,7 @@
  // Resist the urge to delete the space. <: is a digraph sequence.
std::unique_ptr< ::ZipEntry> zip_entry(new ::ZipEntry);
- const int32_t error = FindEntry(handle_, name, zip_entry.get());
+ const int32_t error = FindEntry(handle_, ZipEntryName(name), zip_entry.get());
if (error) {
*error_msg = std::string(ErrorCodeString(error));
return nullptr;
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 554712a..f5a1d65 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -28,162 +28,133 @@
static JavaVM* jvm = NULL;
extern "C" JNIEXPORT jint JNI_OnLoad(JavaVM *vm, void *) {
- assert(vm != NULL);
- assert(jvm == NULL);
+ assert(vm != nullptr);
+ assert(jvm == nullptr);
jvm = vm;
return JNI_VERSION_1_6;
}
-static void* testFindClassOnAttachedNativeThread(void*) {
- assert(jvm != NULL);
+static void* AttachHelper(void* arg) {
+ assert(jvm != nullptr);
- JNIEnv* env = NULL;
+ JNIEnv* env = nullptr;
JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
int attach_result = jvm->AttachCurrentThread(&env, &args);
assert(attach_result == 0);
- jclass clazz = env->FindClass("Main");
- assert(clazz != NULL);
- assert(!env->ExceptionCheck());
-
- jobjectArray array = env->NewObjectArray(0, clazz, NULL);
- assert(array != NULL);
- assert(!env->ExceptionCheck());
+ typedef void (*Fn)(JNIEnv*);
+ Fn fn = reinterpret_cast<Fn>(arg);
+ fn(env);
int detach_result = jvm->DetachCurrentThread();
assert(detach_result == 0);
- return NULL;
+ return nullptr;
+}
+
+static void PthreadHelper(void (*fn)(JNIEnv*)) {
+ pthread_t pthread;
+ int pthread_create_result = pthread_create(&pthread, nullptr, AttachHelper,
+ reinterpret_cast<void*>(fn));
+ assert(pthread_create_result == 0);
+ int pthread_join_result = pthread_join(pthread, nullptr);
+ assert(pthread_join_result == 0);
+}
+
+static void testFindClassOnAttachedNativeThread(JNIEnv* env) {
+ jclass clazz = env->FindClass("Main");
+ assert(clazz != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jobjectArray array = env->NewObjectArray(0, clazz, nullptr);
+ assert(array != nullptr);
+ assert(!env->ExceptionCheck());
}
// http://b/10994325
-extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread(JNIEnv*,
- jclass) {
- pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread,
- NULL,
- testFindClassOnAttachedNativeThread,
- NULL);
- assert(pthread_create_result == 0);
- int pthread_join_result = pthread_join(pthread, NULL);
- assert(pthread_join_result == 0);
+extern "C" JNIEXPORT void JNICALL Java_Main_testFindClassOnAttachedNativeThread(JNIEnv*, jclass) {
+ PthreadHelper(&testFindClassOnAttachedNativeThread);
}
-static void* testFindFieldOnAttachedNativeThread(void*) {
- assert(jvm != NULL);
-
- JNIEnv* env = NULL;
- JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
- int attach_result = jvm->AttachCurrentThread(&env, &args);
- assert(attach_result == 0);
-
+static void testFindFieldOnAttachedNativeThread(JNIEnv* env) {
jclass clazz = env->FindClass("Main");
- assert(clazz != NULL);
+ assert(clazz != nullptr);
assert(!env->ExceptionCheck());
jfieldID field = env->GetStaticFieldID(clazz, "testFindFieldOnAttachedNativeThreadField", "Z");
- assert(field != NULL);
+ assert(field != nullptr);
assert(!env->ExceptionCheck());
env->SetStaticBooleanField(clazz, field, JNI_TRUE);
-
- int detach_result = jvm->DetachCurrentThread();
- assert(detach_result == 0);
- return NULL;
}
extern "C" JNIEXPORT void JNICALL Java_Main_testFindFieldOnAttachedNativeThreadNative(JNIEnv*,
- jclass) {
- pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread,
- NULL,
- testFindFieldOnAttachedNativeThread,
- NULL);
- assert(pthread_create_result == 0);
- int pthread_join_result = pthread_join(pthread, NULL);
- assert(pthread_join_result == 0);
+ jclass) {
+ PthreadHelper(&testFindFieldOnAttachedNativeThread);
}
-static void* testReflectFieldGetFromAttachedNativeThread(void*) {
- assert(jvm != NULL);
-
- JNIEnv* env = NULL;
- JavaVMAttachArgs args = { JNI_VERSION_1_6, __FUNCTION__, NULL };
- int attach_result = jvm->AttachCurrentThread(&env, &args);
- assert(attach_result == 0);
-
+static void testReflectFieldGetFromAttachedNativeThread(JNIEnv* env) {
jclass clazz = env->FindClass("Main");
- assert(clazz != NULL);
+ assert(clazz != nullptr);
assert(!env->ExceptionCheck());
jclass class_clazz = env->FindClass("java/lang/Class");
- assert(class_clazz != NULL);
+ assert(class_clazz != nullptr);
assert(!env->ExceptionCheck());
jmethodID getFieldMetodId = env->GetMethodID(class_clazz, "getField",
"(Ljava/lang/String;)Ljava/lang/reflect/Field;");
- assert(getFieldMetodId != NULL);
+ assert(getFieldMetodId != nullptr);
assert(!env->ExceptionCheck());
jstring field_name = env->NewStringUTF("testReflectFieldGetFromAttachedNativeThreadField");
- assert(field_name != NULL);
+ assert(field_name != nullptr);
assert(!env->ExceptionCheck());
jobject field = env->CallObjectMethod(clazz, getFieldMetodId, field_name);
- assert(field != NULL);
+ assert(field != nullptr);
assert(!env->ExceptionCheck());
jclass field_clazz = env->FindClass("java/lang/reflect/Field");
- assert(field_clazz != NULL);
+ assert(field_clazz != nullptr);
assert(!env->ExceptionCheck());
jmethodID getBooleanMetodId = env->GetMethodID(field_clazz, "getBoolean",
"(Ljava/lang/Object;)Z");
- assert(getBooleanMetodId != NULL);
+ assert(getBooleanMetodId != nullptr);
assert(!env->ExceptionCheck());
jboolean value = env->CallBooleanMethod(field, getBooleanMetodId, /* ignored */ clazz);
assert(value == false);
assert(!env->ExceptionCheck());
-
- int detach_result = jvm->DetachCurrentThread();
- assert(detach_result == 0);
- return NULL;
}
// http://b/15539150
extern "C" JNIEXPORT void JNICALL Java_Main_testReflectFieldGetFromAttachedNativeThreadNative(
JNIEnv*, jclass) {
- pthread_t pthread;
- int pthread_create_result = pthread_create(&pthread,
- NULL,
- testReflectFieldGetFromAttachedNativeThread,
- NULL);
- assert(pthread_create_result == 0);
- int pthread_join_result = pthread_join(pthread, NULL);
- assert(pthread_join_result == 0);
+ PthreadHelper(&testReflectFieldGetFromAttachedNativeThread);
}
// http://b/11243757
extern "C" JNIEXPORT void JNICALL Java_Main_testCallStaticVoidMethodOnSubClassNative(JNIEnv* env,
- jclass) {
+ jclass) {
jclass super_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SuperClass");
- assert(super_class != NULL);
+ assert(super_class != nullptr);
jmethodID execute = env->GetStaticMethodID(super_class, "execute", "()V");
- assert(execute != NULL);
+ assert(execute != nullptr);
jclass sub_class = env->FindClass("Main$testCallStaticVoidMethodOnSubClass_SubClass");
- assert(sub_class != NULL);
+ assert(sub_class != nullptr);
env->CallStaticVoidMethod(sub_class, execute);
}
extern "C" JNIEXPORT jobject JNICALL Java_Main_testGetMirandaMethodNative(JNIEnv* env, jclass) {
jclass abstract_class = env->FindClass("Main$testGetMirandaMethod_MirandaAbstract");
- assert(abstract_class != NULL);
+ assert(abstract_class != nullptr);
jmethodID miranda_method = env->GetMethodID(abstract_class, "inInterface", "()Z");
- assert(miranda_method != NULL);
+ assert(miranda_method != nullptr);
return env->ToReflectedMethod(abstract_class, miranda_method, JNI_FALSE);
}
@@ -191,7 +162,7 @@
extern "C" void JNICALL Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass) {
std::vector<uint8_t> buffer(1);
jobject byte_buffer = env->NewDirectByteBuffer(&buffer[0], 0);
- assert(byte_buffer != NULL);
+ assert(byte_buffer != nullptr);
assert(!env->ExceptionCheck());
assert(env->GetDirectBufferAddress(byte_buffer) == &buffer[0]);
@@ -202,8 +173,8 @@
jbyte byte_returns[kByteReturnSize] = { 0, 1, 2, 127, -1, -2, -128 };
extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
- jbyte b3, jbyte b4, jbyte b5, jbyte b6,
- jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
+ jbyte b3, jbyte b4, jbyte b5, jbyte b6,
+ jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
// We use b1 to drive the output.
assert(b2 == 2);
assert(b3 == -3);
@@ -227,8 +198,8 @@
// The weird static_cast is because short int is only guaranteed down to -32767, not Java's -32768.
extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
- jshort s3, jshort s4, jshort s5, jshort s6,
- jshort s7, jshort s8, jshort s9, jshort s10) {
+ jshort s3, jshort s4, jshort s5, jshort s6,
+ jshort s7, jshort s8, jshort s9, jshort s10) {
// We use s1 to drive the output.
assert(s2 == 2);
assert(s3 == -3);
@@ -247,9 +218,9 @@
}
extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
- jboolean b2, jboolean b3, jboolean b4,
- jboolean b5, jboolean b6, jboolean b7,
- jboolean b8, jboolean b9, jboolean b10) {
+ jboolean b2, jboolean b3, jboolean b4,
+ jboolean b5, jboolean b6, jboolean b7,
+ jboolean b8, jboolean b9, jboolean b10) {
// We use b1 to drive the output.
assert(b2 == JNI_TRUE);
assert(b3 == JNI_FALSE);
@@ -269,8 +240,8 @@
jchar char_returns[kCharReturnSize] = { 0, 1, 2, 127, 255, 256, 15000, 34000 };
extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
- jchar c3, jchar c4, jchar c5, jchar c6,
- jchar c7, jchar c8, jchar c9, jchar c10) {
+ jchar c3, jchar c4, jchar c5, jchar c6, jchar c7,
+ jchar c8, jchar c9, jchar c10) {
// We use c1 to drive the output.
assert(c2 == 'a');
assert(c3 == 'b');
@@ -291,3 +262,94 @@
jclass from, jclass to) {
return env->IsAssignableFrom(from, to);
}
+
+static void testShallowGetCallingClassLoader(JNIEnv* env) {
+ // Test direct call.
+ {
+ jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack");
+ assert(vmstack_clazz != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jmethodID getCallingClassLoaderMethodId = env->GetStaticMethodID(vmstack_clazz,
+ "getCallingClassLoader",
+ "()Ljava/lang/ClassLoader;");
+ assert(getCallingClassLoaderMethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jobject class_loader = env->CallStaticObjectMethod(vmstack_clazz,
+ getCallingClassLoaderMethodId);
+ assert(class_loader == nullptr);
+ assert(!env->ExceptionCheck());
+ }
+
+ // Test one-level call. Use System.loadLibrary().
+ {
+ jclass system_clazz = env->FindClass("java/lang/System");
+ assert(system_clazz != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jmethodID loadLibraryMethodId = env->GetStaticMethodID(system_clazz, "loadLibrary",
+ "(Ljava/lang/String;)V");
+ assert(loadLibraryMethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ // Create a string object.
+ jobject library_string = env->NewStringUTF("arttest");
+ assert(library_string != nullptr);
+ assert(!env->ExceptionCheck());
+
+ env->CallStaticVoidMethod(system_clazz, loadLibraryMethodId, library_string);
+ if (env->ExceptionCheck()) {
+ // At most we expect UnsatisfiedLinkError.
+ jthrowable thrown = env->ExceptionOccurred();
+ env->ExceptionClear();
+
+ jclass unsatisfied_link_error_clazz = env->FindClass("java/lang/UnsatisfiedLinkError");
+ jclass thrown_class = env->GetObjectClass(thrown);
+ assert(env->IsSameObject(unsatisfied_link_error_clazz, thrown_class));
+ }
+ }
+}
+
+// http://b/16867274
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv* env,
+ jclass) {
+ PthreadHelper(&testShallowGetCallingClassLoader);
+}
+
+static void testShallowGetStackClass2(JNIEnv* env) {
+ jclass vmstack_clazz = env->FindClass("dalvik/system/VMStack");
+ assert(vmstack_clazz != nullptr);
+ assert(!env->ExceptionCheck());
+
+ // Test direct call.
+ {
+ jmethodID getStackClass2MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass2",
+ "()Ljava/lang/Class;");
+ assert(getStackClass2MethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass2MethodId);
+ assert(caller_class == nullptr);
+ assert(!env->ExceptionCheck());
+ }
+
+ // Test one-level call. Use VMStack.getStackClass1().
+ {
+ jmethodID getStackClass1MethodId = env->GetStaticMethodID(vmstack_clazz, "getStackClass1",
+ "()Ljava/lang/Class;");
+ assert(getStackClass1MethodId != nullptr);
+ assert(!env->ExceptionCheck());
+
+ jobject caller_class = env->CallStaticObjectMethod(vmstack_clazz, getStackClass1MethodId);
+ assert(caller_class == nullptr);
+ assert(!env->ExceptionCheck());
+ }
+
+ // For better testing we would need to compile against libcore and have a two-deep stack
+ // ourselves.
+}
+
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv* env, jclass) {
+ PthreadHelper(&testShallowGetStackClass2);
+}
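
[Editor's note] The AttachHelper/PthreadHelper refactor above hoists the attach-run-detach boilerplate out of every test, so any void(JNIEnv*) body runs on a freshly attached pthread in one call. A hedged usage sketch; testSomething and its JNI export are hypothetical:

// Usage sketch only: a hypothetical test body plugged into PthreadHelper.
static void testSomething(JNIEnv* env) {
  jclass clazz = env->FindClass("Main");
  assert(clazz != nullptr);
  assert(!env->ExceptionCheck());
}

extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestSomething(JNIEnv*, jclass) {
  PthreadHelper(&testSomething);  // attach, run testSomething, detach
}
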
diff --git a/test/004-JniTest/src/Main.java b/test/004-JniTest/src/Main.java
index ae133be..5884bc0 100644
--- a/test/004-JniTest/src/Main.java
+++ b/test/004-JniTest/src/Main.java
@@ -30,6 +30,8 @@
testBooleanMethod();
testCharMethod();
testIsAssignableFromOnPrimitiveTypes();
+ testShallowGetCallingClassLoader();
+ testShallowGetStackClass2();
}
private static native void testFindClassOnAttachedNativeThread();
@@ -167,4 +169,16 @@
}
native static boolean nativeIsAssignableFrom(Class<?> from, Class<?> to);
+
+ static void testShallowGetCallingClassLoader() {
+ nativeTestShallowGetCallingClassLoader();
+ }
+
+ native static void nativeTestShallowGetCallingClassLoader();
+
+ static void testShallowGetStackClass2() {
+ nativeTestShallowGetStackClass2();
+ }
+
+ native static void nativeTestShallowGetStackClass2();
}
diff --git a/test/080-oom-throw/src/Main.java b/test/080-oom-throw/src/Main.java
index 035690f..c93f8bb 100644
--- a/test/080-oom-throw/src/Main.java
+++ b/test/080-oom-throw/src/Main.java
@@ -31,6 +31,7 @@
static class InstanceMemEater {
static boolean sawOome;
+ static InstanceMemEater hook;
InstanceMemEater next;
double d1, d2, d3, d4, d5, d6, d7, d8; // Bloat this object so we fill the heap faster.
@@ -45,6 +46,7 @@
}
static void confuseCompilerOptimization(InstanceMemEater instance) {
+ hook = instance;
}
}
@@ -61,6 +63,7 @@
lastMemEater = lastMemEater.next;
} while (lastMemEater != null);
memEater.confuseCompilerOptimization(memEater);
+ InstanceMemEater.hook = null;
return InstanceMemEater.sawOome;
}
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 268f0be..3acc643 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -22,27 +22,9 @@
#include "jni.h"
#include "stdio.h"
-#include "string.h"
#include "unistd.h"
-#include "native_bridge.h"
-
-
-// Native bridge interfaces...
-
-struct NativeBridgeArtCallbacks {
- const char* (*getMethodShorty)(JNIEnv* env, jmethodID mid);
- uint32_t (*getNativeMethodCount)(JNIEnv* env, jclass clazz);
- uint32_t (*getNativeMethods)(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
- uint32_t method_count);
-};
-
-struct NativeBridgeCallbacks {
- bool (*initialize)(NativeBridgeArtCallbacks* art_cbs);
- void* (*loadLibrary)(const char* libpath, int flag);
- void* (*getTrampoline)(void* handle, const char* name, const char* shorty, uint32_t len);
- bool (*isSupported)(const char* libpath);
-};
+#include "nativebridge/native_bridge.h"
struct NativeBridgeMethod {
const char* name;
@@ -53,7 +35,7 @@
};
static NativeBridgeMethod* find_native_bridge_method(const char *name);
-static NativeBridgeArtCallbacks* gNativeBridgeArtCallbacks;
+static const android::NativeBridgeRuntimeCallbacks* gNativeBridgeArtCallbacks;
static jint trampoline_JNI_OnLoad(JavaVM* vm, void* reserved) {
JNIEnv* env = nullptr;
@@ -225,7 +207,7 @@
}
// NativeBridgeCallbacks implementations
-extern "C" bool native_bridge_initialize(NativeBridgeArtCallbacks* art_cbs) {
+extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* art_cbs) {
if (art_cbs != nullptr) {
gNativeBridgeArtCallbacks = art_cbs;
printf("Native bridge initialized.\n");
@@ -281,7 +263,9 @@
return strcmp(libpath, "libjavacore.so") != 0;
}
-NativeBridgeCallbacks NativeBridgeItf {
+// "NativeBridgeItf" is effectively an API (it is the name of the symbol that will be loaded
+// by the native bridge library).
+android::NativeBridgeCallbacks NativeBridgeItf {
.initialize = &native_bridge_initialize,
.loadLibrary = &native_bridge_loadLibrary,
.getTrampoline = &native_bridge_getTrampoline,
diff --git a/test/116-nodex2oat/run b/test/116-nodex2oat/run
index 5ffeecd..2df6705 100755
--- a/test/116-nodex2oat/run
+++ b/test/116-nodex2oat/run
@@ -17,6 +17,9 @@
# Remove prebuild from the flags, this test is for testing not having oat files.
flags="${@/--prebuild/}"
+# Use the non-prebuild script.
+RUN="${RUN/push-and-run-prebuilt-test-jar/push-and-run-test-jar}"
+
# Make sure we can run without an oat file,
echo "Run -Xnodex2oat"
${RUN} ${flags} --runtime-option -Xnodex2oat
diff --git a/test/117-nopatchoat/expected.txt b/test/117-nopatchoat/expected.txt
new file mode 100644
index 0000000..a1293ae
--- /dev/null
+++ b/test/117-nopatchoat/expected.txt
@@ -0,0 +1,9 @@
+Run without dex2oat/patchoat
+dex2oat & patchoat are disabled, has oat is true, has executable oat is false.
+This is a function call
+Run with dex2oat/patchoat
+dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+This is a function call
+Run default
+dex2oat & patchoat are enabled, has oat is true, has executable oat is true.
+This is a function call
diff --git a/test/117-nopatchoat/info.txt b/test/117-nopatchoat/info.txt
new file mode 100644
index 0000000..aa9f57c
--- /dev/null
+++ b/test/117-nopatchoat/info.txt
@@ -0,0 +1 @@
+Test that disables patchoat'ing the application.
diff --git a/test/117-nopatchoat/nopatchoat.cc b/test/117-nopatchoat/nopatchoat.cc
new file mode 100644
index 0000000..ced7f6e
--- /dev/null
+++ b/test/117-nopatchoat/nopatchoat.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "class_linker.h"
+#include "dex_file-inl.h"
+#include "mirror/class-inl.h"
+#include "scoped_thread_state_change.h"
+#include "thread.h"
+
+namespace art {
+
+class NoPatchoatTest {
+ public:
+ static bool hasExecutableOat(jclass cls) {
+ ScopedObjectAccess soa(Thread::Current());
+ mirror::Class* klass = soa.Decode<mirror::Class*>(cls);
+ const DexFile& dex_file = klass->GetDexFile();
+ const OatFile* oat_file =
+ Runtime::Current()->GetClassLinker()->FindOpenedOatFileForDexFile(dex_file);
+ return oat_file != nullptr && oat_file->IsExecutable();
+ }
+};
+
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasExecutableOat(JNIEnv*, jclass cls) {
+ return NoPatchoatTest::hasExecutableOat(cls);
+}
+
+} // namespace art
diff --git a/test/117-nopatchoat/run b/test/117-nopatchoat/run
new file mode 100755
index 0000000..a7c96a0
--- /dev/null
+++ b/test/117-nopatchoat/run
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Ensure flags include prebuild and relocate. This test doesn't make sense unless we
+# have an oat file we want to relocate.
+# TODO: Unfortunately we have no way to force prebuild on for both host and target (or skip if not on).
+flags="${@/--relocate/}"
+flags="${flags/--no-relocate/}"
+flags="${flags} --relocate"
+
+# Make sure we can run without dex2oat/patchoat.
+echo "Run without dex2oat/patchoat"
+# /bin/false is not actually present on either host or target, so an exec of it would fail.
+# Unfortunately there is no equivalent to /bin/false on Android.
+${RUN} ${flags} --runtime-option -Xnodex2oat
+
+# Make sure we can run with the oat file.
+echo "Run with dexoat/patchoat"
+${RUN} ${flags} --runtime-option -Xdex2oat
+
+# Make sure we can run with the default settings.
+echo "Run default"
+${RUN} ${flags}
diff --git a/test/117-nopatchoat/src/Main.java b/test/117-nopatchoat/src/Main.java
new file mode 100644
index 0000000..f3f91ce
--- /dev/null
+++ b/test/117-nopatchoat/src/Main.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public class Main {
+ public static void main(String[] args) {
+ System.out.println(
+ "dex2oat & patchoat are " + ((isDex2OatEnabled()) ? "enabled" : "disabled") +
+ ", has oat is " + hasOat() + ", has executable oat is " + hasExecutableOat() + ".");
+
+ if (!hasOat() && isDex2OatEnabled()) {
+ throw new Error("Application with dex2oat enabled runs without an oat file");
+ }
+
+ System.out.println(functionCall());
+ }
+
+ public static String functionCall() {
+ String arr[] = {"This", "is", "a", "function", "call"};
+ String ret = "";
+ for (int i = 0; i < arr.length; i++) {
+ ret = ret + arr[i] + " ";
+ }
+ return ret.substring(0, ret.length() - 1);
+ }
+
+ static {
+ System.loadLibrary("arttest");
+ }
+
+ private native static boolean isDex2OatEnabled();
+
+ private native static boolean hasOat();
+
+ private native static boolean hasExecutableOat();
+}
diff --git a/test/702-LargeBranchOffset/build b/test/702-LargeBranchOffset/build
new file mode 100644
index 0000000..eacf730
--- /dev/null
+++ b/test/702-LargeBranchOffset/build
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# Write out a bunch of source files.
+cpp -P src/Main.java.in src/Main.java
+
+mkdir classes
+${JAVAC} -d classes src/*.java
+
+${DX} --debug --dex --output=classes.dex classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/702-LargeBranchOffset/expected.txt b/test/702-LargeBranchOffset/expected.txt
new file mode 100644
index 0000000..130678f
--- /dev/null
+++ b/test/702-LargeBranchOffset/expected.txt
@@ -0,0 +1,5 @@
+0
+0
+2
+1
+512
diff --git a/test/702-LargeBranchOffset/info.txt b/test/702-LargeBranchOffset/info.txt
new file mode 100644
index 0000000..747263e
--- /dev/null
+++ b/test/702-LargeBranchOffset/info.txt
@@ -0,0 +1 @@
+Simple test to check if large branch offset works correctly.
diff --git a/test/702-LargeBranchOffset/src/Main.java.in b/test/702-LargeBranchOffset/src/Main.java.in
new file mode 100644
index 0000000..270d766
--- /dev/null
+++ b/test/702-LargeBranchOffset/src/Main.java.in
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define DO_2_TIMES(x) x x
+#define DO_4_TIMES(x) DO_2_TIMES(DO_2_TIMES(x))
+#define DO_16_TIMES(x) DO_4_TIMES(DO_4_TIMES(x))
+#define DO_256_TIMES(x) DO_16_TIMES(DO_16_TIMES(x))
+#define DO_512_TIMES(x) DO_256_TIMES(DO_2_TIMES(x))
+
+
+public class Main {
+ public static void main(String[] args) {
+ Main m = new Main();
+ System.out.println(m.foo(-1, -1));
+ System.out.println(m.foo(-1, +1));
+ System.out.println(m.foo(+1, -1));
+ System.out.println(m.foo(+1, +1));
+ System.out.println(m.value);
+ }
+
+ public int foo(int a, int b) {
+ if ( a >= 0 ) {
+ if ( b < 0 ) {
+ DO_512_TIMES( synchronized(lock) { value++; } )
+ return 2;
+ }
+ return 1;
+ }
+ return 0;
+ }
+
+ Object lock = new Object();
+ int value = 0;
+}
diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk
index 3871b28..caaf649 100644
--- a/test/Android.libarttest.mk
+++ b/test/Android.libarttest.mk
@@ -24,7 +24,8 @@
004-ReferenceMap/stack_walk_refmap_jni.cc \
004-StackWalk/stack_walk_jni.cc \
004-UnsafeTest/unsafe_test.cc \
- 116-nodex2oat/nodex2oat.cc
+ 116-nodex2oat/nodex2oat.cc \
+ 117-nopatchoat/nopatchoat.cc
ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so
ifdef TARGET_2ND_ARCH
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 0cf9e16..ce0eb3f 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -124,6 +124,42 @@
ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-no-prebuild)
ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-no-prebuild)
+# NB 116-nodex2oat is not broken per se; it just doesn't (and isn't meant to) work with --prebuild.
+# On the host this is worked around by changing a run flag, but we cannot do the same on the
+# target because it uses a different run script.
+TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS := \
+ 116-nodex2oat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-trace,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-gcverify,-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-gcstress,-prebuild))
+
+# NB 117-nopatchoat is not broken per se; it just doesn't work (and isn't meant to) without --prebuild --relocate.
+TEST_ART_BROKEN_RELOCATE_TESTS := \
+ 117-nopatchoat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-trace,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-gcverify,-relocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
+
+TEST_ART_BROKEN_NORELOCATE_TESTS := \
+ 117-nopatchoat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-trace,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-gcverify,-norelocate))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-gcstress,-norelocate))
+
+TEST_ART_BROKEN_NO_PREBUILD_TESTS := \
+ 117-nopatchoat
+
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-trace,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-gcverify,-no-prebuild))
+ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))
+
# The path where build only targets will be output, e.g.
# out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA
art_run_tests_dir := $(call intermediates-dir-for,PACKAGING,art-run-tests)/DATA
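
As a reference for the blacklist additions above: each foreach line expands one test name into the full family of run-test rule names. Judging from the existing entries in this file, $(call all-run-test-names,117-nopatchoat,-trace,-relocate) should yield names along these (illustrative, configuration-dependent) lines:

    test-art-host-run-test-trace-default-relocate-117-nopatchoat64
    test-art-target-run-test-trace-optimizing-relocate-117-nopatchoat32
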
diff --git a/test/MyClassNatives/MyClassNatives.java b/test/MyClassNatives/MyClassNatives.java
index b5e0204..fab153b 100644
--- a/test/MyClassNatives/MyClassNatives.java
+++ b/test/MyClassNatives/MyClassNatives.java
@@ -80,16 +80,22 @@
Object o248, Object o249, Object o250, Object o251, Object o252, Object o253);
native void withoutImplementation();
-
+
native static void stackArgsIntsFirst(int i1, int i2, int i3, int i4, int i5, int i6, int i7,
int i8, int i9, int i10, float f1, float f2, float f3, float f4, float f5, float f6,
float f7, float f8, float f9, float f10);
-
+
native static void stackArgsFloatsFirst(float f1, float f2, float f3, float f4, float f5,
float f6, float f7, float f8, float f9, float f10, int i1, int i2, int i3, int i4, int i5,
int i6, int i7, int i8, int i9, int i10);
-
+
native static void stackArgsMixed(int i1, float f1, int i2, float f2, int i3, float f3, int i4,
float f4, int i5, float f5, int i6, float f6, int i7, float f7, int i8, float f8, int i9,
float f9, int i10, float f10);
+
+ static native double logD(double d);
+ static native float logF(float f);
+ static native boolean returnTrue();
+ static native boolean returnFalse();
+ static native int returnInt();
}
diff --git a/test/run-test b/test/run-test
index 496f7d1..6fac03b 100755
--- a/test/run-test
+++ b/test/run-test
@@ -206,6 +206,15 @@
break
fi
done
+
+# tmp_dir may be relative; resolve it to an absolute path.
+#
+# Cannot use realpath, as it does not exist on Mac.
+# Cannot use a simple "cd", as the path might not be created yet.
+# Use the -m option of readlink: it canonicalizes but allows non-existing components.
+noncanonical_tmp_dir=$tmp_dir
+tmp_dir="`cd $oldwd ; readlink -m $tmp_dir`"
+
mkdir -p $tmp_dir
if [ "$basic_verify" = "true" ]; then
@@ -466,6 +475,8 @@
"./${run}" $run_args "$@" >"$output" 2>&1
else
cp "$build_output" "$output"
+ echo "Failed to build in tmpdir=${tmp_dir} from oldwd=${oldwd} and cwd=`pwd`"
+ echo "Non-canonical tmpdir was ${noncanonical_tmp_dir}"
echo "build exit status: $build_exit" >>"$output"
fi
./$check_cmd "$expected" "$output"
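
The readlink -m idiom added above is worth seeing in isolation; a small sketch with hypothetical paths:

    # -m canonicalizes even when path components do not exist yet:
    cd /tmp
    readlink -m ./a/b/../c    # prints /tmp/a/c although neither a nor c exists
    # realpath is unavailable on Mac, and a plain "cd" into the path would fail
    # because the directory has not been created yet.
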